Diffstat (limited to 'freebsd/sys/netinet/sctp_output.c')
-rw-r--r--  freebsd/sys/netinet/sctp_output.c  5195
1 file changed, 2525 insertions, 2670 deletions
diff --git a/freebsd/sys/netinet/sctp_output.c b/freebsd/sys/netinet/sctp_output.c
index 648a87a2..30c6f3c0 100644
--- a/freebsd/sys/netinet/sctp_output.c
+++ b/freebsd/sys/netinet/sctp_output.c
@@ -2,16 +2,18 @@
/*-
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* a) Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
+ * this list of conditions and the following disclaimer.
*
* b) Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the distribution.
+ * the documentation and/or other materials provided with the distribution.
*
* c) Neither the name of Cisco Systems, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
@@ -30,8 +32,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
-
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
+#include <netinet/udp_var.h>
#include <machine/in_cksum.h>
@@ -1864,15 +1865,10 @@ struct sack_track sack_array[256] = {
int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
- int ipv4_addr_legal,
- int ipv6_addr_legal,
- int loopback_scope,
- int ipv4_local_scope,
- int local_scope,
- int site_scope,
+ struct sctp_scoping *scope,
int do_update)
{
- if ((loopback_scope == 0) &&
+ if ((scope->loopback_scope == 0) &&
(ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
/*
* skip loopback if not in scope *
@@ -1880,8 +1876,9 @@ sctp_is_address_in_scope(struct sctp_ifa *ifa,
return (0);
}
switch (ifa->address.sa.sa_family) {
+#ifdef INET
case AF_INET:
- if (ipv4_addr_legal) {
+ if (scope->ipv4_addr_legal) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)&ifa->address.sin;
@@ -1889,7 +1886,7 @@ sctp_is_address_in_scope(struct sctp_ifa *ifa,
/* not in scope , unspecified */
return (0);
}
- if ((ipv4_local_scope == 0) &&
+ if ((scope->ipv4_local_scope == 0) &&
(IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
/* private address not in scope */
return (0);
@@ -1898,9 +1895,10 @@ sctp_is_address_in_scope(struct sctp_ifa *ifa,
return (0);
}
break;
+#endif
#ifdef INET6
case AF_INET6:
- if (ipv6_addr_legal) {
+ if (scope->ipv6_addr_legal) {
struct sockaddr_in6 *sin6;
/*
@@ -1923,7 +1921,7 @@ sctp_is_address_in_scope(struct sctp_ifa *ifa,
(IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
return (0);
}
- if ((site_scope == 0) &&
+ if ((scope->site_scope == 0) &&
(IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
return (0);
}
@@ -1939,21 +1937,27 @@ sctp_is_address_in_scope(struct sctp_ifa *ifa,
}
static struct mbuf *
-sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
+sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t * len)
{
struct sctp_paramhdr *parmh;
struct mbuf *mret;
- int len;
+ uint16_t plen;
- if (ifa->address.sa.sa_family == AF_INET) {
- len = sizeof(struct sctp_ipv4addr_param);
- } else if (ifa->address.sa.sa_family == AF_INET6) {
- len = sizeof(struct sctp_ipv6addr_param);
- } else {
- /* unknown type */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ plen = (uint16_t) sizeof(struct sctp_ipv4addr_param);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ plen = (uint16_t) sizeof(struct sctp_ipv6addr_param);
+ break;
+#endif
+ default:
return (m);
}
- if (M_TRAILINGSPACE(m) >= len) {
+ if (M_TRAILINGSPACE(m) >= plen) {
/* easy side we just drop it on the end */
parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
mret = m;
@@ -1963,7 +1967,7 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
while (SCTP_BUF_NEXT(mret) != NULL) {
mret = SCTP_BUF_NEXT(mret);
}
- SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
+ SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
if (SCTP_BUF_NEXT(mret) == NULL) {
/* We are hosed, can't add more addresses */
return (m);
@@ -1973,6 +1977,7 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
}
/* now add the parameter */
switch (ifa->address.sa.sa_family) {
+#ifdef INET
case AF_INET:
{
struct sctp_ipv4addr_param *ipv4p;
@@ -1981,11 +1986,12 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
sin = (struct sockaddr_in *)&ifa->address.sin;
ipv4p = (struct sctp_ipv4addr_param *)parmh;
parmh->param_type = htons(SCTP_IPV4_ADDRESS);
- parmh->param_length = htons(len);
+ parmh->param_length = htons(plen);
ipv4p->addr = sin->sin_addr.s_addr;
- SCTP_BUF_LEN(mret) += len;
+ SCTP_BUF_LEN(mret) += plen;
break;
}
+#endif
#ifdef INET6
case AF_INET6:
{
@@ -1995,25 +2001,30 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
ipv6p = (struct sctp_ipv6addr_param *)parmh;
parmh->param_type = htons(SCTP_IPV6_ADDRESS);
- parmh->param_length = htons(len);
+ parmh->param_length = htons(plen);
memcpy(ipv6p->addr, &sin6->sin6_addr,
sizeof(ipv6p->addr));
/* clear embedded scope in the address */
in6_clearscope((struct in6_addr *)ipv6p->addr);
- SCTP_BUF_LEN(mret) += len;
+ SCTP_BUF_LEN(mret) += plen;
break;
}
#endif
default:
return (m);
}
+ if (len != NULL) {
+ *len += plen;
+ }
return (mret);
}
struct mbuf *
-sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
- struct mbuf *m_at, int cnt_inits_to)
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_scoping *scope,
+ struct mbuf *m_at, int cnt_inits_to,
+ uint16_t * padding_len, uint16_t * chunk_len)
{
struct sctp_vrf *vrf = NULL;
int cnt, limit_out = 0, total_count;
@@ -2046,13 +2057,10 @@ sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
continue;
}
LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
- if (sctp_is_address_in_scope(sctp_ifap,
- scope->ipv4_addr_legal,
- scope->ipv6_addr_legal,
- scope->loopback_scope,
- scope->ipv4_local_scope,
- scope->local_scope,
- scope->site_scope, 1) == 0) {
+ if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
+ continue;
+ }
+ if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
continue;
}
cnt++;
@@ -2078,16 +2086,22 @@ skip_count:
continue;
}
LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+ if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
+ continue;
+ }
if (sctp_is_address_in_scope(sctp_ifap,
- scope->ipv4_addr_legal,
- scope->ipv6_addr_legal,
- scope->loopback_scope,
- scope->ipv4_local_scope,
- scope->local_scope,
- scope->site_scope, 0) == 0) {
+ scope, 0) == 0) {
continue;
}
- m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
+ if ((chunk_len != NULL) &&
+ (padding_len != NULL) &&
+ (*padding_len > 0)) {
+ memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
+ SCTP_BUF_LEN(m_at) += *padding_len;
+ *chunk_len += *padding_len;
+ *padding_len = 0;
+ }
+ m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
if (limit_out) {
cnt++;
total_count++;
@@ -2129,43 +2143,38 @@ skip_count:
continue;
}
if (sctp_is_address_in_scope(laddr->ifa,
- scope->ipv4_addr_legal,
- scope->ipv6_addr_legal,
- scope->loopback_scope,
- scope->ipv4_local_scope,
- scope->local_scope,
- scope->site_scope, 1) == 0) {
+ scope, 1) == 0) {
continue;
}
cnt++;
}
- if (cnt > SCTP_ADDRESS_LIMIT) {
- limit_out = 1;
- }
/*
* To get through a NAT we only list addresses if we have
* more than one. That way if you just bind a single address
* we let the source of the init dictate our address.
*/
if (cnt > 1) {
+ cnt = cnt_inits_to;
LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
- cnt = 0;
if (laddr->ifa == NULL) {
continue;
}
- if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
continue;
-
+ }
if (sctp_is_address_in_scope(laddr->ifa,
- scope->ipv4_addr_legal,
- scope->ipv6_addr_legal,
- scope->loopback_scope,
- scope->ipv4_local_scope,
- scope->local_scope,
- scope->site_scope, 0) == 0) {
+ scope, 0) == 0) {
continue;
}
- m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
+ if ((chunk_len != NULL) &&
+ (padding_len != NULL) &&
+ (*padding_len > 0)) {
+ memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
+ SCTP_BUF_LEN(m_at) += *padding_len;
+ *chunk_len += *padding_len;
+ *padding_len = 0;
+ }
+ m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
cnt++;
if (cnt >= SCTP_ADDRESS_LIMIT) {
break;
@@ -2193,26 +2202,26 @@ sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
* means it is the same scope or higher scope then the destination.
* L = loopback, P = private, G = global
* -----------------------------------------
- * src | dest | result
- * ----------------------------------------
- * L | L | yes
- * -----------------------------------------
- * P | L | yes-v4 no-v6
- * -----------------------------------------
- * G | L | yes-v4 no-v6
- * -----------------------------------------
- * L | P | no
- * -----------------------------------------
- * P | P | yes
- * -----------------------------------------
- * G | P | no
- * -----------------------------------------
- * L | G | no
- * -----------------------------------------
- * P | G | no
- * -----------------------------------------
- * G | G | yes
- * -----------------------------------------
+ * src | dest | result
+ * ----------------------------------------
+ * L | L | yes
+ * -----------------------------------------
+ * P | L | yes-v4 no-v6
+ * -----------------------------------------
+ * G | L | yes-v4 no-v6
+ * -----------------------------------------
+ * L | P | no
+ * -----------------------------------------
+ * P | P | yes
+ * -----------------------------------------
+ * G | P | no
+ * -----------------------------------------
+ * L | G | no
+ * -----------------------------------------
+ * P | G | no
+ * -----------------------------------------
+ * G | G | yes
+ * -----------------------------------------
*/
if (ifa->address.sa.sa_family != fam) {
@@ -2225,6 +2234,7 @@ sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
/* Ok the address may be ok */
+#ifdef INET6
if (fam == AF_INET6) {
/* ok to use deprecated addresses? no lets not! */
if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
@@ -2244,6 +2254,7 @@ sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
}
}
}
+#endif
/*
* Now that we know what is what, implement or table this could in
* theory be done slicker (it used to be), but this is
@@ -2283,35 +2294,50 @@ sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
{
uint8_t dest_is_global = 0;
- /*
+ /**
* Here we determine if its a acceptable address. A acceptable
* address means it is the same scope or higher scope but we can
* allow for NAT which means its ok to have a global dest and a
* private src.
*
* L = loopback, P = private, G = global
- * ----------------------------------------- src | dest | result
- * ----------------------------------------- L | L | yes
- * ----------------------------------------- P | L |
- * yes-v4 no-v6 ----------------------------------------- G |
- * L | yes ----------------------------------------- L |
- * P | no ----------------------------------------- P | P
- * | yes ----------------------------------------- G | P
- * | yes - May not work -----------------------------------------
- * L | G | no ----------------------------------------- P
- * | G | yes - May not work
- * ----------------------------------------- G | G | yes
+ * -----------------------------------------
+ * src | dest | result
+ * -----------------------------------------
+ * L | L | yes
+ * -----------------------------------------
+ * P | L | yes-v4 no-v6
+ * -----------------------------------------
+ * G | L | yes
+ * -----------------------------------------
+ * L | P | no
+ * -----------------------------------------
+ * P | P | yes
+ * -----------------------------------------
+ * G | P | yes - May not work
+ * -----------------------------------------
+ * L | G | no
+ * -----------------------------------------
+ * P | G | yes - May not work
+ * -----------------------------------------
+ * G | G | yes
* -----------------------------------------
*/
if (ifa->address.sa.sa_family != fam) {
/* forget non matching family */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
+ ifa->address.sa.sa_family, fam);
return (NULL);
}
/* Ok the address may be ok */
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
+ dest_is_loop, dest_is_priv);
if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
dest_is_global = 1;
}
+#ifdef INET6
if (fam == AF_INET6) {
/* ok to use deprecated addresses? */
if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
@@ -2323,17 +2349,25 @@ sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
return (NULL);
}
}
+#endif
/*
* Now that we know what is what, implement our table. This could in
* theory be done slicker (it used to be), but this is
* straightforward and easier to validate :-)
*/
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
+ ifa->src_is_loop,
+ dest_is_priv);
if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
return (NULL);
}
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
+ ifa->src_is_loop,
+ dest_is_global);
if ((ifa->src_is_loop == 1) && (dest_is_global)) {
return (NULL);
}
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
/* its an acceptable address */
return (ifa);
}
@@ -2504,7 +2538,6 @@ once_again_too:
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
- struct sctp_nets *net,
sctp_route_t * ro,
uint32_t vrf_id,
uint8_t dest_is_priv,
@@ -2734,6 +2767,7 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
}
}
#endif
+#ifdef INET
/* Avoid topologically incorrect IPv4 address */
if (stcb && fam == AF_INET &&
sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
@@ -2741,14 +2775,9 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
continue;
}
}
+#endif
if (stcb) {
- if (sctp_is_address_in_scope(ifa,
- stcb->asoc.ipv4_addr_legal,
- stcb->asoc.ipv6_addr_legal,
- stcb->asoc.loopback_scope,
- stcb->asoc.ipv4_local_scope,
- stcb->asoc.local_scope,
- stcb->asoc.site_scope, 0) == 0) {
+ if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
continue;
}
if (((non_asoc_addr_ok == 0) &&
@@ -2794,13 +2823,7 @@ sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
continue;
}
if (stcb) {
- if (sctp_is_address_in_scope(ifa,
- stcb->asoc.ipv4_addr_legal,
- stcb->asoc.ipv6_addr_legal,
- stcb->asoc.loopback_scope,
- stcb->asoc.ipv4_local_scope,
- stcb->asoc.local_scope,
- stcb->asoc.site_scope, 0) == 0) {
+ if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
continue;
}
if (((non_asoc_addr_ok == 0) &&
@@ -2821,8 +2844,7 @@ sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
}
static struct sctp_ifa *
-sctp_choose_boundall(struct sctp_inpcb *inp,
- struct sctp_tcb *stcb,
+sctp_choose_boundall(struct sctp_tcb *stcb,
struct sctp_nets *net,
sctp_route_t * ro,
uint32_t vrf_id,
@@ -2838,6 +2860,11 @@ sctp_choose_boundall(struct sctp_inpcb *inp,
uint32_t ifn_index;
struct sctp_vrf *vrf;
+#ifdef INET
+ int retried = 0;
+
+#endif
+
/*-
* For boundall we can use any address in the association.
* If non_asoc_addr_ok is set we can use any address (at least in
@@ -2858,6 +2885,7 @@ sctp_choose_boundall(struct sctp_inpcb *inp,
ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
if (sctp_ifn == NULL) {
/* ?? We don't have this guy ?? */
@@ -2941,7 +2969,7 @@ bound_all_plan_b:
}
SCTPDBG(SCTP_DEBUG_OUTPUT2,
"num preferred:%d on interface:%p cur_addr_num:%d\n",
- num_preferred, sctp_ifn, cur_addr_num);
+ num_preferred, (void *)sctp_ifn, cur_addr_num);
/*
* Ok we have num_eligible_addr set with how many we can
@@ -2966,30 +2994,34 @@ bound_all_plan_b:
}
atomic_add_int(&sifa->refcount, 1);
return (sifa);
-
}
-
+#ifdef INET
+again_with_private_addresses_allowed:
+#endif
/* plan_c: do we have an acceptable address on the emit interface */
+ sifa = NULL;
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
if (emit_ifn == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
goto plan_d;
}
LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
- (non_asoc_addr_ok == 0))
+ (non_asoc_addr_ok == 0)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
continue;
+ }
sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
dest_is_priv, fam);
- if (sifa == NULL)
+ if (sifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
continue;
+ }
if (stcb) {
- if (sctp_is_address_in_scope(sifa,
- stcb->asoc.ipv4_addr_legal,
- stcb->asoc.ipv6_addr_legal,
- stcb->asoc.loopback_scope,
- stcb->asoc.ipv4_local_scope,
- stcb->asoc.local_scope,
- stcb->asoc.site_scope, 0) == 0) {
+ if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
+ sifa = NULL;
continue;
}
if (((non_asoc_addr_ok == 0) &&
@@ -3001,11 +3033,15 @@ bound_all_plan_b:
* It is restricted for some reason..
* probably not yet added.
*/
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its resticted\n");
+ sifa = NULL;
continue;
}
+ } else {
+ SCTP_PRINTF("Stcb is null - no print\n");
}
atomic_add_int(&sifa->refcount, 1);
- return (sifa);
+ goto out;
}
plan_d:
/*
@@ -3014,16 +3050,12 @@ plan_d:
* out and see if we can find an acceptable address somewhere
* amongst all interfaces.
*/
- SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n");
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
/* wrong base scope */
continue;
}
- if ((sctp_ifn == looked_at) && looked_at)
- /* already looked at this guy */
- continue;
-
LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0))
@@ -3034,13 +3066,8 @@ plan_d:
if (sifa == NULL)
continue;
if (stcb) {
- if (sctp_is_address_in_scope(sifa,
- stcb->asoc.ipv4_addr_legal,
- stcb->asoc.ipv6_addr_legal,
- stcb->asoc.loopback_scope,
- stcb->asoc.ipv4_local_scope,
- stcb->asoc.local_scope,
- stcb->asoc.site_scope, 0) == 0) {
+ if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
+ sifa = NULL;
continue;
}
if (((non_asoc_addr_ok == 0) &&
@@ -3052,19 +3079,76 @@ plan_d:
* It is restricted for some
* reason.. probably not yet added.
*/
+ sifa = NULL;
continue;
}
}
- atomic_add_int(&sifa->refcount, 1);
- return (sifa);
+ goto out;
}
}
- /*
- * Ok we can find NO address to source from that is not on our
- * restricted list and non_asoc_address is NOT ok, or it is on our
- * restricted list. We can't source to it :-(
- */
- return (NULL);
+#ifdef INET
+ if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
+ stcb->asoc.scope.ipv4_local_scope = 1;
+ retried = 1;
+ goto again_with_private_addresses_allowed;
+ } else if (retried == 1) {
+ stcb->asoc.scope.ipv4_local_scope = 0;
+ }
+#endif
+out:
+#ifdef INET
+ if (sifa) {
+ if (retried == 1) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* wrong base scope */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ struct sctp_ifa *tmp_sifa;
+
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
+ dest_is_loop,
+ dest_is_priv, fam);
+ if (tmp_sifa == NULL) {
+ continue;
+ }
+ if (tmp_sifa == sifa) {
+ continue;
+ }
+ if (stcb) {
+ if (sctp_is_address_in_scope(tmp_sifa,
+ &stcb->asoc.scope, 0) == 0) {
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
+ (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
+ /*
+ * It is restricted
+ * for some reason..
+ * probably not yet
+ * added.
+ */
+ continue;
+ }
+ }
+ if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
+ (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
+ sctp_add_local_addr_restricted(stcb, tmp_sifa);
+ }
+ }
+ }
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ }
+#endif
+ return (sifa);
}
@@ -3077,17 +3161,20 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
struct sctp_nets *net,
int non_asoc_addr_ok, uint32_t vrf_id)
{
+ struct sctp_ifa *answer;
+ uint8_t dest_is_priv, dest_is_loop;
+ sa_family_t fam;
+
+#ifdef INET
struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
+#endif
#ifdef INET6
struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
#endif
- struct sctp_ifa *answer;
- uint8_t dest_is_priv, dest_is_loop;
- sa_family_t fam;
- /*-
+ /**
* Rules: - Find the route if needed, cache if I can. - Look at
* interface address in route, Is it in the bound list. If so we
* have the best source. - If not we must rotate amongst the
@@ -3158,10 +3245,11 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
if (ro->ro_rt == NULL) {
return (NULL);
}
- fam = to->sin_family;
+ fam = ro->ro_dst.sa_family;
dest_is_priv = dest_is_loop = 0;
/* Setup our scopes for the destination */
switch (fam) {
+#ifdef INET
case AF_INET:
/* Scope based on outbound address */
if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
@@ -3174,6 +3262,7 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
dest_is_priv = 1;
}
break;
+#endif
#ifdef INET6
case AF_INET6:
/* Scope based on outbound address */
@@ -3197,13 +3286,13 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
#endif
}
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
- SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to);
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
SCTP_IPI_ADDR_RLOCK();
if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
/*
* Bound all case
*/
- answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
+ answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
dest_is_priv, dest_is_loop,
non_asoc_addr_ok, fam);
SCTP_IPI_ADDR_RUNLOCK();
@@ -3213,7 +3302,7 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
* Subset bound case
*/
if (stcb) {
- answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro,
+ answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
vrf_id, dest_is_priv,
dest_is_loop,
non_asoc_addr_ok, fam);
@@ -3228,56 +3317,344 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
}
static int
-sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
+sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
{
struct cmsghdr cmh;
- int tlen, at;
+ int tlen, at, found;
+ struct sctp_sndinfo sndinfo;
+ struct sctp_prinfo prinfo;
+ struct sctp_authinfo authinfo;
tlen = SCTP_BUF_LEN(control);
at = 0;
+ found = 0;
/*
* Independent of how many mbufs, find the c_type inside the control
* structure and copy out the data.
*/
while (at < tlen) {
if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
- /* not enough room for one more we are done. */
- return (0);
+ /* There is not enough room for one more. */
+ return (found);
}
m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
+ if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+ /* We dont't have a complete CMSG header. */
+ return (found);
+ }
if (((int)cmh.cmsg_len + at) > tlen) {
- /*
- * this is real messed up since there is not enough
- * data here to cover the cmsg header. We are done.
- */
- return (0);
+ /* We don't have the complete CMSG. */
+ return (found);
}
if ((cmh.cmsg_level == IPPROTO_SCTP) &&
- (c_type == cmh.cmsg_type)) {
- /* found the one we want, copy it out */
- at += CMSG_ALIGN(sizeof(struct cmsghdr));
- if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
- /*
- * space of cmsg_len after header not big
- * enough
- */
- return (0);
+ ((c_type == cmh.cmsg_type) ||
+ ((c_type == SCTP_SNDRCV) &&
+ ((cmh.cmsg_type == SCTP_SNDINFO) ||
+ (cmh.cmsg_type == SCTP_PRINFO) ||
+ (cmh.cmsg_type == SCTP_AUTHINFO))))) {
+ if (c_type == cmh.cmsg_type) {
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
+ return (found);
+ }
+ /* It is exactly what we want. Copy it out. */
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
+ return (1);
+ } else {
+ struct sctp_sndrcvinfo *sndrcvinfo;
+
+ sndrcvinfo = (struct sctp_sndrcvinfo *)data;
+ if (found == 0) {
+ if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
+ return (found);
+ }
+ memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
+ }
+ switch (cmh.cmsg_type) {
+ case SCTP_SNDINFO:
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
+ return (found);
+ }
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
+ sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
+ sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
+ sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
+ sndrcvinfo->sinfo_context = sndinfo.snd_context;
+ sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
+ break;
+ case SCTP_PRINFO:
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
+ return (found);
+ }
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
+ sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
+ sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
+ break;
+ case SCTP_AUTHINFO:
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
+ return (found);
+ }
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
+ sndrcvinfo->sinfo_keynumber_valid = 1;
+ sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
+ break;
+ default:
+ return (found);
+ }
+ found = 1;
}
- m_copydata(control, at, cpsize, data);
+ }
+ at += CMSG_ALIGN(cmh.cmsg_len);
+ }
+ return (found);
+}
+
+static int
+sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
+{
+ struct cmsghdr cmh;
+ int tlen, at;
+ struct sctp_initmsg initmsg;
+
+#ifdef INET
+ struct sockaddr_in sin;
+
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+
+#endif
+
+ tlen = SCTP_BUF_LEN(control);
+ at = 0;
+ while (at < tlen) {
+ if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
+ /* There is not enough room for one more. */
+ *error = EINVAL;
return (1);
- } else {
- at += CMSG_ALIGN(cmh.cmsg_len);
- if (cmh.cmsg_len == 0) {
+ }
+ m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
+ if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+ /* We dont't have a complete CMSG header. */
+ *error = EINVAL;
+ return (1);
+ }
+ if (((int)cmh.cmsg_len + at) > tlen) {
+ /* We don't have the complete CMSG. */
+ *error = EINVAL;
+ return (1);
+ }
+ if (cmh.cmsg_level == IPPROTO_SCTP) {
+ switch (cmh.cmsg_type) {
+ case SCTP_INIT:
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
+ *error = EINVAL;
+ return (1);
+ }
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
+ if (initmsg.sinit_max_attempts)
+ stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
+ if (initmsg.sinit_num_ostreams)
+ stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
+ if (initmsg.sinit_max_instreams)
+ stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
+ if (initmsg.sinit_max_init_timeo)
+ stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
+ if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
+ struct sctp_stream_out *tmp_str;
+ unsigned int i;
+
+ /* Default is NOT correct */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
+ stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_MALLOC(tmp_str,
+ struct sctp_stream_out *,
+ (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
+ SCTP_M_STRMO);
+ SCTP_TCB_LOCK(stcb);
+ if (tmp_str != NULL) {
+ SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
+ stcb->asoc.strmout = tmp_str;
+ stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
+ } else {
+ stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
+ }
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+ stcb->asoc.strmout[i].chunks_on_queues = 0;
+ stcb->asoc.strmout[i].next_sequence_send = 0;
+ stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.strmout[i].last_msg_incomplete = 0;
+ stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
+ }
+ }
+ break;
+#ifdef INET
+ case SCTP_DSTADDRV4:
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
+ *error = EINVAL;
+ return (1);
+ }
+ memset(&sin, 0, sizeof(struct sockaddr_in));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_port = stcb->rport;
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
+ if ((sin.sin_addr.s_addr == INADDR_ANY) ||
+ (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+ *error = EINVAL;
+ return (1);
+ }
+ if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
+ SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+ *error = ENOBUFS;
+ return (1);
+ }
+ break;
+#endif
+#ifdef INET6
+ case SCTP_DSTADDRV6:
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
+ *error = EINVAL;
+ return (1);
+ }
+ memset(&sin6, 0, sizeof(struct sockaddr_in6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_port = stcb->rport;
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
+ IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
+ *error = EINVAL;
+ return (1);
+ }
+#ifdef INET
+ if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
+ in6_sin6_2_sin(&sin, &sin6);
+ if ((sin.sin_addr.s_addr == INADDR_ANY) ||
+ (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+ *error = EINVAL;
+ return (1);
+ }
+ if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
+ SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+ *error = ENOBUFS;
+ return (1);
+ }
+ } else
+#endif
+ if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
+ SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+ *error = ENOBUFS;
+ return (1);
+ }
+ break;
+#endif
+ default:
break;
}
}
+ at += CMSG_ALIGN(cmh.cmsg_len);
}
- /* not found */
return (0);
}
+static struct sctp_tcb *
+sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
+ in_port_t port,
+ struct mbuf *control,
+ struct sctp_nets **net_p,
+ int *error)
+{
+ struct cmsghdr cmh;
+ int tlen, at;
+ struct sctp_tcb *stcb;
+ struct sockaddr *addr;
+
+#ifdef INET
+ struct sockaddr_in sin;
+
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+
+#endif
+
+ tlen = SCTP_BUF_LEN(control);
+ at = 0;
+ while (at < tlen) {
+ if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
+ /* There is not enough room for one more. */
+ *error = EINVAL;
+ return (NULL);
+ }
+ m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
+ if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+ /* We dont't have a complete CMSG header. */
+ *error = EINVAL;
+ return (NULL);
+ }
+ if (((int)cmh.cmsg_len + at) > tlen) {
+ /* We don't have the complete CMSG. */
+ *error = EINVAL;
+ return (NULL);
+ }
+ if (cmh.cmsg_level == IPPROTO_SCTP) {
+ switch (cmh.cmsg_type) {
+#ifdef INET
+ case SCTP_DSTADDRV4:
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
+ *error = EINVAL;
+ return (NULL);
+ }
+ memset(&sin, 0, sizeof(struct sockaddr_in));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_port = port;
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
+ addr = (struct sockaddr *)&sin;
+ break;
+#endif
+#ifdef INET6
+ case SCTP_DSTADDRV6:
+ if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
+ *error = EINVAL;
+ return (NULL);
+ }
+ memset(&sin6, 0, sizeof(struct sockaddr_in6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_port = port;
+ m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
+#ifdef INET
+ if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
+ in6_sin6_2_sin(&sin, &sin6);
+ addr = (struct sockaddr *)&sin;
+ } else
+#endif
+ addr = (struct sockaddr *)&sin6;
+ break;
+#endif
+ default:
+ addr = NULL;
+ break;
+ }
+ if (addr) {
+ stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
+ if (stcb != NULL) {
+ return (stcb);
+ }
+ }
+ }
+ at += CMSG_ALIGN(cmh.cmsg_len);
+ }
+ return (NULL);
+}
+
static struct mbuf *
-sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
+sctp_add_cookie(struct mbuf *init, int init_offset,
struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
{
struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
@@ -3303,12 +3680,10 @@ sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
- mat = copy_init;
- while (mat) {
+ for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
}
- mat = SCTP_BUF_NEXT(mat);
}
}
#endif
@@ -3323,12 +3698,10 @@ sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
- mat = copy_initack;
- while (mat) {
+ for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
}
- mat = SCTP_BUF_NEXT(mat);
}
}
#endif
@@ -3345,7 +3718,6 @@ sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
/* tack the INIT and then the INIT-ACK onto the chain */
cookie_sz = 0;
- m_at = mret;
for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
cookie_sz += SCTP_BUF_LEN(m_at);
if (SCTP_BUF_NEXT(m_at) == NULL) {
@@ -3353,7 +3725,6 @@ sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
break;
}
}
-
for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
cookie_sz += SCTP_BUF_LEN(m_at);
if (SCTP_BUF_NEXT(m_at) == NULL) {
@@ -3361,7 +3732,6 @@ sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
break;
}
}
-
for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
cookie_sz += SCTP_BUF_LEN(m_at);
if (SCTP_BUF_NEXT(m_at) == NULL) {
@@ -3388,60 +3758,62 @@ sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
static uint8_t
-sctp_get_ect(struct sctp_tcb *stcb,
- struct sctp_tmit_chunk *chk)
+sctp_get_ect(struct sctp_tcb *stcb)
{
- uint8_t this_random;
-
- /* Huh? */
- if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 0)
- return (0);
-
- if (SCTP_BASE_SYSCTL(sctp_ecn_nonce) == 0)
- /* no nonce, always return ECT0 */
- return (SCTP_ECT0_BIT);
-
- if (stcb->asoc.peer_supports_ecn_nonce == 0) {
- /* Peer does NOT support it, so we send a ECT0 only */
+ if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
return (SCTP_ECT0_BIT);
+ } else {
+ return (0);
}
- if (chk == NULL)
- return (SCTP_ECT0_BIT);
+}
- if ((stcb->asoc.hb_random_idx > 3) ||
- ((stcb->asoc.hb_random_idx == 3) &&
- (stcb->asoc.hb_ect_randombit > 7))) {
- uint32_t rndval;
-
-warp_drive_sa:
- rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
- memcpy(stcb->asoc.hb_random_values, &rndval,
- sizeof(stcb->asoc.hb_random_values));
- this_random = stcb->asoc.hb_random_values[0];
- stcb->asoc.hb_random_idx = 0;
- stcb->asoc.hb_ect_randombit = 0;
- } else {
- if (stcb->asoc.hb_ect_randombit > 7) {
- stcb->asoc.hb_ect_randombit = 0;
- stcb->asoc.hb_random_idx++;
- if (stcb->asoc.hb_random_idx > 3) {
- goto warp_drive_sa;
+#if defined(INET) || defined(INET6)
+static void
+sctp_handle_no_route(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int so_locked)
+{
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
+
+ if (net) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
+ if (net->dest_state & SCTP_ADDR_CONFIRMED) {
+ if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb, 0,
+ (void *)net,
+ so_locked);
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state &= ~SCTP_ADDR_PF;
+ }
+ }
+ if (stcb) {
+ if (net == stcb->asoc.primary_destination) {
+ /* need a new primary */
+ struct sctp_nets *alt;
+
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ if (alt != net) {
+ if (stcb->asoc.alternate) {
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ }
+ stcb->asoc.alternate = alt;
+ atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
+ if (net->ro._s_addr) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+ }
}
}
- this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
- }
- if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
- if (chk != NULL)
- /* ECN Nonce stuff */
- chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
- stcb->asoc.hb_ect_randombit++;
- return (SCTP_ECT1_BIT);
- } else {
- stcb->asoc.hb_ect_randombit++;
- return (SCTP_ECT0_BIT);
}
}
+#endif
+
static int
sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
struct sctp_tcb *stcb, /* may be NULL */
@@ -3453,41 +3825,49 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
uint16_t auth_keyid,
int nofragment_flag,
int ecn_ok,
- struct sctp_tmit_chunk *chk,
int out_of_asoc_ok,
uint16_t src_port,
uint16_t dest_port,
uint32_t v_tag,
uint16_t port,
- int so_locked,
+ union sctp_sockstore *over_addr,
+ uint8_t use_mflowid, uint32_t mflowid,
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
- SCTP_UNUSED
+ int so_locked SCTP_UNUSED
+#else
+ int so_locked
#endif
- union sctp_sockstore *over_addr
)
/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
{
- /*
- * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
- * header WITH an SCTPHDR but no IP header, endpoint inp and sa
- * structure: - fill in the HMAC digest of any AUTH chunk in the
- * packet. - calculate and fill in the SCTP checksum. - prepend an
- * IP address header. - if boundall use INADDR_ANY. - if
- * boundspecific do source address selection. - set fragmentation
- * option for ipV4. - On return from IP output, check/adjust mtu
- * size of output interface and smallest_mtu size as well.
+ /**
+ * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
+ * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
+ * - fill in the HMAC digest of any AUTH chunk in the packet.
+ * - calculate and fill in the SCTP checksum.
+ * - prepend an IP address header.
+ * - if boundall use INADDR_ANY.
+ * - if boundspecific do source address selection.
+ * - set fragmentation option for ipV4.
+ * - On return from IP output, check/adjust mtu size of output
+ * interface and smallest_mtu size as well.
*/
/* Will need ifdefs around this */
- struct mbuf *o_pak;
struct mbuf *newm;
struct sctphdr *sctphdr;
int packet_length;
int ret;
uint32_t vrf_id;
+
+#if defined(INET) || defined(INET6)
+ struct mbuf *o_pak;
sctp_route_t *ro = NULL;
struct udphdr *udp = NULL;
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+#endif
+ uint8_t tos_value;
+
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so = NULL;
#endif
@@ -3507,645 +3887,649 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
if ((auth != NULL) && (stcb != NULL)) {
sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
}
- if (to->sa_family == AF_INET) {
- struct ip *ip = NULL;
- sctp_route_t iproute;
- uint8_t tos_value;
- int len;
-
- len = sizeof(struct ip) + sizeof(struct sctphdr);
- if (port) {
- len += sizeof(struct udphdr);
- }
- newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
- if (newm == NULL) {
- sctp_m_freem(m);
- SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
- return (ENOMEM);
- }
- SCTP_ALIGN_TO_END(newm, len);
- SCTP_BUF_LEN(newm) = len;
- SCTP_BUF_NEXT(newm) = m;
- m = newm;
- packet_length = sctp_calculate_len(m);
- ip = mtod(m, struct ip *);
- ip->ip_v = IPVERSION;
- ip->ip_hl = (sizeof(struct ip) >> 2);
- if (net) {
- tos_value = net->tos_flowlabel & 0x000000ff;
- } else {
- tos_value = inp->ip_inp.inp.inp_ip_tos;
- }
- if ((nofragment_flag) && (port == 0)) {
- ip->ip_off = IP_DF;
- } else
- ip->ip_off = 0;
+ if (net) {
+ tos_value = net->dscp;
+ } else if (stcb) {
+ tos_value = stcb->asoc.default_dscp;
+ } else {
+ tos_value = inp->sctp_ep.default_dscp;
+ }
- /* FreeBSD has a function for ip_id's */
- ip->ip_id = ip_newid();
+ switch (to->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct ip *ip = NULL;
+ sctp_route_t iproute;
+ int len;
- ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
- ip->ip_len = packet_length;
- if (stcb) {
- if ((stcb->asoc.ecn_allowed) && ecn_ok) {
- /* Enable ECN */
- ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
- } else {
- /* No ECN */
- ip->ip_tos = (u_char)(tos_value & 0xfc);
+ len = sizeof(struct ip) + sizeof(struct sctphdr);
+ if (port) {
+ len += sizeof(struct udphdr);
}
- } else {
- /* no association at all */
- ip->ip_tos = (tos_value & 0xfc);
- }
- if (port) {
- ip->ip_p = IPPROTO_UDP;
- } else {
- ip->ip_p = IPPROTO_SCTP;
- }
- ip->ip_sum = 0;
- if (net == NULL) {
- ro = &iproute;
- memset(&iproute, 0, sizeof(iproute));
- memcpy(&ro->ro_dst, to, to->sa_len);
- } else {
- ro = (sctp_route_t *) & net->ro;
- }
- /* Now the address selection part */
- ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
-
- /* call the routine to select the src address */
- if (net && out_of_asoc_ok == 0) {
- if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
- sctp_free_ifa(net->ro._s_addr);
- net->ro._s_addr = NULL;
- net->src_addr_selected = 0;
- if (ro->ro_rt) {
- RTFREE(ro->ro_rt);
- ro->ro_rt = NULL;
+ newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
+ if (newm == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ALIGN_TO_END(newm, len);
+ SCTP_BUF_LEN(newm) = len;
+ SCTP_BUF_NEXT(newm) = m;
+ m = newm;
+ if (net != NULL) {
+#ifdef INVARIANTS
+ if (net->flowidset == 0) {
+ panic("Flow ID not set");
+ }
+#endif
+ m->m_pkthdr.flowid = net->flowid;
+ m->m_flags |= M_FLOWID;
+ } else {
+ if (use_mflowid != 0) {
+ m->m_pkthdr.flowid = mflowid;
+ m->m_flags |= M_FLOWID;
}
}
- if (net->src_addr_selected == 0) {
- /* Cache the source address */
- net->ro._s_addr = sctp_source_address_selection(inp, stcb,
- ro, net, 0,
- vrf_id);
- net->src_addr_selected = 1;
+ packet_length = sctp_calculate_len(m);
+ ip = mtod(m, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = (sizeof(struct ip) >> 2);
+ if (tos_value == 0) {
+ /*
+ * This means especially, that it is not set
+ * at the SCTP layer. So use the value from
+ * the IP layer.
+ */
+ tos_value = inp->ip_inp.inp.inp_ip_tos;
}
- if (net->ro._s_addr == NULL) {
- /* No route to host */
- net->src_addr_selected = 0;
- goto no_route;
+ tos_value &= 0xfc;
+ if (ecn_ok) {
+ tos_value |= sctp_get_ect(stcb);
}
- ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
- } else {
- if (over_addr == NULL) {
- struct sctp_ifa *_lsrc;
-
- _lsrc = sctp_source_address_selection(inp, stcb, ro,
- net,
- out_of_asoc_ok,
- vrf_id);
- if (_lsrc == NULL) {
- goto no_route;
- }
- ip->ip_src = _lsrc->address.sin.sin_addr;
- sctp_free_ifa(_lsrc);
+ if ((nofragment_flag) && (port == 0)) {
+ ip->ip_off = IP_DF;
} else {
- ip->ip_src = over_addr->sin.sin_addr;
- SCTP_RTALLOC(ro, vrf_id);
+ ip->ip_off = 0;
}
- }
- if (port) {
- udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
- udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
- udp->uh_dport = port;
- udp->uh_ulen = htons(packet_length - sizeof(struct ip));
- udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
- sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
- } else {
- sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
- }
+ /* FreeBSD has a function for ip_id's */
+ ip->ip_id = ip_newid();
- sctphdr->src_port = src_port;
- sctphdr->dest_port = dest_port;
- sctphdr->v_tag = v_tag;
- sctphdr->checksum = 0;
+ ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
+ ip->ip_len = packet_length;
+ ip->ip_tos = tos_value;
+ if (port) {
+ ip->ip_p = IPPROTO_UDP;
+ } else {
+ ip->ip_p = IPPROTO_SCTP;
+ }
+ ip->ip_sum = 0;
+ if (net == NULL) {
+ ro = &iproute;
+ memset(&iproute, 0, sizeof(iproute));
+ memcpy(&ro->ro_dst, to, to->sa_len);
+ } else {
+ ro = (sctp_route_t *) & net->ro;
+ }
+ /* Now the address selection part */
+ ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
- /*
- * If source address selection fails and we find no route
- * then the ip_output should fail as well with a
- * NO_ROUTE_TO_HOST type error. We probably should catch
- * that somewhere and abort the association right away
- * (assuming this is an INIT being sent).
- */
- if ((ro->ro_rt == NULL)) {
- /*
- * src addr selection failed to find a route (or
- * valid source addr), so we can't get there from
- * here (yet)!
- */
- no_route:
- SCTPDBG(SCTP_DEBUG_OUTPUT1,
- "%s: dropped packet - no valid source addr\n",
- __FUNCTION__);
- if (net) {
- SCTPDBG(SCTP_DEBUG_OUTPUT1,
- "Destination was ");
- SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1,
- &net->ro._l_addr.sa);
- if (net->dest_state & SCTP_ADDR_CONFIRMED) {
- if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
- SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
- sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
- stcb,
- SCTP_FAILED_THRESHOLD,
- (void *)net,
- so_locked);
- net->dest_state &= ~SCTP_ADDR_REACHABLE;
- net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
- /*
- * JRS 5/14/07 - If a
- * destination is
- * unreachable, the PF bit
- * is turned off. This
- * allows an unambiguous use
- * of the PF bit for
- * destinations that are
- * reachable but potentially
- * failed. If the
- * destination is set to the
- * unreachable state, also
- * set the destination to
- * the PF state.
- */
- /*
- * Add debug message here if
- * destination is not in PF
- * state.
- */
- /*
- * Stop any running T3
- * timers here?
- */
- if ((stcb->asoc.sctp_cmt_on_off == 1) &&
- (stcb->asoc.sctp_cmt_pf > 0)) {
- net->dest_state &= ~SCTP_ADDR_PF;
- SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
- net);
- }
+ /* call the routine to select the src address */
+ if (net && out_of_asoc_ok == 0) {
+ if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
}
}
- if (stcb) {
- if (net == stcb->asoc.primary_destination) {
- /* need a new primary */
- struct sctp_nets *alt;
-
- alt = sctp_find_alternate_net(stcb, net, 0);
- if (alt != net) {
- if (sctp_set_primary_addr(stcb,
- (struct sockaddr *)NULL,
- alt) == 0) {
- net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
- if (net->ro._s_addr) {
- sctp_free_ifa(net->ro._s_addr);
- net->ro._s_addr = NULL;
- }
- net->src_addr_selected = 0;
- }
- }
+ if (net->src_addr_selected == 0) {
+ /* Cache the source address */
+ net->ro._s_addr = sctp_source_address_selection(inp, stcb,
+ ro, net, 0,
+ vrf_id);
+ net->src_addr_selected = 1;
+ }
+ if (net->ro._s_addr == NULL) {
+ /* No route to host */
+ net->src_addr_selected = 0;
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
+ } else {
+ if (over_addr == NULL) {
+ struct sctp_ifa *_lsrc;
+
+ _lsrc = sctp_source_address_selection(inp, stcb, ro,
+ net,
+ out_of_asoc_ok,
+ vrf_id);
+ if (_lsrc == NULL) {
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
}
+ ip->ip_src = _lsrc->address.sin.sin_addr;
+ sctp_free_ifa(_lsrc);
+ } else {
+ ip->ip_src = over_addr->sin.sin_addr;
+ SCTP_RTALLOC(ro, vrf_id);
}
}
- SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
- sctp_m_freem(m);
- return (EHOSTUNREACH);
- }
- if (ro != &iproute) {
- memcpy(&iproute, ro, sizeof(*ro));
- }
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
- (uint32_t) (ntohl(ip->ip_src.s_addr)));
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
- (uint32_t) (ntohl(ip->ip_dst.s_addr)));
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
- ro->ro_rt);
+ if (port) {
+ if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(packet_length - sizeof(struct ip));
+ if (V_udp_cksum) {
+ udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+ } else {
+ udp->uh_sum = 0;
+ }
+ sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
+ } else {
+ sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
+ }
- if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
- /* failed to prepend data, give up */
- SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
- sctp_m_freem(m);
- return (ENOMEM);
- }
-#ifdef SCTP_PACKET_LOGGING
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
- sctp_packet_log(m, packet_length);
-#endif
- SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
- if (port) {
+ sctphdr->src_port = src_port;
+ sctphdr->dest_port = dest_port;
+ sctphdr->v_tag = v_tag;
+ sctphdr->checksum = 0;
+
+ /*
+ * If source address selection fails and we find no
+ * route then the ip_output should fail as well with
+ * a NO_ROUTE_TO_HOST type error. We probably should
+ * catch that somewhere and abort the association
+ * right away (assuming this is an INIT being sent).
+ */
+ if (ro->ro_rt == NULL) {
+ /*
+ * src addr selection failed to find a route
+ * (or valid source addr), so we can't get
+ * there from here (yet)!
+ */
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ if (ro != &iproute) {
+ memcpy(&iproute, ro, sizeof(*ro));
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
+ (uint32_t) (ntohl(ip->ip_src.s_addr)));
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
+ (uint32_t) (ntohl(ip->ip_dst.s_addr)));
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
+ (void *)ro->ro_rt);
+
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* failed to prepend data, give up */
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ sctp_m_freem(m);
+ return (ENOMEM);
+ }
+ SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
+ if (port) {
#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
+ SCTP_STAT_INCR(sctps_sendnocrc);
#else
- if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
- (stcb) &&
- (stcb->asoc.loopback_scope))) {
sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ if (V_udp_cksum) {
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+ }
} else {
+#if defined(SCTP_WITH_NO_CSUM)
SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ m->m_pkthdr.csum_flags = CSUM_SCTP;
+ m->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
}
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(o_pak);
#endif
- SCTP_ENABLE_UDP_CSUM(o_pak);
- } else {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- m->m_pkthdr.csum_flags = CSUM_SCTP;
- m->m_pkthdr.csum_data = 0;
- SCTP_STAT_INCR(sctps_sendhwcrc);
+ /* send it out. table id is taken from stcb */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ so = SCTP_INP_SO(inp);
+ SCTP_SOCKET_UNLOCK(so, 0);
+ }
#endif
- }
- /* send it out. table id is taken from stcb */
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
- so = SCTP_INP_SO(inp);
- SCTP_SOCKET_UNLOCK(so, 0);
- }
+ SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 0);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
#endif
- SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
- atomic_add_int(&stcb->asoc.refcnt, 1);
- SCTP_TCB_UNLOCK(stcb);
- SCTP_SOCKET_LOCK(so, 0);
- SCTP_TCB_LOCK(stcb);
- atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ if (ret)
+ SCTP_STAT_INCR(sctps_senderrors);
+
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
+ if (net == NULL) {
+ /* free tempy routes */
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+ } else {
+ /*
+ * PMTU check versus smallest asoc MTU goes
+ * here
+ */
+ if ((ro->ro_rt != NULL) &&
+ (net->ro._s_addr)) {
+ uint32_t mtu;
+
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
+ if (net->port) {
+ mtu -= sizeof(struct udphdr);
+ }
+ if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
+ sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+ net->mtu = mtu;
+ }
+ } else if (ro->ro_rt == NULL) {
+ /* route was freed */
+ if (net->ro._s_addr &&
+ net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+ }
+ }
+ return (ret);
}
#endif
- SCTP_STAT_INCR(sctps_sendpackets);
- SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
- if (ret)
- SCTP_STAT_INCR(sctps_senderrors);
+#ifdef INET6
+ case AF_INET6:
+ {
+ uint32_t flowlabel, flowinfo;
+ struct ip6_hdr *ip6h;
+ struct route_in6 ip6route;
+ struct ifnet *ifp;
+ struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
+ int prev_scope = 0;
+ struct sockaddr_in6 lsa6_storage;
+ int error;
+ u_short prev_port = 0;
+ int len;
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
- if (net == NULL) {
- /* free tempy routes */
- if (ro->ro_rt) {
- RTFREE(ro->ro_rt);
- ro->ro_rt = NULL;
+ if (net) {
+ flowlabel = net->flowlabel;
+ } else if (stcb) {
+ flowlabel = stcb->asoc.default_flowlabel;
+ } else {
+ flowlabel = inp->sctp_ep.default_flowlabel;
}
- } else {
- /* PMTU check versus smallest asoc MTU goes here */
- if ((ro->ro_rt != NULL) &&
- (net->ro._s_addr)) {
- uint32_t mtu;
-
- mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
- if (net->port) {
- mtu -= sizeof(struct udphdr);
- }
- if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
- sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
- net->mtu = mtu;
- }
- } else if (ro->ro_rt == NULL) {
- /* route was freed */
- if (net->ro._s_addr &&
- net->src_addr_selected) {
- sctp_free_ifa(net->ro._s_addr);
- net->ro._s_addr = NULL;
+ if (flowlabel == 0) {
+ /*
+ * This means especially, that it is not set
+ * at the SCTP layer. So use the value from
+ * the IP layer.
+ */
+ flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
+ }
+ flowlabel &= 0x000fffff;
+ len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+ newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
+ if (newm == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ALIGN_TO_END(newm, len);
+ SCTP_BUF_LEN(newm) = len;
+ SCTP_BUF_NEXT(newm) = m;
+ m = newm;
+ if (net != NULL) {
+#ifdef INVARIANTS
+ if (net->flowidset == 0) {
+ panic("Flow ID not set");
+ }
+#endif
+ m->m_pkthdr.flowid = net->flowid;
+ m->m_flags |= M_FLOWID;
+ } else {
+ if (use_mflowid != 0) {
+ m->m_pkthdr.flowid = mflowid;
+ m->m_flags |= M_FLOWID;
}
- net->src_addr_selected = 0;
}
- }
- return (ret);
- }
-#ifdef INET6
- else if (to->sa_family == AF_INET6) {
- uint32_t flowlabel;
- struct ip6_hdr *ip6h;
- struct route_in6 ip6route;
- struct ifnet *ifp;
- u_char flowTop;
- uint16_t flowBottom;
- u_char tosBottom, tosTop;
- struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
- int prev_scope = 0;
- struct sockaddr_in6 lsa6_storage;
- int error;
- u_short prev_port = 0;
- int len;
-
- if (net != NULL) {
- flowlabel = net->tos_flowlabel;
- } else {
- flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
- }
+ packet_length = sctp_calculate_len(m);
- len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
- if (port) {
- len += sizeof(struct udphdr);
- }
- newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
- if (newm == NULL) {
- sctp_m_freem(m);
- SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
- return (ENOMEM);
- }
- SCTP_ALIGN_TO_END(newm, len);
- SCTP_BUF_LEN(newm) = len;
- SCTP_BUF_NEXT(newm) = m;
- m = newm;
- packet_length = sctp_calculate_len(m);
+ ip6h = mtod(m, struct ip6_hdr *);
+ /* protect *sin6 from overwrite */
+ sin6 = (struct sockaddr_in6 *)to;
+ tmp = *sin6;
+ sin6 = &tmp;
- ip6h = mtod(m, struct ip6_hdr *);
- /*
- * We assume here that inp_flow is in host byte order within
- * the TCB!
- */
- flowBottom = flowlabel & 0x0000ffff;
- flowTop = ((flowlabel & 0x000f0000) >> 16);
- tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
- /* protect *sin6 from overwrite */
- sin6 = (struct sockaddr_in6 *)to;
- tmp = *sin6;
- sin6 = &tmp;
-
- /* KAME hack: embed scopeid */
- if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
- SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
- return (EINVAL);
- }
- if (net == NULL) {
- memset(&ip6route, 0, sizeof(ip6route));
- ro = (sctp_route_t *) & ip6route;
- memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
- } else {
- ro = (sctp_route_t *) & net->ro;
- }
- if (stcb != NULL) {
- if ((stcb->asoc.ecn_allowed) && ecn_ok) {
- /* Enable ECN */
- tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ if (net == NULL) {
+ memset(&ip6route, 0, sizeof(ip6route));
+ ro = (sctp_route_t *) & ip6route;
+ memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
} else {
- /* No ECN */
- tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
+ ro = (sctp_route_t *) & net->ro;
}
- } else {
- /* we could get no asoc if it is a O-O-T-B packet */
- tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
- }
- ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
- if (port) {
- ip6h->ip6_nxt = IPPROTO_UDP;
- } else {
- ip6h->ip6_nxt = IPPROTO_SCTP;
- }
- ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
- ip6h->ip6_dst = sin6->sin6_addr;
+ /*
+ * We assume here that inp_flow is in host byte
+ * order within the TCB!
+ */
+ if (tos_value == 0) {
+ /*
+ * This means especially, that it is not set
+ * at the SCTP layer. So use the value from
+ * the IP layer.
+ */
+ tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
+ }
+ tos_value &= 0xfc;
+ if (ecn_ok) {
+ tos_value |= sctp_get_ect(stcb);
+ }
+ flowinfo = 0x06;
+ flowinfo <<= 8;
+ flowinfo |= tos_value;
+ flowinfo <<= 20;
+ flowinfo |= flowlabel;
+ ip6h->ip6_flow = htonl(flowinfo);
+ if (port) {
+ ip6h->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6h->ip6_nxt = IPPROTO_SCTP;
+ }
+ ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
+ ip6h->ip6_dst = sin6->sin6_addr;
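The shift sequence above builds the host-order ip6_flow word from its three fields before htonl() is applied: a 4-bit version of 6, an 8-bit traffic class (DSCP plus the two ECN bits chosen by sctp_get_ect()), and a 20-bit flow label. Here is a small sketch of the same packing outside the kernel; the function name and the test values are made up for illustration.

    #include <stdint.h>
    #include <assert.h>

    /* Pack version, traffic class and flow label into a host-order ip6_flow word. */
    static uint32_t
    pack_ip6_flow(uint8_t traffic_class, uint32_t flow_label)
    {
        uint32_t w;

        flow_label &= 0x000fffff;   /* the flow label is only 20 bits wide */
        w = 0x06;                   /* IP version 6 */
        w <<= 8;
        w |= traffic_class;         /* DSCP in the upper six bits, ECN in the lower two */
        w <<= 20;
        w |= flow_label;
        return (w);                 /* the caller applies htonl() before sending */
    }

    int
    main(void)
    {
        /* version 6, traffic class 0x2e, flow label 0x12345 */
        assert(pack_ip6_flow(0x2e, 0x12345) == 0x62e12345);
        return (0);
    }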
- /*
- * Add SRC address selection here: we can only reuse to a
- * limited degree the kame src-addr-sel, since we can try
- * their selection but it may not be bound.
- */
- bzero(&lsa6_tmp, sizeof(lsa6_tmp));
- lsa6_tmp.sin6_family = AF_INET6;
- lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
- lsa6 = &lsa6_tmp;
- if (net && out_of_asoc_ok == 0) {
- if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
- sctp_free_ifa(net->ro._s_addr);
- net->ro._s_addr = NULL;
- net->src_addr_selected = 0;
- if (ro->ro_rt) {
- RTFREE(ro->ro_rt);
- ro->ro_rt = NULL;
+ /*
+ * Add SRC address selection here: we can only reuse
+ * to a limited degree the kame src-addr-sel, since
+ * we can try their selection but it may not be
+ * bound.
+ */
+ bzero(&lsa6_tmp, sizeof(lsa6_tmp));
+ lsa6_tmp.sin6_family = AF_INET6;
+ lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
+ lsa6 = &lsa6_tmp;
+ if (net && out_of_asoc_ok == 0) {
+ if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
}
- }
- if (net->src_addr_selected == 0) {
- sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (net->src_addr_selected == 0) {
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ /* Cache the source address */
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb,
+ ro,
+ net,
+ 0,
+ vrf_id);
+ (void)sa6_recoverscope(sin6);
+ net->src_addr_selected = 1;
+ }
+ if (net->ro._s_addr == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
+ net->src_addr_selected = 0;
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
+ } else {
+ sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
/* KAME hack: embed scopeid */
if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
return (EINVAL);
}
- /* Cache the source address */
- net->ro._s_addr = sctp_source_address_selection(inp,
- stcb,
- ro,
- net,
- 0,
- vrf_id);
+ if (over_addr == NULL) {
+ struct sctp_ifa *_lsrc;
+
+ _lsrc = sctp_source_address_selection(inp, stcb, ro,
+ net,
+ out_of_asoc_ok,
+ vrf_id);
+ if (_lsrc == NULL) {
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
+ sctp_free_ifa(_lsrc);
+ } else {
+ lsa6->sin6_addr = over_addr->sin6.sin6_addr;
+ SCTP_RTALLOC(ro, vrf_id);
+ }
(void)sa6_recoverscope(sin6);
- net->src_addr_selected = 1;
}
- if (net->ro._s_addr == NULL) {
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
- net->src_addr_selected = 0;
- goto no_route;
+ lsa6->sin6_port = inp->sctp_lport;
+
+ if (ro->ro_rt == NULL) {
+ /*
+ * src addr selection failed to find a route
+ * (or valid source addr), so we can't get
+ * there from here!
+ */
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
}
- lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
- } else {
- sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
- /* KAME hack: embed scopeid */
- if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
- SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
- return (EINVAL);
+ /*
+ * XXX: sa6 may not have a valid sin6_scope_id in
+ * the non-SCOPEDROUTING case.
+ */
+ bzero(&lsa6_storage, sizeof(lsa6_storage));
+ lsa6_storage.sin6_family = AF_INET6;
+ lsa6_storage.sin6_len = sizeof(lsa6_storage);
+ lsa6_storage.sin6_addr = lsa6->sin6_addr;
+ if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
+ sctp_m_freem(m);
+ return (error);
}
- if (over_addr == NULL) {
- struct sctp_ifa *_lsrc;
-
- _lsrc = sctp_source_address_selection(inp, stcb, ro,
- net,
- out_of_asoc_ok,
- vrf_id);
- if (_lsrc == NULL) {
- goto no_route;
+ /* XXX */
+ lsa6_storage.sin6_addr = lsa6->sin6_addr;
+ lsa6_storage.sin6_port = inp->sctp_lport;
+ lsa6 = &lsa6_storage;
+ ip6h->ip6_src = lsa6->sin6_addr;
+
+ if (port) {
+ if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
}
- lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
- sctp_free_ifa(_lsrc);
+ udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
+ udp->uh_sum = 0;
+ sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
} else {
- lsa6->sin6_addr = over_addr->sin6.sin6_addr;
- SCTP_RTALLOC(ro, vrf_id);
+ sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
}
- (void)sa6_recoverscope(sin6);
- }
- lsa6->sin6_port = inp->sctp_lport;
- if (ro->ro_rt == NULL) {
+ sctphdr->src_port = src_port;
+ sctphdr->dest_port = dest_port;
+ sctphdr->v_tag = v_tag;
+ sctphdr->checksum = 0;
+
/*
- * src addr selection failed to find a route (or
- * valid source addr), so we can't get there from
- * here!
+ * We set the hop limit now since there is a good
+ * chance that our ro pointer is now filled
*/
- goto no_route;
- }
- /*
- * XXX: sa6 may not have a valid sin6_scope_id in the
- * non-SCOPEDROUTING case.
- */
- bzero(&lsa6_storage, sizeof(lsa6_storage));
- lsa6_storage.sin6_family = AF_INET6;
- lsa6_storage.sin6_len = sizeof(lsa6_storage);
- lsa6_storage.sin6_addr = lsa6->sin6_addr;
- if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
- sctp_m_freem(m);
- return (error);
- }
- /* XXX */
- lsa6_storage.sin6_addr = lsa6->sin6_addr;
- lsa6_storage.sin6_port = inp->sctp_lport;
- lsa6 = &lsa6_storage;
- ip6h->ip6_src = lsa6->sin6_addr;
-
- if (port) {
- udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
- udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
- udp->uh_dport = port;
- udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
- udp->uh_sum = 0;
- sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
- } else {
- sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
- }
-
- sctphdr->src_port = src_port;
- sctphdr->dest_port = dest_port;
- sctphdr->v_tag = v_tag;
- sctphdr->checksum = 0;
-
- /*
- * We set the hop limit now since there is a good chance
- * that our ro pointer is now filled
- */
- ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
- ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+ ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
+ ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
#ifdef SCTP_DEBUG
- /* Copy to be sure something bad is not happening */
- sin6->sin6_addr = ip6h->ip6_dst;
- lsa6->sin6_addr = ip6h->ip6_src;
+ /* Copy to be sure something bad is not happening */
+ sin6->sin6_addr = ip6h->ip6_dst;
+ lsa6->sin6_addr = ip6h->ip6_src;
#endif
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
- SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
- SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
- if (net) {
- sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
- /* preserve the port and scope for link local send */
- prev_scope = sin6->sin6_scope_id;
- prev_port = sin6->sin6_port;
- }
- if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
- /* failed to prepend data, give up */
- sctp_m_freem(m);
- SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
- return (ENOMEM);
- }
-#ifdef SCTP_PACKET_LOGGING
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
- sctp_packet_log(m, packet_length);
-#endif
- SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
- if (port) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
+ if (net) {
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /*
+ * preserve the port and scope for link
+ * local send
+ */
+ prev_scope = sin6->sin6_scope_id;
+ prev_port = sin6->sin6_port;
+ }
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* failed to prepend data, give up */
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
+ if (port) {
#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
+ SCTP_STAT_INCR(sctps_sendnocrc);
#else
- if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
- (stcb) &&
- (stcb->asoc.loopback_scope))) {
sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
SCTP_STAT_INCR(sctps_sendswcrc);
- } else {
- SCTP_STAT_INCR(sctps_sendnocrc);
- }
#endif
- if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
- udp->uh_sum = 0xffff;
- }
- } else {
+ if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
+ udp->uh_sum = 0xffff;
+ }
+ } else {
#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
+ SCTP_STAT_INCR(sctps_sendnocrc);
#else
- m->m_pkthdr.csum_flags = CSUM_SCTP;
- m->m_pkthdr.csum_data = 0;
- SCTP_STAT_INCR(sctps_sendhwcrc);
+ sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
#endif
- }
- /* send it out. table id is taken from stcb */
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
- so = SCTP_INP_SO(inp);
- SCTP_SOCKET_UNLOCK(so, 0);
- }
+ }
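For the encapsulated case the CRC32c is computed in software over the SCTP portion only, and the UDP checksum returned by in6_cksum() is remapped so that a computed value of zero goes out as 0xffff, because a zero UDP checksum is not allowed over IPv6. A tiny sketch of just that remapping:

    #include <stdint.h>
    #include <assert.h>

    /*
     * One's-complement arithmetic has two encodings of zero.  A UDP checksum
     * field of 0x0000 on the wire means "not computed", which is forbidden
     * for UDP over IPv6, so a computed result of zero is sent as 0xffff.
     */
    static uint16_t
    udp6_wire_checksum(uint16_t computed)
    {
        return ((computed == 0) ? 0xffff : computed);
    }

    int
    main(void)
    {
        assert(udp6_wire_checksum(0x0000) == 0xffff);
        assert(udp6_wire_checksum(0x1234) == 0x1234);
        return (0);
    }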
+ /* send it out. table id is taken from stcb */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ so = SCTP_INP_SO(inp);
+ SCTP_SOCKET_UNLOCK(so, 0);
+ }
#endif
- SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
- atomic_add_int(&stcb->asoc.refcnt, 1);
- SCTP_TCB_UNLOCK(stcb);
- SCTP_SOCKET_LOCK(so, 0);
- SCTP_TCB_LOCK(stcb);
- atomic_subtract_int(&stcb->asoc.refcnt, 1);
- }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(o_pak);
#endif
- if (net) {
- /* for link local this must be done */
- sin6->sin6_scope_id = prev_scope;
- sin6->sin6_port = prev_port;
- }
- SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
- SCTP_STAT_INCR(sctps_sendpackets);
- SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
- if (ret) {
- SCTP_STAT_INCR(sctps_senderrors);
- }
- if (net == NULL) {
- /* Now if we had a temp route free it */
- if (ro->ro_rt) {
- RTFREE(ro->ro_rt);
+ SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 0);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
}
- } else {
- /* PMTU check versus smallest asoc MTU goes here */
- if (ro->ro_rt == NULL) {
- /* Route was freed */
- if (net->ro._s_addr &&
- net->src_addr_selected) {
- sctp_free_ifa(net->ro._s_addr);
- net->ro._s_addr = NULL;
+#endif
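The blocks around SCTP_IP6_OUTPUT (and the identical ones in the IPv4 path) implement a lock-order dance for the sctp_output_unlocked case: the association is pinned with a reference, its lock is dropped, the socket lock is taken first, and only then is the association re-locked and unpinned. Here is a user-space sketch of the same pattern, assuming pthread mutexes and C11 atomics as stand-ins for the kernel primitives.

    #include <pthread.h>
    #include <stdatomic.h>

    /* Illustrative stand-ins for the kernel's socket and association blocks. */
    struct sock  { pthread_mutex_t so_lock; };
    struct assoc { pthread_mutex_t tcb_lock; atomic_int refcnt; };

    /*
     * Called with the association lock held and the socket lock released.
     * Pin the association so it cannot be freed, drop its lock, take the
     * socket lock first (the required lock order), then re-lock the
     * association and drop the pin.  Returns with both locks held.
     */
    static void
    relock_socket_then_assoc(struct sock *so, struct assoc *a)
    {
        atomic_fetch_add(&a->refcnt, 1);
        pthread_mutex_unlock(&a->tcb_lock);
        pthread_mutex_lock(&so->so_lock);
        pthread_mutex_lock(&a->tcb_lock);
        atomic_fetch_sub(&a->refcnt, 1);
    }

    int
    main(void)
    {
        struct sock so;
        struct assoc a;

        pthread_mutex_init(&so.so_lock, NULL);
        pthread_mutex_init(&a.tcb_lock, NULL);
        atomic_init(&a.refcnt, 0);
        pthread_mutex_lock(&a.tcb_lock);        /* caller starts with the assoc lock */
        relock_socket_then_assoc(&so, &a);
        pthread_mutex_unlock(&a.tcb_lock);
        pthread_mutex_unlock(&so.so_lock);
        return (0);
    }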
+ if (net) {
+ /* for link local this must be done */
+ sin6->sin6_scope_id = prev_scope;
+ sin6->sin6_port = prev_port;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ if (ret) {
+ SCTP_STAT_INCR(sctps_senderrors);
+ }
+ if (net == NULL) {
+ /* Now if we had a temp route free it */
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
}
- net->src_addr_selected = 0;
- }
- if ((ro->ro_rt != NULL) &&
- (net->ro._s_addr)) {
- uint32_t mtu;
-
- mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
- if (mtu &&
- (stcb->asoc.smallest_mtu > mtu)) {
- sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
- net->mtu = mtu;
- if (net->port) {
- net->mtu -= sizeof(struct udphdr);
+ } else {
+ /*
+ * PMTU check versus smallest asoc MTU goes
+ * here
+ */
+ if (ro->ro_rt == NULL) {
+ /* Route was freed */
+ if (net->ro._s_addr &&
+ net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
}
+ net->src_addr_selected = 0;
}
- } else if (ifp) {
- if (ND_IFINFO(ifp)->linkmtu &&
- (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
- sctp_mtu_size_reset(inp,
- &stcb->asoc,
- ND_IFINFO(ifp)->linkmtu);
+ if ((ro->ro_rt != NULL) &&
+ (net->ro._s_addr)) {
+ uint32_t mtu;
+
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
+ if (mtu &&
+ (stcb->asoc.smallest_mtu > mtu)) {
+ sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+ net->mtu = mtu;
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
+ }
+ } else if (ifp) {
+ if (ND_IFINFO(ifp)->linkmtu &&
+ (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
+ sctp_mtu_size_reset(inp,
+ &stcb->asoc,
+ ND_IFINFO(ifp)->linkmtu);
+ }
}
}
+ return (ret);
}
- return (ret);
- }
#endif
- else {
+ default:
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
((struct sockaddr *)to)->sa_family);
sctp_m_freem(m);
@@ -4162,22 +4546,22 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
#endif
)
{
- struct mbuf *m, *m_at, *mp_last;
+ struct mbuf *m;
struct sctp_nets *net;
struct sctp_init_chunk *init;
+
+#if defined(INET) || defined(INET6)
struct sctp_supported_addr_param *sup_addr;
+
+#endif
struct sctp_adaptation_layer_indication *ali;
- struct sctp_ecn_supported_param *ecn;
- struct sctp_prsctp_supported_param *prsctp;
- struct sctp_ecn_nonce_supported_param *ecn_nonce;
struct sctp_supported_chunk_types_param *pr_supported;
+ struct sctp_paramhdr *ph;
int cnt_inits_to = 0;
- int padval, ret;
- int num_ext;
- int p_len;
+ int ret;
+ uint16_t num_ext, chunk_len, padding_len, parameter_len;
/* INIT's always go to the primary (and usually ONLY address) */
- mp_last = NULL;
net = stcb->asoc.primary_destination;
if (net == NULL) {
net = TAILQ_FIRST(&stcb->asoc.nets);
@@ -4194,15 +4578,12 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
}
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
#ifdef INET6
- if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
/*
* special hook, if we are sending to link local it will not
* show up in our private address count.
*/
- struct sockaddr_in6 *sin6l;
-
- sin6l = &net->ro._l_addr.sin6;
- if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
+ if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
cnt_inits_to = 1;
}
#endif
@@ -4220,14 +4601,15 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
return;
}
- SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
+ chunk_len = (uint16_t) sizeof(struct sctp_init_chunk);
+ padding_len = 0;
/*
* assume peer supports asconf in order to be able to queue local
* address changes while an INIT is in flight and before the assoc
* is established.
*/
stcb->asoc.peer_supports_asconf = 1;
- /* Now lets put the SCTP header in place */
+ /* Now lets put the chunk header in place */
init = mtod(m, struct sctp_init_chunk *);
/* now the chunk header */
init->ch.chunk_type = SCTP_INITIATION;
@@ -4239,76 +4621,104 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
/* set up some of the credits. */
init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
SCTP_MINIMAL_RWND));
-
init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
- /* now the address restriction */
- sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
- sizeof(*init));
- sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
-#ifdef INET6
- /* we support 2 types: IPv6/IPv4 */
- sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t));
- sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
- sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
-#else
- /* we support 1 type: IPv4 */
- sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t));
- sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
- sup_addr->addr_type[1] = htons(0); /* this is the padding */
-#endif
- SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);
- /* adaptation layer indication parameter */
- ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
- ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
- ali->ph.param_length = htons(sizeof(*ali));
- ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
- SCTP_BUF_LEN(m) += sizeof(*ali);
- ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
- if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
- /* Add NAT friendly parameter */
- struct sctp_paramhdr *ph;
+ if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
+ uint8_t i;
- ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
+ if (stcb->asoc.scope.ipv4_addr_legal) {
+ parameter_len += (uint16_t) sizeof(uint16_t);
+ }
+ if (stcb->asoc.scope.ipv6_addr_legal) {
+ parameter_len += (uint16_t) sizeof(uint16_t);
+ }
+ sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
+ sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
+ sup_addr->ph.param_length = htons(parameter_len);
+ i = 0;
+ if (stcb->asoc.scope.ipv4_addr_legal) {
+ sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
+ }
+ if (stcb->asoc.scope.ipv6_addr_legal) {
+ sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
+ }
+ padding_len = 4 - 2 * i;
+ chunk_len += parameter_len;
+ }
+ /* Adaptation layer indication parameter */
+ if (inp->sctp_ep.adaptation_layer_indicator_provided) {
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
+ ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
+ ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+ ali->ph.param_length = htons(parameter_len);
+ ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
+ chunk_len += parameter_len;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
+ /* Add NAT friendly parameter. */
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
- ph->param_length = htons(sizeof(struct sctp_paramhdr));
- SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
- ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
}
/* now any cookie time extensions */
if (stcb->asoc.cookie_preserve_req) {
struct sctp_cookie_perserve_param *cookie_preserve;
- cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ parameter_len = (uint16_t) sizeof(struct sctp_cookie_perserve_param);
+ cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
- cookie_preserve->ph.param_length = htons(
- sizeof(*cookie_preserve));
+ cookie_preserve->ph.param_length = htons(parameter_len);
cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
- SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
- ecn = (struct sctp_ecn_supported_param *)(
- (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
stcb->asoc.cookie_preserve_req = 0;
+ chunk_len += parameter_len;
}
/* ECN parameter */
- if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
- ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
- ecn->ph.param_length = htons(sizeof(*ecn));
- SCTP_BUF_LEN(m) += sizeof(*ecn);
- prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
- sizeof(*ecn));
- } else {
- prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
- }
- /* And now tell the peer we do pr-sctp */
- prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
- prsctp->ph.param_length = htons(sizeof(*prsctp));
- SCTP_BUF_LEN(m) += sizeof(*prsctp);
+ if (stcb->asoc.ecn_allowed == 1) {
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
+ ph->param_type = htons(SCTP_ECN_CAPABLE);
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
+ }
+ /* And now tell the peer we do support PR-SCTP. */
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
+ ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
/* And now tell the peer we do all the extensions */
- pr_supported = (struct sctp_supported_chunk_types_param *)
- ((caddr_t)prsctp + sizeof(*prsctp));
+ pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
num_ext = 0;
pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
@@ -4322,118 +4732,100 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
if (stcb->asoc.sctp_nr_sack_on_off == 1) {
pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
}
- p_len = sizeof(*pr_supported) + num_ext;
- pr_supported->ph.param_length = htons(p_len);
- bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
- SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
+ pr_supported->ph.param_length = htons(parameter_len);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
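Each INIT parameter above advances chunk_len by its unpadded length, and any 32-bit alignment padding is zeroed just before the next parameter is written; SCTP_SIZE32() rounds a length up to the next multiple of four, which is also what the 4 - 2 * i shortcut for the address-type list amounts to. A small sketch of that rounding, with hypothetical helper names:

    #include <stdint.h>
    #include <assert.h>

    /* Round a parameter length up to the next 32-bit boundary. */
    static uint16_t
    size32(uint16_t len)
    {
        return ((uint16_t)((len + 3) & ~3));
    }

    /* Padding bytes to zero out between this parameter and the next one. */
    static uint16_t
    pad32(uint16_t len)
    {
        return ((uint16_t)(size32(len) - len));
    }

    int
    main(void)
    {
        /* a 6-byte address-type list (4-byte header plus one uint16_t entry) */
        assert(size32(6) == 8 && pad32(6) == 2);
        /* an 8-byte list with two entries is already aligned */
        assert(size32(8) == 8 && pad32(8) == 0);
        return (0);
    }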
-
- /* ECN nonce: And now tell the peer we support ECN nonce */
- if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
- ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
- ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
- ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
- ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
- SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
- }
/* add authentication parameters */
if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
- struct sctp_auth_random *randp;
- struct sctp_auth_hmac_algo *hmacs;
- struct sctp_auth_chunk_list *chunks;
-
/* attach RANDOM parameter, if available */
if (stcb->asoc.authinfo.random != NULL) {
- randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
- p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
+ struct sctp_auth_random *randp;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
+ parameter_len = (uint16_t) sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
/* random key already contains the header */
- bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
- /* zero out any padding required */
- bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
- SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
}
/* add HMAC_ALGO parameter */
- hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
- p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
- (uint8_t *) hmacs->hmac_ids);
- if (p_len > 0) {
- p_len += sizeof(*hmacs);
+ if ((stcb->asoc.local_hmacs != NULL) &&
+ (stcb->asoc.local_hmacs->num_algo > 0)) {
+ struct sctp_auth_hmac_algo *hmacs;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
+ parameter_len = (uint16_t) (sizeof(struct sctp_auth_hmac_algo) +
+ stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
- hmacs->ph.param_length = htons(p_len);
- /* zero out any padding required */
- bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
- SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ hmacs->ph.param_length = htons(parameter_len);
+ sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *) hmacs->hmac_ids);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
}
/* add CHUNKS parameter */
- chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
- p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
- chunks->chunk_types);
- if (p_len > 0) {
- p_len += sizeof(*chunks);
+ if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
+ struct sctp_auth_chunk_list *chunks;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
+ parameter_len = (uint16_t) (sizeof(struct sctp_auth_chunk_list) +
+ sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
- chunks->ph.param_length = htons(p_len);
- /* zero out any padding required */
- bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
- SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ chunks->ph.param_length = htons(parameter_len);
+ sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
}
}
- m_at = m;
- /* now the addresses */
- {
- struct sctp_scoping scp;
-
- /*
- * To optimize this we could put the scoping stuff into a
- * structure and remove the individual uint8's from the
- * assoc structure. Then we could just sifa in the address
- * within the stcb.. but for now this is a quick hack to get
- * the address stuff teased apart.
- */
- scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
- scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
- scp.loopback_scope = stcb->asoc.loopback_scope;
- scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
- scp.local_scope = stcb->asoc.local_scope;
- scp.site_scope = stcb->asoc.site_scope;
-
- m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
- }
+ SCTP_BUF_LEN(m) = chunk_len;
- /* calulate the size and update pkt header and chunk header */
- p_len = 0;
- for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
- if (SCTP_BUF_NEXT(m_at) == NULL)
- mp_last = m_at;
- p_len += SCTP_BUF_LEN(m_at);
- }
- init->ch.chunk_length = htons(p_len);
+ /* now the addresses */
/*
- * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return
- * here since the timer will drive a retranmission.
+ * To optimize this we could put the scoping stuff into a structure
+ * and remove the individual uint8's from the assoc structure. Then
+ * we could just sifa in the address within the stcb. But for now
+ * this is a quick hack to get the address stuff teased apart.
*/
+ sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len);
- /* I don't expect this to execute but we will be safe here */
- padval = p_len % 4;
- if ((padval) && (mp_last)) {
- /*
- * The compiler worries that mp_last may not be set even
- * though I think it is impossible :-> however we add
- * mp_last here just in case.
- */
- ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
- if (ret) {
- /* Houston we have a problem, no space */
+ init->ch.chunk_length = htons(chunk_len);
+ if (padding_len > 0) {
+ struct mbuf *m_at, *mp_last;
+
+ mp_last = NULL;
+ for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ if (SCTP_BUF_NEXT(m_at) == NULL)
+ mp_last = m_at;
+ }
+ if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
sctp_m_freem(m);
return;
}
- p_len += padval;
}
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
ret = sctp_lowlevel_chunk_output(inp, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
- m, 0, NULL, 0, 0, 0, NULL, 0,
+ m, 0, NULL, 0, 0, 0, 0,
inp->sctp_lport, stcb->rport, htonl(0),
- net->port, so_locked, NULL);
+ net->port, NULL,
+ 0, 0,
+ so_locked);
SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
@@ -4557,11 +4949,10 @@ sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
case SCTP_HAS_NAT_SUPPORT:
*nat_friendly = 1;
/* fall through */
- case SCTP_ECN_NONCE_SUPPORTED:
case SCTP_PRSCTP_SUPPORTED:
if (padded_size != sizeof(struct sctp_paramhdr)) {
- SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecnnonce/prsctp/nat support %d\n", plen);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
goto invalid_size;
}
at += padded_size;
@@ -4648,7 +5039,6 @@ sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
return (NULL);
}
m_copyback(op_err, err_at, plen, (caddr_t)phdr);
- err_at += plen;
}
return (op_err);
break;
@@ -4781,7 +5171,7 @@ invalid_size:
static int
sctp_are_there_new_addresses(struct sctp_association *asoc,
- struct mbuf *in_initpkt, int iphlen, int offset)
+ struct mbuf *in_initpkt, int offset, struct sockaddr *src)
{
/*
* Given a INIT packet, look through the packet to verify that there
@@ -4790,75 +5180,56 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
* must return (1) to drop the packet if we see a un-understood
* parameter that tells us to drop the chunk.
*/
- struct sockaddr_in sin4, *sa4;
-
-#ifdef INET6
- struct sockaddr_in6 sin6, *sa6;
-
-#endif
struct sockaddr *sa_touse;
struct sockaddr *sa;
struct sctp_paramhdr *phdr, params;
- struct ip *iph;
-
-#ifdef INET6
- struct ip6_hdr *ip6h;
-
-#endif
- struct mbuf *mat;
uint16_t ptype, plen;
- int err_at;
uint8_t fnd;
struct sctp_nets *net;
- memset(&sin4, 0, sizeof(sin4));
+#ifdef INET
+ struct sockaddr_in sin4, *sa4;
+
+#endif
#ifdef INET6
- memset(&sin6, 0, sizeof(sin6));
+ struct sockaddr_in6 sin6, *sa6;
+
#endif
+
+#ifdef INET
+ memset(&sin4, 0, sizeof(sin4));
sin4.sin_family = AF_INET;
sin4.sin_len = sizeof(sin4);
+#endif
#ifdef INET6
+ memset(&sin6, 0, sizeof(sin6));
sin6.sin6_family = AF_INET6;
sin6.sin6_len = sizeof(sin6);
#endif
- sa_touse = NULL;
/* First what about the src address of the pkt ? */
- iph = mtod(in_initpkt, struct ip *);
- switch (iph->ip_v) {
- case IPVERSION:
- /* source addr is IPv4 */
- sin4.sin_addr = iph->ip_src;
- sa_touse = (struct sockaddr *)&sin4;
- break;
-#ifdef INET6
- case IPV6_VERSION >> 4:
- /* source addr is IPv6 */
- ip6h = mtod(in_initpkt, struct ip6_hdr *);
- sin6.sin6_addr = ip6h->ip6_src;
- sa_touse = (struct sockaddr *)&sin6;
- break;
-#endif
- default:
- return (1);
- }
-
fnd = 0;
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
sa = (struct sockaddr *)&net->ro._l_addr;
- if (sa->sa_family == sa_touse->sa_family) {
+ if (sa->sa_family == src->sa_family) {
+#ifdef INET
if (sa->sa_family == AF_INET) {
+ struct sockaddr_in *src4;
+
sa4 = (struct sockaddr_in *)sa;
- if (sa4->sin_addr.s_addr ==
- sin4.sin_addr.s_addr) {
+ src4 = (struct sockaddr_in *)src;
+ if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
fnd = 1;
break;
}
}
+#endif
#ifdef INET6
if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *src6;
+
sa6 = (struct sockaddr_in6 *)sa;
- if (SCTP6_ARE_ADDR_EQUAL(sa6,
- &sin6)) {
+ src6 = (struct sockaddr_in6 *)src;
+ if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
fnd = 1;
break;
}
@@ -4871,41 +5242,51 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
return (1);
}
/* Ok so far lets munge through the rest of the packet */
- mat = in_initpkt;
- err_at = 0;
- sa_touse = NULL;
offset += sizeof(struct sctp_init_chunk);
- phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
+ phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
while (phdr) {
+ sa_touse = NULL;
ptype = ntohs(phdr->param_type);
plen = ntohs(phdr->param_length);
- if (ptype == SCTP_IPV4_ADDRESS) {
- struct sctp_ipv4addr_param *p4, p4_buf;
+ switch (ptype) {
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ {
+ struct sctp_ipv4addr_param *p4, p4_buf;
- phdr = sctp_get_next_param(mat, offset,
- (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
- if (plen != sizeof(struct sctp_ipv4addr_param) ||
- phdr == NULL) {
- return (1);
+ phdr = sctp_get_next_param(in_initpkt, offset,
+ (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ phdr == NULL) {
+ return (1);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ sin4.sin_addr.s_addr = p4->addr;
+ sa_touse = (struct sockaddr *)&sin4;
+ break;
}
- p4 = (struct sctp_ipv4addr_param *)phdr;
- sin4.sin_addr.s_addr = p4->addr;
- sa_touse = (struct sockaddr *)&sin4;
- } else if (ptype == SCTP_IPV6_ADDRESS) {
- struct sctp_ipv6addr_param *p6, p6_buf;
+#endif
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ {
+ struct sctp_ipv6addr_param *p6, p6_buf;
- phdr = sctp_get_next_param(mat, offset,
- (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
- if (plen != sizeof(struct sctp_ipv6addr_param) ||
- phdr == NULL) {
- return (1);
+ phdr = sctp_get_next_param(in_initpkt, offset,
+ (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ phdr == NULL) {
+ return (1);
+ }
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+ sizeof(p6->addr));
+ sa_touse = (struct sockaddr *)&sin6;
+ break;
}
- p6 = (struct sctp_ipv6addr_param *)phdr;
-#ifdef INET6
- memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
- sizeof(p6->addr));
#endif
- sa_touse = (struct sockaddr *)&sin4;
+ default:
+ sa_touse = NULL;
+ break;
}
if (sa_touse) {
/* ok, sa_touse points to one to check */
@@ -4915,6 +5296,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
if (sa->sa_family != sa_touse->sa_family) {
continue;
}
+#ifdef INET
if (sa->sa_family == AF_INET) {
sa4 = (struct sockaddr_in *)sa;
if (sa4->sin_addr.s_addr ==
@@ -4923,6 +5305,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
break;
}
}
+#endif
#ifdef INET6
if (sa->sa_family == AF_INET6) {
sa6 = (struct sockaddr_in6 *)sa;
@@ -4940,7 +5323,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
}
}
offset += SCTP_SIZE32(plen);
- phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
+ phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
}
return (0);
}
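The function above decides whether an INIT lists any address the association does not already know, comparing per family: IPv4 by the 32-bit address, IPv6 via SCTP6_ARE_ADDR_EQUAL. Below is a simplified user-space sketch of that comparison; the plain memcmp on in6_addr ignores the embedded-scope handling the kernel macro performs.

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <string.h>

    /*
     * Compare two addresses the way the scan above does: same family first,
     * then IPv4 by the 32-bit address or IPv6 by the full 128 bits.  Ports
     * are ignored, since the lookup is purely per-address.
     */
    static int
    same_address(const struct sockaddr *a, const struct sockaddr *b)
    {
        if (a->sa_family != b->sa_family)
            return (0);
        switch (a->sa_family) {
        case AF_INET:
            return (((const struct sockaddr_in *)a)->sin_addr.s_addr ==
                ((const struct sockaddr_in *)b)->sin_addr.s_addr);
        case AF_INET6:
            return (memcmp(&((const struct sockaddr_in6 *)a)->sin6_addr,
                &((const struct sockaddr_in6 *)b)->sin6_addr,
                sizeof(struct in6_addr)) == 0);
        default:
            return (0);
        }
    }

    int
    main(void)
    {
        struct sockaddr_in a, b;

        memset(&a, 0, sizeof(a));
        memset(&b, 0, sizeof(b));
        a.sin_family = b.sin_family = AF_INET;
        a.sin_addr.s_addr = b.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        return (!same_address((const struct sockaddr *)&a, (const struct sockaddr *)&b));
    }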
@@ -4953,8 +5336,11 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
*/
void
sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
- struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
- struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
+ struct mbuf *init_pkt, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_init_chunk *init_chk,
+ uint8_t use_mflowid, uint32_t mflowid,
+ uint32_t vrf_id, uint16_t port, int hold_inp_lock)
{
struct sctp_association *asoc;
struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
@@ -4962,19 +5348,19 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct sctp_adaptation_layer_indication *ali;
struct sctp_ecn_supported_param *ecn;
struct sctp_prsctp_supported_param *prsctp;
- struct sctp_ecn_nonce_supported_param *ecn_nonce;
struct sctp_supported_chunk_types_param *pr_supported;
- union sctp_sockstore store, store1, *over_addr;
- struct sockaddr_in *sin, *to_sin;
+ union sctp_sockstore *over_addr;
-#ifdef INET6
- struct sockaddr_in6 *sin6, *to_sin6;
+#ifdef INET
+ struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
+ struct sockaddr_in *src4 = (struct sockaddr_in *)src;
+ struct sockaddr_in *sin;
#endif
- struct ip *iph;
-
#ifdef INET6
- struct ip6_hdr *ip6;
+ struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
+ struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
+ struct sockaddr_in6 *sin6;
#endif
struct sockaddr *to;
@@ -4989,21 +5375,24 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
int nat_friendly = 0;
struct socket *so;
- if (stcb)
+ if (stcb) {
asoc = &stcb->asoc;
- else
+ } else {
asoc = NULL;
+ }
mp_last = NULL;
if ((asoc != NULL) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
- (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
+ (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
/* new addresses, out of here in non-cookie-wait states */
/*
* Send a ABORT, we don't add the new address error clause
* though we even set the T bit and copy in the 0 tag.. this
* looks no different than if no listener was present.
*/
- sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
+ sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, NULL,
+ use_mflowid, mflowid,
+ vrf_id, port);
return;
}
abort_flag = 0;
@@ -5012,8 +5401,10 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
&abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
if (abort_flag) {
do_a_abort:
- sctp_send_abort(init_pkt, iphlen, sh,
- init_chk->init.initiate_tag, op_err, vrf_id, port);
+ sctp_send_abort(init_pkt, iphlen, src, dst, sh,
+ init_chk->init.initiate_tag, op_err,
+ use_mflowid, mflowid,
+ vrf_id, port);
return;
}
m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
@@ -5025,6 +5416,14 @@ do_a_abort:
}
SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
+ /*
+ * We might not overwrite the identification[] completely and on
+ * some platforms time_entered will contain some padding. Therefore
+ * zero out the cookie to avoid putting uninitialized memory on the
+ * wire.
+ */
+ memset(&stc, 0, sizeof(struct sctp_state_cookie));
+
/* the time I built cookie */
(void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
@@ -5052,79 +5451,35 @@ do_a_abort:
*/
stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
- struct inpcb *in_inp;
-
- /* Its a V6 socket */
- in_inp = (struct inpcb *)inp;
stc.ipv6_addr_legal = 1;
- /* Now look at the binding flag to see if V4 will be legal */
- if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
- stc.ipv4_addr_legal = 1;
- } else {
- /* V4 addresses are NOT legal on the association */
+ if (SCTP_IPV6_V6ONLY(inp)) {
stc.ipv4_addr_legal = 0;
+ } else {
+ stc.ipv4_addr_legal = 1;
}
} else {
- /* Its a V4 socket, no - V6 */
- stc.ipv4_addr_legal = 1;
stc.ipv6_addr_legal = 0;
+ stc.ipv4_addr_legal = 1;
}
-
#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
stc.ipv4_scope = 1;
#else
stc.ipv4_scope = 0;
#endif
- /* now for scope setup */
- memset((caddr_t)&store, 0, sizeof(store));
- memset((caddr_t)&store1, 0, sizeof(store1));
- sin = &store.sin;
- to_sin = &store1.sin;
-#ifdef INET6
- sin6 = &store.sin6;
- to_sin6 = &store1.sin6;
-#endif
- iph = mtod(init_pkt, struct ip *);
- /* establish the to_addr's */
- switch (iph->ip_v) {
- case IPVERSION:
- to_sin->sin_port = sh->dest_port;
- to_sin->sin_family = AF_INET;
- to_sin->sin_len = sizeof(struct sockaddr_in);
- to_sin->sin_addr = iph->ip_dst;
- break;
-#ifdef INET6
- case IPV6_VERSION >> 4:
- ip6 = mtod(init_pkt, struct ip6_hdr *);
- to_sin6->sin6_addr = ip6->ip6_dst;
- to_sin6->sin6_scope_id = 0;
- to_sin6->sin6_port = sh->dest_port;
- to_sin6->sin6_family = AF_INET6;
- to_sin6->sin6_len = sizeof(struct sockaddr_in6);
- break;
-#endif
- default:
- goto do_a_abort;
- break;
- };
-
if (net == NULL) {
- to = (struct sockaddr *)&store;
- switch (iph->ip_v) {
- case IPVERSION:
+ to = src;
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
{
- sin->sin_family = AF_INET;
- sin->sin_len = sizeof(struct sockaddr_in);
- sin->sin_port = sh->src_port;
- sin->sin_addr = iph->ip_src;
/* lookup address */
- stc.address[0] = sin->sin_addr.s_addr;
+ stc.address[0] = src4->sin_addr.s_addr;
stc.address[1] = 0;
stc.address[2] = 0;
stc.address[3] = 0;
stc.addr_type = SCTP_IPV4_ADDRESS;
/* local from address */
- stc.laddress[0] = to_sin->sin_addr.s_addr;
+ stc.laddress[0] = dst4->sin_addr.s_addr;
stc.laddress[1] = 0;
stc.laddress[2] = 0;
stc.laddress[3] = 0;
@@ -5132,14 +5487,14 @@ do_a_abort:
/* scope_id is only for v6 */
stc.scope_id = 0;
#ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
- if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
stc.ipv4_scope = 1;
}
#else
stc.ipv4_scope = 1;
#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
/* Must use the address in this case */
- if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
+ if (sctp_is_address_on_local_host(src, vrf_id)) {
stc.loopback_scope = 1;
stc.ipv4_scope = 1;
stc.site_scope = 1;
@@ -5147,33 +5502,19 @@ do_a_abort:
}
break;
}
+#endif
#ifdef INET6
- case IPV6_VERSION >> 4:
+ case AF_INET6:
{
- ip6 = mtod(init_pkt, struct ip6_hdr *);
- sin6->sin6_family = AF_INET6;
- sin6->sin6_len = sizeof(struct sockaddr_in6);
- sin6->sin6_port = sh->src_port;
- sin6->sin6_addr = ip6->ip6_src;
- /* lookup address */
- memcpy(&stc.address, &sin6->sin6_addr,
- sizeof(struct in6_addr));
- sin6->sin6_scope_id = 0;
stc.addr_type = SCTP_IPV6_ADDRESS;
- stc.scope_id = 0;
- if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
- /*
- * FIX ME: does this have scope from
- * rcvif?
- */
- (void)sa6_recoverscope(sin6);
- stc.scope_id = sin6->sin6_scope_id;
- sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+ memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
+ stc.scope_id = in6_getscope(&src6->sin6_addr);
+ if (sctp_is_address_on_local_host(src, vrf_id)) {
stc.loopback_scope = 1;
stc.local_scope = 0;
stc.site_scope = 1;
stc.ipv4_scope = 1;
- } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
/*
* If the new destination is a
* LINK_LOCAL we must have common
@@ -5198,14 +5539,7 @@ do_a_abort:
* pull out the scope_id from
* incoming pkt
*/
- /*
- * FIX ME: does this have scope from
- * rcvif?
- */
- (void)sa6_recoverscope(sin6);
- stc.scope_id = sin6->sin6_scope_id;
- sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
- } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
/*
* If the new destination is
* SITE_LOCAL then we must have site
@@ -5213,7 +5547,7 @@ do_a_abort:
*/
stc.site_scope = 1;
}
- memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
+ memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
stc.laddr_type = SCTP_IPV6_ADDRESS;
break;
}
@@ -5231,10 +5565,10 @@ do_a_abort:
#endif
- stc.loopback_scope = asoc->loopback_scope;
- stc.ipv4_scope = asoc->ipv4_local_scope;
- stc.site_scope = asoc->site_scope;
- stc.local_scope = asoc->local_scope;
+ stc.loopback_scope = asoc->scope.loopback_scope;
+ stc.ipv4_scope = asoc->scope.ipv4_local_scope;
+ stc.site_scope = asoc->scope.site_scope;
+ stc.local_scope = asoc->scope.local_scope;
#ifdef INET6
/* Why do we not consider IPv4 LL addresses? */
TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
@@ -5252,6 +5586,7 @@ do_a_abort:
/* use the net pointer */
to = (struct sockaddr *)&net->ro._l_addr;
switch (to->sa_family) {
+#ifdef INET
case AF_INET:
sin = (struct sockaddr_in *)to;
stc.address[0] = sin->sin_addr.s_addr;
@@ -5278,17 +5613,21 @@ do_a_abort:
stc.laddress[2] = 0;
stc.laddress[3] = 0;
stc.laddr_type = SCTP_IPV4_ADDRESS;
+ /* scope_id is only for v6 */
+ stc.scope_id = 0;
break;
+#endif
#ifdef INET6
case AF_INET6:
sin6 = (struct sockaddr_in6 *)to;
memcpy(&stc.address, &sin6->sin6_addr,
sizeof(struct in6_addr));
stc.addr_type = SCTP_IPV6_ADDRESS;
+ stc.scope_id = sin6->sin6_scope_id;
if (net->src_addr_selected == 0) {
/*
* strange case here, the INIT should have
- * did the selection.
+ * done the selection.
*/
net->ro._s_addr = sctp_source_address_selection(inp,
stcb, (sctp_route_t *) & net->ro,
@@ -5312,6 +5651,7 @@ do_a_abort:
/* who are we */
memcpy(stc.identification, SCTP_VERSION_STRING,
min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
+ memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
/* now the chunk header */
initack->ch.chunk_type = SCTP_INITIATION_ACK;
initack->ch.chunk_flags = 0;
@@ -5392,20 +5732,25 @@ do_a_abort:
/* I can have what I want :> */
initack->init.num_outbound_streams = htons(i_want);
}
- /* tell him his limt. */
+ /* tell him his limit. */
initack->init.num_inbound_streams =
htons(inp->sctp_ep.max_open_streams_intome);
/* adaptation layer indication parameter */
- ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
- ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
- ali->ph.param_length = htons(sizeof(*ali));
- ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
- SCTP_BUF_LEN(m) += sizeof(*ali);
- ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
+ if (inp->sctp_ep.adaptation_layer_indicator_provided) {
+ ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
+ ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+ ali->ph.param_length = htons(sizeof(*ali));
+ ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
+ SCTP_BUF_LEN(m) += sizeof(*ali);
+ ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
+ } else {
+ ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack));
+ }
/* ECN parameter */
- if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
+ if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
+ (inp->sctp_ecn_enable == 1)) {
ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
ecn->ph.param_length = htons(sizeof(*ecn));
SCTP_BUF_LEN(m) += sizeof(*ecn);
@@ -5446,14 +5791,6 @@ do_a_abort:
bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
- /* ECN nonce: And now tell the peer we support ECN nonce */
- if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
- ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
- ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
- ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
- ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
- SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
- }
/* add authentication parameters */
if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
struct sctp_auth_random *randp;
@@ -5515,7 +5852,7 @@ do_a_abort:
scp.ipv4_local_scope = stc.ipv4_scope;
scp.local_scope = stc.local_scope;
scp.site_scope = stc.site_scope;
- m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
+ m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
}
/* tack on the operational error if present */
@@ -5525,6 +5862,7 @@ do_a_abort:
llen = 0;
ol = op_err;
+
while (ol) {
llen += SCTP_BUF_LEN(ol);
ol = SCTP_BUF_NEXT(ol);
@@ -5556,7 +5894,7 @@ do_a_abort:
}
/* Now we must build a cookie */
- m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 0, &stc, &signature);
+ m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
if (m_cookie == NULL) {
/* memory problem */
sctp_m_freem(m);
@@ -5594,76 +5932,28 @@ do_a_abort:
padval = p_len % 4;
if ((padval) && (mp_last)) {
/* see my previous comments on mp_last */
- int ret;
-
- ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
- if (ret) {
+ if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
/* Houston we have a problem, no space */
sctp_m_freem(m);
return;
}
- p_len += padval;
}
if (stc.loopback_scope) {
- over_addr = &store1;
+ over_addr = (union sctp_sockstore *)dst;
} else {
over_addr = NULL;
}
(void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
- 0, NULL, 0,
+ 0, 0,
inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
- port, SCTP_SO_NOT_LOCKED, over_addr);
+ port, over_addr,
+ use_mflowid, mflowid,
+ SCTP_SO_NOT_LOCKED);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
-void
-sctp_insert_on_wheel(struct sctp_tcb *stcb,
- struct sctp_association *asoc,
- struct sctp_stream_out *strq, int holds_lock)
-{
- if (holds_lock == 0) {
- SCTP_TCB_SEND_LOCK(stcb);
- }
- if ((strq->next_spoke.tqe_next == NULL) &&
- (strq->next_spoke.tqe_prev == NULL)) {
- TAILQ_INSERT_TAIL(&asoc->out_wheel, strq, next_spoke);
- }
- if (holds_lock == 0) {
- SCTP_TCB_SEND_UNLOCK(stcb);
- }
-}
-
-void
-sctp_remove_from_wheel(struct sctp_tcb *stcb,
- struct sctp_association *asoc,
- struct sctp_stream_out *strq,
- int holds_lock)
-{
- /* take off and then setup so we know it is not on the wheel */
- if (holds_lock == 0) {
- SCTP_TCB_SEND_LOCK(stcb);
- }
- if (TAILQ_EMPTY(&strq->outqueue)) {
- if (asoc->last_out_stream == strq) {
- asoc->last_out_stream = TAILQ_PREV(asoc->last_out_stream, sctpwheel_listhead, next_spoke);
- if (asoc->last_out_stream == NULL) {
- asoc->last_out_stream = TAILQ_LAST(&asoc->out_wheel, sctpwheel_listhead);
- }
- if (asoc->last_out_stream == strq) {
- asoc->last_out_stream = NULL;
- }
- }
- TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
- strq->next_spoke.tqe_next = NULL;
- strq->next_spoke.tqe_prev = NULL;
- }
- if (holds_lock == 0) {
- SCTP_TCB_SEND_UNLOCK(stcb);
- }
-}
-
static void
sctp_prune_prsctp(struct sctp_tcb *stcb,
struct sctp_association *asoc,
@@ -5702,14 +5992,14 @@ sctp_prune_prsctp(struct sctp_tcb *stcb,
* if the mbuf is here
*/
int ret_spc;
- int cause;
+ uint8_t sent;
if (chk->sent > SCTP_DATAGRAM_UNSENT)
- cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
+ sent = 1;
else
- cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
+ sent = 0;
ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
- cause,
+ sent,
SCTP_SO_LOCKED);
freed_spc += ret_spc;
if (freed_spc >= dataout) {
@@ -5720,9 +6010,7 @@ sctp_prune_prsctp(struct sctp_tcb *stcb,
} /* if chunk has enabled */
} /* tailqforeach */
- chk = TAILQ_FIRST(&asoc->send_queue);
- while (chk) {
- nchk = TAILQ_NEXT(chk, sctp_next);
+ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
/* Here we must move to the sent queue and mark */
if (PR_SCTP_BUF_ENABLED(chk->flags)) {
if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
@@ -5734,8 +6022,7 @@ sctp_prune_prsctp(struct sctp_tcb *stcb,
int ret_spc;
ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
- SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
- SCTP_SO_LOCKED);
+ 0, SCTP_SO_LOCKED);
freed_spc += ret_spc;
if (freed_spc >= dataout) {
@@ -5744,8 +6031,7 @@ sctp_prune_prsctp(struct sctp_tcb *stcb,
} /* end if chk->data */
} /* end if right class */
} /* end if chk pr-sctp */
- chk = nchk;
- } /* end while (chk) */
+ } /* tailqforeachsafe (chk) */
} /* if enabled in asoc */
}
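The prune loop now uses TAILQ_FOREACH_SAFE, which latches the next element before the body runs, so the current chunk can be released inside the loop without derailing the traversal. Here is a self-contained sketch of the idiom, assuming a BSD-flavored <sys/queue.h> that provides TAILQ_FOREACH_SAFE as FreeBSD's does; the chunk struct and size limit are made up for illustration.

    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
        int bytes;
        TAILQ_ENTRY(chunk) next;
    };

    TAILQ_HEAD(chunk_list, chunk);

    /*
     * TAILQ_FOREACH_SAFE stashes the next element in nchk before the body
     * runs, so the current element may be unlinked and freed mid-loop.
     */
    static int
    prune_large_chunks(struct chunk_list *head, int limit)
    {
        struct chunk *chk, *nchk;
        int freed = 0;

        TAILQ_FOREACH_SAFE(chk, head, next, nchk) {
            if (chk->bytes > limit) {
                TAILQ_REMOVE(head, chk, next);
                freed += chk->bytes;
                free(chk);
            }
        }
        return (freed);
    }

    int
    main(void)
    {
        struct chunk_list head = TAILQ_HEAD_INITIALIZER(head);
        struct chunk *c;
        int i;

        for (i = 1; i <= 3; i++) {
            if ((c = calloc(1, sizeof(*c))) == NULL)
                return (1);
            c->bytes = i * 1000;
            TAILQ_INSERT_TAIL(&head, c, next);
        }
        printf("freed %d bytes\n", prune_large_chunks(&head, 1500));   /* 5000 */
        return (0);
    }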
@@ -5850,7 +6136,7 @@ sctp_msg_append(struct sctp_tcb *stcb,
struct mbuf *m,
struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
{
- int error = 0, holds_lock;
+ int error = 0;
struct mbuf *at;
struct sctp_stream_queue_pending *sp = NULL;
struct sctp_stream_out *strm;
@@ -5859,7 +6145,6 @@ sctp_msg_append(struct sctp_tcb *stcb,
* Given an mbuf chain, put it into the association send queue and
* place it on the wheel
*/
- holds_lock = hold_stcb_lock;
if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
/* Invalid stream number */
SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
@@ -5893,7 +6178,6 @@ sctp_msg_append(struct sctp_tcb *stcb,
sp->timetolive = srcv->sinfo_timetolive;
sp->ppid = srcv->sinfo_ppid;
sp->context = srcv->sinfo_context;
- sp->strseq = 0;
if (sp->sinfo_flags & SCTP_ADDR_OVER) {
sp->net = net;
atomic_add_int(&sp->net->ref_count, 1);
@@ -5907,35 +6191,38 @@ sctp_msg_append(struct sctp_tcb *stcb,
sp->some_taken = 0;
sp->data = m;
sp->tail_mbuf = NULL;
- sp->length = 0;
- at = m;
sctp_set_prsctp_policy(sp);
/*
* We could in theory (for sendall) sifa the length in, but we would
* still have to hunt through the chain since we need to setup the
* tail_mbuf
*/
- while (at) {
+ sp->length = 0;
+ for (at = m; at; at = SCTP_BUF_NEXT(at)) {
if (SCTP_BUF_NEXT(at) == NULL)
sp->tail_mbuf = at;
sp->length += SCTP_BUF_LEN(at);
- at = SCTP_BUF_NEXT(at);
}
- SCTP_TCB_SEND_LOCK(stcb);
+ if (srcv->sinfo_keynumber_valid) {
+ sp->auth_keyid = srcv->sinfo_keynumber;
+ } else {
+ sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+ }
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+ sctp_auth_key_acquire(stcb, sp->auth_keyid);
+ sp->holds_key_ref = 1;
+ }
+ if (hold_stcb_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
sctp_snd_sb_alloc(stcb, sp->length);
atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
- if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
- sp->strseq = strm->next_sequence_sent;
- strm->next_sequence_sent++;
- }
- if ((strm->next_spoke.tqe_next == NULL) &&
- (strm->next_spoke.tqe_prev == NULL)) {
- /* Not on wheel, insert */
- sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
- }
+ stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
m = NULL;
- SCTP_TCB_SEND_UNLOCK(stcb);
+ if (hold_stcb_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
out_now:
if (m) {
sctp_m_freem(m);
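
The rewritten loop in sctp_msg_append() above makes a single pass over the mbuf chain to total the message length and cache the tail mbuf. A user-space sketch of the same pattern, with a hypothetical buf type standing in for struct mbuf and the SCTP_BUF_* accessors:

struct buf {
	struct buf *next;	/* stands in for SCTP_BUF_NEXT() */
	int len;		/* stands in for SCTP_BUF_LEN() */
};

/* One pass: sum the lengths and remember the last element, just as the
 * loop above fills sp->length and sp->tail_mbuf. */
static int
chain_length(struct buf *head, struct buf **tail)
{
	struct buf *at;
	int total = 0;

	*tail = NULL;
	for (at = head; at != NULL; at = at->next) {
		if (at->next == NULL)
			*tail = at;
		total += at->len;
	}
	return (total);
}
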
@@ -6049,12 +6336,10 @@ error_out:
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
- mat = appendchain;
- while (mat) {
+ for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
}
- mat = SCTP_BUF_NEXT(mat);
}
}
#endif
@@ -6106,7 +6391,7 @@ error_out:
}
}
-int
+static int
sctp_med_chunk_output(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
struct sctp_association *asoc,
@@ -6121,7 +6406,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
static void
sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
- uint32_t val)
+ uint32_t val SCTP_UNUSED)
{
struct sctp_copy_all *ca;
struct mbuf *m;
@@ -6129,6 +6414,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
int added_control = 0;
int un_sent, do_chunk_output = 1;
struct sctp_association *asoc;
+ struct sctp_nets *net;
ca = (struct sctp_copy_all *)ptr;
if (ca->m == NULL) {
@@ -6149,12 +6435,10 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
- mat = m;
- while (mat) {
+ for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
}
- mat = SCTP_BUF_NEXT(mat);
}
}
#endif
@@ -6162,6 +6446,11 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
m = NULL;
}
SCTP_TCB_LOCK_ASSERT(stcb);
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
/* Abort this assoc with m as the user defined reason */
if (m) {
@@ -6171,16 +6460,14 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
if (m) {
ph = mtod(m, struct sctp_paramhdr *);
ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
- ph->param_length = htons(ca->sndlen);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen);
}
/*
* We add one here to keep the assoc from
* disappearing on us.
*/
atomic_add_int(&stcb->asoc.refcnt, 1);
- sctp_abort_an_association(inp, stcb,
- SCTP_RESPONSE_TO_USER_REQ,
- m, SCTP_SO_NOT_LOCKED);
+ sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
/*
* sctp_abort_an_association calls sctp_free_asoc()
* free association will NOT free it since we
@@ -6200,7 +6487,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
}
} else {
if (m) {
- ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
+ ret = sctp_msg_append(stcb, net, m,
&ca->sndrcv, 1);
}
asoc = &stcb->asoc;
@@ -6208,7 +6495,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
/* shutdown this assoc */
int cnt;
- cnt = sctp_is_there_unsent_data(stcb);
+ cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
if (TAILQ_EMPTY(&asoc->send_queue) &&
TAILQ_EMPTY(&asoc->sent_queue) &&
@@ -6227,14 +6514,15 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
* only send SHUTDOWN the first time
* through
*/
- sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
}
SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb, net);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
- asoc->primary_destination);
+ net);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
asoc->primary_destination);
added_control = 1;
@@ -6274,7 +6562,6 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
abort_anyway:
atomic_add_int(&stcb->asoc.refcnt, 1);
sctp_abort_an_association(stcb->sctp_ep, stcb,
- SCTP_RESPONSE_TO_USER_REQ,
NULL, SCTP_SO_NOT_LOCKED);
atomic_add_int(&stcb->asoc.refcnt, -1);
goto no_chunk_output;
@@ -6315,7 +6602,7 @@ no_chunk_output:
}
static void
-sctp_sendall_completes(void *ptr, uint32_t val)
+sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
{
struct sctp_copy_all *ca;
@@ -6400,7 +6687,9 @@ sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
memset(ca, 0, sizeof(struct sctp_copy_all));
ca->inp = inp;
- memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
+ if (srcv) {
+ memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
+ }
/*
* take off the sendall flag, it would be bad if we failed to do
* this :-0
@@ -6447,9 +6736,7 @@ sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
struct sctp_tmit_chunk *chk, *nchk;
- chk = TAILQ_FIRST(&asoc->control_send_queue);
- while (chk) {
- nchk = TAILQ_NEXT(chk, sctp_next);
+ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
if (chk->data) {
@@ -6457,9 +6744,8 @@ sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
chk->data = NULL;
}
asoc->ctrl_queue_cnt--;
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}
- chk = nchk;
}
}
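
The loop rewrites in this patch (as in sctp_toss_old_cookies() above) switch hand-rolled "fetch next, then maybe remove" loops to TAILQ_FOREACH_SAFE, which latches the next element before the body runs so the current entry can be unlinked and freed. A self-contained sketch of the idiom, using a hypothetical entry type rather than sctp_tmit_chunk:

#include <sys/queue.h>
#include <stdlib.h>

struct entry {
	TAILQ_ENTRY(entry) link;
	int id;
};
TAILQ_HEAD(entry_head, entry);

/* Remove and free every entry matching 'id'. Safe because 'tmp' already
 * points at the successor when the current element is freed. */
static void
drop_matching(struct entry_head *head, int id)
{
	struct entry *e, *tmp;

	TAILQ_FOREACH_SAFE(e, head, link, tmp) {
		if (e->id == id) {
			TAILQ_REMOVE(head, e, link);
			free(e);
		}
	}
}
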
@@ -6467,19 +6753,16 @@ void
sctp_toss_old_asconf(struct sctp_tcb *stcb)
{
struct sctp_association *asoc;
- struct sctp_tmit_chunk *chk, *chk_tmp;
+ struct sctp_tmit_chunk *chk, *nchk;
struct sctp_asconf_chunk *acp;
asoc = &stcb->asoc;
- for (chk = TAILQ_FIRST(&asoc->asconf_send_queue); chk != NULL;
- chk = chk_tmp) {
- /* get next chk */
- chk_tmp = TAILQ_NEXT(chk, sctp_next);
+ TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
/* find SCTP_ASCONF chunk in queue */
if (chk->rec.chunk_id.id == SCTP_ASCONF) {
if (chk->data) {
acp = mtod(chk->data, struct sctp_asconf_chunk *);
- if (compare_with_wrap(ntohl(acp->serial_number), stcb->asoc.asconf_seq_out_acked, MAX_SEQ)) {
+ if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
/* Not Acked yet */
break;
}
@@ -6490,7 +6773,7 @@ sctp_toss_old_asconf(struct sctp_tcb *stcb)
chk->data = NULL;
}
asoc->ctrl_queue_cnt--;
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}
}
}
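
The compare_with_wrap(..., MAX_TSN/MAX_SEQ) calls are replaced by SCTP_TSN_GT()-style comparisons here and below. Both express serial-number arithmetic: 32-bit TSNs and ASCONF serials wrap, so "greater than" has to be evaluated modulo 2^32. A minimal sketch of such a comparison (the macro below is illustrative, not the kernel's actual definition):

#include <stdint.h>

/* Wraparound-safe "a is newer than b" for 32-bit sequence numbers: the
 * subtraction wraps modulo 2^32 and the result is read as signed. */
#define TSN_GT(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) > 0)

int
main(void)
{
	/* 0x00000002 is "after" 0xfffffffe once the counter has wrapped. */
	return (TSN_GT(0x00000002U, 0xfffffffeU) && !TSN_GT(1U, 2U)) ? 0 : 1;
}
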
@@ -6520,6 +6803,7 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
}
/* record time */
data_list[i]->sent_rcv_time = net->last_sent_time;
+ data_list[i]->rec.data.cwnd_at_send = net->cwnd;
data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
if (data_list[i]->whoTo == NULL) {
data_list[i]->whoTo = net;
@@ -6527,8 +6811,7 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
}
/* on to the sent queue */
tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
- if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
- data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
+ if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
struct sctp_tmit_chunk *tpp;
/* need to move back */
@@ -6539,8 +6822,7 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
goto all_done;
}
tp1 = tpp;
- if (compare_with_wrap(tp1->rec.data.TSN_seq,
- data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
+ if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
goto back_up_more;
}
TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
@@ -6584,16 +6866,21 @@ all_done:
asoc->peers_rwnd = 0;
}
}
+ if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
+ (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
+ }
}
static void
-sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
+sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
{
struct sctp_tmit_chunk *chk, *nchk;
- for (chk = TAILQ_FIRST(&asoc->control_send_queue);
- chk; chk = nchk) {
- nchk = TAILQ_NEXT(chk, sctp_next);
+ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
(chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
(chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
@@ -6616,7 +6903,7 @@ sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
asoc->ctrl_queue_cnt--;
if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
asoc->fwd_tsn_cnt--;
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, so_locked);
} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
/* special handling, we must look into the param */
if (chk != asoc->str_reset) {
@@ -6696,7 +6983,12 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb,
int *locked,
int *giveup,
int eeor_mode,
- int *bail)
+ int *bail,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
{
/* Move from the stream to the send_queue keeping track of the total */
struct sctp_association *asoc;
@@ -6758,6 +7050,7 @@ one_more_time:
}
atomic_subtract_int(&asoc->stream_queue_cnt, 1);
TAILQ_REMOVE(&strq->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
if (sp->net) {
sctp_free_remote_addr(sp->net);
sp->net = NULL;
@@ -6766,7 +7059,7 @@ one_more_time:
sctp_m_freem(sp->data);
sp->data = NULL;
}
- sctp_free_a_strmoq(stcb, sp);
+ sctp_free_a_strmoq(stcb, sp, so_locked);
/* we can't be locked to it */
*locked = 0;
stcb->asoc.locked_on_sending = NULL;
@@ -6932,7 +7225,7 @@ dont_do_it:
chk->last_mbuf = NULL;
if (chk->data == NULL) {
sp->some_taken = some_taken;
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, so_locked);
*bail = 1;
to_move = 0;
goto out_of;
@@ -6941,12 +7234,10 @@ dont_do_it:
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
- mat = chk->data;
- while (mat) {
+ for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
}
- mat = SCTP_BUF_NEXT(mat);
}
}
#endif
@@ -7036,7 +7327,7 @@ dont_do_it:
atomic_add_int(&sp->length, to_move);
chk->data = NULL;
*bail = 1;
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, so_locked);
to_move = 0;
goto out_of;
} else {
@@ -7053,7 +7344,7 @@ dont_do_it:
panic("prepend failes HELP?");
#else
SCTP_PRINTF("prepend fails HELP?\n");
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, so_locked);
#endif
*bail = 1;
to_move = 0;
@@ -7068,12 +7359,14 @@ dont_do_it:
chk->asoc = &stcb->asoc;
chk->pad_inplace = 0;
chk->no_fr_allowed = 0;
- chk->rec.data.stream_seq = sp->strseq;
+ chk->rec.data.stream_seq = strq->next_sequence_send;
+ if (rcv_flags & SCTP_DATA_LAST_FRAG) {
+ strq->next_sequence_send++;
+ }
chk->rec.data.stream_number = sp->stream;
chk->rec.data.payloadtype = sp->ppid;
chk->rec.data.context = sp->context;
chk->rec.data.doing_fast_retransmit = 0;
- chk->rec.data.ect_nonce = 0; /* ECN Nonce */
chk->rec.data.timetodrop = sp->ts;
chk->flags = sp->act_flags;
@@ -7167,6 +7460,7 @@ dont_do_it:
send_lock_up = 1;
}
TAILQ_REMOVE(&strq->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
if (sp->net) {
sctp_free_remote_addr(sp->net);
sp->net = NULL;
@@ -7175,7 +7469,7 @@ dont_do_it:
sctp_m_freem(sp->data);
sp->data = NULL;
}
- sctp_free_a_strmoq(stcb, sp);
+ sctp_free_a_strmoq(stcb, sp, so_locked);
/* we can't be locked to it */
*locked = 0;
@@ -7185,57 +7479,48 @@ dont_do_it:
*locked = 1;
}
asoc->chunks_on_out_queue++;
+ strq->chunks_on_queues++;
TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
asoc->send_queue_cnt++;
out_of:
if (send_lock_up) {
SCTP_TCB_SEND_UNLOCK(stcb);
- send_lock_up = 0;
}
return (to_move);
}
-static struct sctp_stream_out *
-sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc)
-{
- struct sctp_stream_out *strq;
-
- /* Find the next stream to use */
- if (asoc->last_out_stream == NULL) {
- strq = TAILQ_FIRST(&asoc->out_wheel);
- } else {
- strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
- if (strq == NULL) {
- strq = TAILQ_FIRST(&asoc->out_wheel);
- }
- }
- return (strq);
-}
-
-
static void
sctp_fill_outqueue(struct sctp_tcb *stcb,
- struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now)
+ struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
{
struct sctp_association *asoc;
- struct sctp_stream_out *strq, *strqn;
+ struct sctp_stream_out *strq;
int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
int locked, giveup;
- struct sctp_stream_queue_pending *sp;
SCTP_TCB_LOCK_ASSERT(stcb);
asoc = &stcb->asoc;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
#ifdef INET6
- if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ case AF_INET6:
goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
- } else {
- /* ?? not sure what else to do */
- goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
- }
-#else
- goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
#endif
+ default:
+ /* TSNH */
+ goal_mtu = net->mtu;
+ break;
+ }
/* Need an allowance for the data chunk header too */
goal_mtu -= sizeof(struct sctp_data_chunk);
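
The switch above sizes the per-packet data budget by address family, since the IPv4 and IPv6 headers (and hence SCTP_MIN_V4_OVERHEAD vs. SCTP_MIN_OVERHEAD) differ, and then subtracts the DATA chunk header. A stand-alone sketch of the same arithmetic; the constants are illustrative approximations, not the SCTP_MIN_* definitions:

#include <sys/socket.h>
#include <stdint.h>

#define COMMON_HDR	12	/* struct sctphdr */
#define DATA_CHUNK_HDR	16	/* struct sctp_data_chunk */

/* Usable DATA payload per packet for a given path MTU and family. */
static uint32_t
data_budget(uint32_t mtu, int family)
{
	uint32_t overhead;

	switch (family) {
	case AF_INET:
		overhead = 20 + COMMON_HDR;	/* IPv4 header */
		break;
	case AF_INET6:
		overhead = 40 + COMMON_HDR;	/* IPv6 header */
		break;
	default:
		overhead = COMMON_HDR;		/* "TSNH" fallback */
		break;
	}
	return (mtu - overhead - DATA_CHUNK_HDR);
}
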
@@ -7246,46 +7531,16 @@ sctp_fill_outqueue(struct sctp_tcb *stcb,
strq = asoc->locked_on_sending;
locked = 1;
} else {
- strq = sctp_select_a_stream(stcb, asoc);
+ strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
locked = 0;
}
- strqn = strq;
while ((goal_mtu > 0) && strq) {
- sp = TAILQ_FIRST(&strq->outqueue);
- if (sp == NULL) {
- break;
- }
- /**
- * Honor the users' choice if given. If not given,
- * pull it only to the primary path in case of not using
- * CMT.
- */
- if (((sp->net != NULL) &&
- (sp->net != net)) ||
- ((sp->net == NULL) &&
- (asoc->sctp_cmt_on_off == 0) &&
- (asoc->primary_destination != net))) {
- /* Do not pull to this network */
- if (locked) {
- break;
- } else {
- strq = sctp_select_a_stream(stcb, asoc);
- if (strq == NULL)
- /* none left */
- break;
- if (strqn == strq) {
- /* I have circled */
- break;
- }
- continue;
- }
- }
giveup = 0;
bail = 0;
moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
- &giveup, eeor_mode, &bail);
+ &giveup, eeor_mode, &bail, so_locked);
if (moved_how_much)
- asoc->last_out_stream = strq;
+ stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
if (locked) {
asoc->locked_on_sending = strq;
@@ -7294,23 +7549,10 @@ sctp_fill_outqueue(struct sctp_tcb *stcb,
break;
} else {
asoc->locked_on_sending = NULL;
- if (TAILQ_EMPTY(&strq->outqueue)) {
- if (strq == strqn) {
- /* Must move start to next one */
- strqn = TAILQ_NEXT(strq, next_spoke);
- if (strqn == NULL) {
- strqn = TAILQ_FIRST(&asoc->out_wheel);
- if (strqn == NULL) {
- break;
- }
- }
- }
- sctp_remove_from_wheel(stcb, asoc, strq, 0);
- }
if ((giveup) || bail) {
break;
}
- strq = sctp_select_a_stream(stcb, asoc);
+ strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
if (strq == NULL) {
break;
}
@@ -7322,12 +7564,14 @@ sctp_fill_outqueue(struct sctp_tcb *stcb,
if (bail)
*quit_now = 1;
+ stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
+
if (total_moved == 0) {
if ((stcb->asoc.sctp_cmt_on_off == 0) &&
(net == stcb->asoc.primary_destination)) {
/* ran dry for primary network net */
SCTP_STAT_INCR(sctps_primary_randry);
- } else if (stcb->asoc.sctp_cmt_on_off == 1) {
+ } else if (stcb->asoc.sctp_cmt_on_off > 0) {
/* ran dry with CMT on */
SCTP_STAT_INCR(sctps_cmt_randry);
}
@@ -7350,16 +7594,16 @@ void
sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
{
struct sctp_association *asoc;
- struct sctp_stream_out *outs;
struct sctp_tmit_chunk *chk;
struct sctp_stream_queue_pending *sp;
+ unsigned int i;
if (net == NULL) {
return;
}
asoc = &stcb->asoc;
- TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
- TAILQ_FOREACH(sp, &outs->outqueue, next) {
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
if (sp->net == net) {
sctp_free_remote_addr(sp->net);
sp->net = NULL;
@@ -7396,7 +7640,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
* fomulate and send the low level chunks. Making sure to combine
* any control in the control chunk queue also.
*/
- struct sctp_nets *net, *start_at, *old_start_at = NULL;
+ struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
struct mbuf *outchain, *endoutchain;
struct sctp_tmit_chunk *chk, *nchk;
@@ -7413,13 +7657,13 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
struct sctp_auth_chunk *auth = NULL;
uint16_t auth_keyid;
int override_ok = 1;
+ int skip_fill_up = 0;
int data_auth_reqd = 0;
/*
* JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
* destination.
*/
- int pf_hbflag = 0;
int quit_now = 0;
*num_out = 0;
@@ -7449,10 +7693,12 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
no_data_chunks = 0;
/* Nothing possible to send? */
- if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
+ (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
TAILQ_EMPTY(&asoc->asconf_send_queue) &&
TAILQ_EMPTY(&asoc->send_queue) &&
- TAILQ_EMPTY(&asoc->out_wheel)) {
+ stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
+nothing_to_send:
*reason_code = 9;
return (0);
}
@@ -7464,12 +7710,43 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
no_data_chunks = 1;
}
}
+ if (stcb->asoc.ecn_echo_cnt_onq) {
+ /* Record where a sack goes, if any */
+ if (no_data_chunks &&
+ (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
+ /* Nothing but ECNe to send - we don't do that */
+ goto nothing_to_send;
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
+ sack_goes_to = chk->whoTo;
+ break;
+ }
+ }
+ }
max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
if (stcb->sctp_socket)
max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
else
max_send_per_dest = 0;
- if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
+ if (no_data_chunks == 0) {
+ /* How many non-directed chunks are there? */
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+ if (chk->whoTo == NULL) {
+ /*
+ * We already have non-directed chunks on
+ * the queue, no need to do a fill-up.
+ */
+ skip_fill_up = 1;
+ break;
+ }
+ }
+
+ }
+ if ((no_data_chunks == 0) &&
+ (skip_fill_up == 0) &&
+ (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
/*
* This for loop we are in takes in each net, if
@@ -7483,23 +7760,19 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
* copy by reference (we hope).
*/
net->window_probe = 0;
- if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ||
- (net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ if ((net != stcb->asoc.alternate) &&
+ ((net->dest_state & SCTP_ADDR_PF) ||
+ (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
+ (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, 1,
SCTP_CWND_LOG_FILL_OUTQ_CALLED);
}
continue;
}
- if ((asoc->sctp_cmt_on_off == 0) &&
- (asoc->primary_destination != net) &&
- (net->ref_count < 2)) {
- /* nothing can be in queue for this guy */
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
- sctp_log_cwnd(stcb, net, 2,
- SCTP_CWND_LOG_FILL_OUTQ_CALLED);
- }
- continue;
+ if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
+ (net->flight_size == 0)) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
}
if (net->flight_size >= net->cwnd) {
/* skip this network, no room - can't fill */
@@ -7512,7 +7785,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
}
- sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now);
+ sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
if (quit_now) {
/* memory alloc failure */
no_data_chunks = 1;
@@ -7528,7 +7801,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
*reason_code = 8;
return (0);
}
- if (asoc->sctp_cmt_on_off == 1) {
+ if (asoc->sctp_cmt_on_off > 0) {
/* get the last start point */
start_at = asoc->last_net_cmt_send_started;
if (start_at == NULL) {
@@ -7544,6 +7817,16 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
} else {
start_at = TAILQ_FIRST(&asoc->nets);
}
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->whoTo == NULL) {
+ if (asoc->alternate) {
+ chk->whoTo = asoc->alternate;
+ } else {
+ chk->whoTo = asoc->primary_destination;
+ }
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ }
old_start_at = NULL;
again_one_more_time:
for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
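
The new loop above resolves control chunks queued with whoTo == NULL to the alternate path (if one is set) or the primary destination only when a packet is actually being built, so the choice reflects the current path state. A minimal sketch of that late-binding idiom with hypothetical types (the real code also takes a reference on the chosen net):

struct path;

struct pending_chunk {
	struct path *who_to;	/* NULL means "decide at send time" */
};

static void
bind_destination(struct pending_chunk *c, struct path *alternate,
    struct path *primary)
{
	if (c->who_to == NULL)
		c->who_to = (alternate != NULL) ? alternate : primary;
}
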
@@ -7554,15 +7837,6 @@ again_one_more_time:
break;
}
tsns_sent = 0xa;
- if ((asoc->sctp_cmt_on_off == 0) &&
- (asoc->primary_destination != net) &&
- (net->ref_count < 2)) {
- /*
- * Ref-count of 1 so we cannot have data or control
- * queued to this address. Skip it (non-CMT).
- */
- continue;
- }
if (TAILQ_EMPTY(&asoc->control_send_queue) &&
TAILQ_EMPTY(&asoc->asconf_send_queue) &&
(net->flight_size >= net->cwnd)) {
@@ -7572,7 +7846,7 @@ again_one_more_time:
*/
continue;
}
- ctl_cnt = bundle_at = 0;
+ bundle_at = 0;
endoutchain = outchain = NULL;
no_fragmentflg = 1;
one_chunk = 0;
@@ -7598,9 +7872,11 @@ again_one_more_time:
}
}
switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
+#ifdef INET
case AF_INET:
mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
break;
+#endif
#ifdef INET6
case AF_INET6:
mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
@@ -7629,18 +7905,24 @@ again_one_more_time:
/* ASCONF transmission */
/************************/
/* Now first lets go through the asconf queue */
- for (chk = TAILQ_FIRST(&asoc->asconf_send_queue);
- chk; chk = nchk) {
- nchk = TAILQ_NEXT(chk, sctp_next);
+ TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
if (chk->rec.chunk_id.id != SCTP_ASCONF) {
continue;
}
- if (chk->whoTo != net) {
- /*
- * No, not sent to the network we are
- * looking at
- */
- break;
+ if (chk->whoTo == NULL) {
+ if (asoc->alternate == NULL) {
+ if (asoc->primary_destination != net) {
+ break;
+ }
+ } else {
+ if (asoc->alternate != net) {
+ break;
+ }
+ }
+ } else {
+ if (chk->whoTo != net) {
+ break;
+ }
}
if (chk->data == NULL) {
break;
@@ -7724,6 +8006,10 @@ again_one_more_time:
*/
no_data_chunks = 1;
chk->sent = SCTP_DATAGRAM_SENT;
+ if (chk->whoTo == NULL) {
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+ }
chk->snd_count++;
if (mtu == 0) {
/*
@@ -7742,10 +8028,12 @@ again_one_more_time:
(struct sockaddr *)&net->ro._l_addr,
outchain, auth_offset, auth,
stcb->asoc.authinfo.active_keyid,
- no_fragmentflg, 0, NULL, asconf,
+ no_fragmentflg, 0, asconf,
inp->sctp_lport, stcb->rport,
htonl(stcb->asoc.peer_vtag),
- net->port, so_locked, NULL))) {
+ net->port, NULL,
+ 0, 0,
+ so_locked))) {
if (error == ENOBUFS) {
asoc->ifp_had_enobuf = 1;
SCTP_STAT_INCR(sctps_lowlevelerr);
@@ -7793,10 +8081,21 @@ again_one_more_time:
if (!no_out_cnt)
*num_out += ctl_cnt;
/* recalc a clean slate and setup */
- if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
- mtu = (net->mtu - SCTP_MIN_OVERHEAD);
- } else {
- mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
+#endif
+ default:
+ /* TSNH */
+ mtu = net->mtu;
+ break;
}
to_out = 0;
no_fragmentflg = 1;
@@ -7807,16 +8106,45 @@ again_one_more_time:
/* Control transmission */
/************************/
/* Now first lets go through the control queue */
- for (chk = TAILQ_FIRST(&asoc->control_send_queue);
- chk; chk = nchk) {
- nchk = TAILQ_NEXT(chk, sctp_next);
- if (chk->whoTo != net) {
+ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+ if ((sack_goes_to) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
+ (chk->whoTo != sack_goes_to)) {
/*
- * No, not sent to the network we are
- * looking at
+ * If we have a SACK in the queue and we are
+ * looking at an ECN echo that is NOT queued to
+ * where the SACK is going, redirect it there.
*/
- continue;
+ if (chk->whoTo == net) {
+ /*
+ * Don't transmit it to where its
+ * going (current net)
+ */
+ continue;
+ } else if (sack_goes_to == net) {
+ /*
+ * But do transmit it to this
+ * address
+ */
+ goto skip_net_check;
+ }
+ }
+ if (chk->whoTo == NULL) {
+ if (asoc->alternate == NULL) {
+ if (asoc->primary_destination != net) {
+ continue;
+ }
+ } else {
+ if (asoc->alternate != net) {
+ continue;
+ }
+ }
+ } else {
+ if (chk->whoTo != net) {
+ continue;
+ }
}
+ skip_net_check:
if (chk->data == NULL) {
continue;
}
@@ -7903,15 +8231,8 @@ again_one_more_time:
(chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
(chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
(chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
-
if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
hbflag = 1;
- /*
- * JRS 5/14/07 - Set the
- * flag to say a heartbeat
- * is being sent.
- */
- pf_hbflag = 1;
}
/* remove these chunks at the end */
if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
@@ -7934,8 +8255,26 @@ again_one_more_time:
if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
cookie = 1;
no_out_cnt = 1;
+ } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+ /*
+ * Increment the ECNE send count here.
+ * This means we may be over-zealous in
+ * our counting if the send fails, but
+ * it's the best place to do it (we used
+ * to keep the count on the queued chunk,
+ * but that did not tell how many times
+ * it was sent).
+ */
+ SCTP_STAT_INCR(sctps_sendecne);
}
chk->sent = SCTP_DATAGRAM_SENT;
+ if (chk->whoTo == NULL) {
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+ }
chk->snd_count++;
}
if (mtu == 0) {
@@ -7963,10 +8302,12 @@ again_one_more_time:
outchain,
auth_offset, auth,
stcb->asoc.authinfo.active_keyid,
- no_fragmentflg, 0, NULL, asconf,
+ no_fragmentflg, 0, asconf,
inp->sctp_lport, stcb->rport,
htonl(stcb->asoc.peer_vtag),
- net->port, so_locked, NULL))) {
+ net->port, NULL,
+ 0, 0,
+ so_locked))) {
if (error == ENOBUFS) {
asoc->ifp_had_enobuf = 1;
SCTP_STAT_INCR(sctps_lowlevelerr);
@@ -8019,10 +8360,21 @@ again_one_more_time:
if (!no_out_cnt)
*num_out += ctl_cnt;
/* recalc a clean slate and setup */
- if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
- mtu = (net->mtu - SCTP_MIN_OVERHEAD);
- } else {
- mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
+#endif
+ default:
+ /* TSNH */
+ mtu = net->mtu;
+ break;
}
to_out = 0;
no_fragmentflg = 1;
@@ -8030,15 +8382,15 @@ again_one_more_time:
}
}
/* JRI: if dest is in PF state, do not send data to it */
- if ((asoc->sctp_cmt_on_off == 1) &&
- (asoc->sctp_cmt_pf > 0) &&
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ (net != stcb->asoc.alternate) &&
(net->dest_state & SCTP_ADDR_PF)) {
goto no_data_fill;
}
if (net->flight_size >= net->cwnd) {
goto no_data_fill;
}
- if ((asoc->sctp_cmt_on_off == 1) &&
+ if ((asoc->sctp_cmt_on_off > 0) &&
(SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
(net->flight_size > max_rwnd_per_dest)) {
goto no_data_fill;
@@ -8049,7 +8401,7 @@ again_one_more_time:
* net. For now, this is better than nothing and it is
* disabled by default...
*/
- if ((asoc->sctp_cmt_on_off == 1) &&
+ if ((asoc->sctp_cmt_on_off > 0) &&
(SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
(max_send_per_dest > 0) &&
(net->flight_size > max_send_per_dest)) {
@@ -8071,12 +8423,14 @@ again_one_more_time:
}
/* now lets add any data within the MTU constraints */
switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
+#ifdef INET
case AF_INET:
if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
else
omtu = 0;
break;
+#endif
#ifdef INET6
case AF_INET6:
if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
@@ -8093,7 +8447,7 @@ again_one_more_time:
if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
(skip_data_for_this_net == 0)) ||
(cookie)) {
- for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
+ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
if (no_data_chunks) {
/* let only control go out */
*reason_code = 1;
@@ -8104,12 +8458,22 @@ again_one_more_time:
*reason_code = 2;
break;
}
- nchk = TAILQ_NEXT(chk, sctp_next);
if ((chk->whoTo != NULL) &&
(chk->whoTo != net)) {
/* Don't send the chunk on this net */
continue;
}
+ if (asoc->sctp_cmt_on_off == 0) {
+ if ((asoc->alternate) &&
+ (asoc->alternate != net) &&
+ (chk->whoTo == NULL)) {
+ continue;
+ } else if ((net != asoc->primary_destination) &&
+ (asoc->alternate == NULL) &&
+ (chk->whoTo == NULL)) {
+ continue;
+ }
+ }
if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
/*-
* strange, we have a chunk that is
@@ -8208,7 +8572,6 @@ again_one_more_time:
chk->window_probe = 0;
data_list[bundle_at++] = chk;
if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
- mtu = 0;
break;
}
if (chk->sent == SCTP_DATAGRAM_UNSENT) {
@@ -8270,18 +8633,6 @@ no_data_fill:
* restart it.
*/
sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
- } else if ((asoc->sctp_cmt_on_off == 1) &&
- (asoc->sctp_cmt_pf > 0) &&
- pf_hbflag &&
- ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) &&
- (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
- /*
- * JRS 5/14/07 - If a HB has been sent to a
- * PF destination and no T3 timer is
- * currently running, start the T3 timer to
- * track the HBs that were sent.
- */
- sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
}
/* Now send it, if there is anything to send :> */
if ((error = sctp_lowlevel_chunk_output(inp,
@@ -8294,11 +8645,12 @@ no_data_fill:
auth_keyid,
no_fragmentflg,
bundle_at,
- data_list[0],
asconf,
inp->sctp_lport, stcb->rport,
htonl(stcb->asoc.peer_vtag),
- net->port, so_locked, NULL))) {
+ net->port, NULL,
+ 0, 0,
+ so_locked))) {
/* error, we could not output */
if (error == ENOBUFS) {
SCTP_STAT_INCR(sctps_lowlevelerr);
@@ -8338,7 +8690,7 @@ no_data_fill:
} else {
asoc->ifp_had_enobuf = 0;
}
- outchain = endoutchain = NULL;
+ endoutchain = NULL;
auth = NULL;
auth_offset = 0;
if (bundle_at || hbflag) {
@@ -8365,27 +8717,12 @@ no_data_fill:
} else {
asoc->time_last_sent = *now;
}
- data_list[0]->do_rtt = 1;
+ if (net->rto_needed) {
+ data_list[0]->do_rtt = 1;
+ net->rto_needed = 0;
+ }
SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
- if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
- if (net->flight_size < net->cwnd) {
- /* start or restart it */
- if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
- sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
- SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
- }
- SCTP_STAT_INCR(sctps_earlyfrstrout);
- sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
- } else {
- /* stop it if its running */
- if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
- SCTP_STAT_INCR(sctps_earlyfrstpout);
- sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
- SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
- }
- }
- }
}
if (one_chunk) {
break;
@@ -8413,7 +8750,7 @@ no_data_fill:
} else {
*reason_code = 5;
}
- sctp_clean_up_ctl(stcb, asoc);
+ sctp_clean_up_ctl(stcb, asoc, so_locked);
return (0);
}
@@ -8438,7 +8775,7 @@ sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
chk->copy_by_ref = 0;
SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
if (op_err == NULL) {
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
return;
}
chk->send_size = 0;
@@ -8454,8 +8791,7 @@ sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
chk->flags = 0;
chk->asoc = &stcb->asoc;
chk->data = op_err;
- chk->whoTo = chk->asoc->primary_destination;
- atomic_add_int(&chk->whoTo->ref_count, 1);
+ chk->whoTo = NULL;
hdr = mtod(op_err, struct sctp_chunkhdr *);
hdr->chunk_type = SCTP_OPERATION_ERROR;
hdr->chunk_flags = 0;
@@ -8511,12 +8847,10 @@ sctp_send_cookie_echo(struct mbuf *m,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
- mat = cookie;
- while (mat) {
+ for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
}
- mat = SCTP_BUF_NEXT(mat);
}
}
#endif
@@ -8550,7 +8884,7 @@ sctp_send_cookie_echo(struct mbuf *m,
chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
chk->asoc = &stcb->asoc;
chk->data = cookie;
- chk->whoTo = chk->asoc->primary_destination;
+ chk->whoTo = net;
atomic_add_int(&chk->whoTo->ref_count, 1);
TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
chk->asoc->ctrl_queue_cnt++;
@@ -8585,12 +8919,10 @@ sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
- mat = outchain;
- while (mat) {
+ for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
}
- mat = SCTP_BUF_NEXT(mat);
}
}
#endif
@@ -8660,10 +8992,10 @@ sctp_send_cookie_ack(struct sctp_tcb *stcb)
chk->data = cookie_ack;
if (chk->asoc->last_control_chunk_from != NULL) {
chk->whoTo = chk->asoc->last_control_chunk_from;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
} else {
- chk->whoTo = chk->asoc->primary_destination;
+ chk->whoTo = NULL;
}
- atomic_add_int(&chk->whoTo->ref_count, 1);
hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
hdr->chunk_type = SCTP_COOKIE_ACK;
hdr->chunk_flags = 0;
@@ -8705,8 +9037,9 @@ sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
chk->asoc = &stcb->asoc;
chk->data = m_shutdown_ack;
chk->whoTo = net;
- atomic_add_int(&net->ref_count, 1);
-
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
ack_cp->ch.chunk_flags = 0;
@@ -8747,8 +9080,9 @@ sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
chk->asoc = &stcb->asoc;
chk->data = m_shutdown;
chk->whoTo = net;
- atomic_add_int(&net->ref_count, 1);
-
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
shutdown_cp->ch.chunk_flags = 0;
@@ -8799,7 +9133,9 @@ sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
chk->asoc = &stcb->asoc;
chk->whoTo = net;
- atomic_add_int(&chk->whoTo->ref_count, 1);
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
chk->asoc->ctrl_queue_cnt++;
return;
@@ -8814,7 +9150,7 @@ sctp_send_asconf_ack(struct sctp_tcb *stcb)
*/
struct sctp_tmit_chunk *chk;
struct sctp_asconf_ack *ack, *latest_ack;
- struct mbuf *m_ack, *m;
+ struct mbuf *m_ack;
struct sctp_nets *net = NULL;
SCTP_TCB_LOCK_ASSERT(stcb);
@@ -8829,17 +9165,27 @@ sctp_send_asconf_ack(struct sctp_tcb *stcb)
net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
if (net == NULL) {
/* no alternate */
- if (stcb->asoc.last_control_chunk_from == NULL)
- net = stcb->asoc.primary_destination;
- else
+ if (stcb->asoc.last_control_chunk_from == NULL) {
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ } else {
net = stcb->asoc.last_control_chunk_from;
+ }
}
} else {
/* normal case */
- if (stcb->asoc.last_control_chunk_from == NULL)
- net = stcb->asoc.primary_destination;
- else
+ if (stcb->asoc.last_control_chunk_from == NULL) {
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ } else {
net = stcb->asoc.last_control_chunk_from;
+ }
}
latest_ack->last_sent_to = net;
@@ -8857,12 +9203,10 @@ sctp_send_asconf_ack(struct sctp_tcb *stcb)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
- mat = m_ack;
- while (mat) {
+ for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
}
- mat = SCTP_BUF_NEXT(mat);
}
}
#endif
@@ -8877,10 +9221,12 @@ sctp_send_asconf_ack(struct sctp_tcb *stcb)
chk->copy_by_ref = 0;
chk->whoTo = net;
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
chk->data = m_ack;
chk->send_size = 0;
/* Get size */
- m = m_ack;
chk->send_size = ack->len;
chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
chk->rec.chunk_id.can_take_data = 1;
@@ -8888,7 +9234,6 @@ sctp_send_asconf_ack(struct sctp_tcb *stcb)
chk->snd_count = 0;
chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
chk->asoc = &stcb->asoc;
- atomic_add_int(&chk->whoTo->ref_count, 1);
TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
chk->asoc->ctrl_queue_cnt++;
@@ -8974,7 +9319,6 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
ctl_cnt++;
if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
fwd_tsn = 1;
- fwd = chk;
}
/*
* Add an AUTH chunk, if chunk requires it save the
@@ -9006,13 +9350,15 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
(struct sockaddr *)&chk->whoTo->ro._l_addr, m,
auth_offset, auth, stcb->asoc.authinfo.active_keyid,
- no_fragmentflg, 0, NULL, 0,
+ no_fragmentflg, 0, 0,
inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
- chk->whoTo->port, so_locked, NULL))) {
+ chk->whoTo->port, NULL,
+ 0, 0,
+ so_locked))) {
SCTP_STAT_INCR(sctps_lowlevelerr);
return (error);
}
- m = endofchain = NULL;
+ endofchain = NULL;
auth = NULL;
auth_offset = 0;
/*
@@ -9027,7 +9373,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
return (0);
} else {
/* Clean up the fwd-tsn list */
- sctp_clean_up_ctl(stcb, asoc);
+ sctp_clean_up_ctl(stcb, asoc, so_locked);
return (0);
}
}
@@ -9053,7 +9399,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
continue;
}
if (chk->data == NULL) {
- printf("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
+ SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
continue;
}
@@ -9064,17 +9410,28 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
chk->snd_count,
SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
atomic_add_int(&stcb->asoc.refcnt, 1);
- sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1);
return (SCTP_RETRAN_EXIT);
}
/* pick up the net */
net = chk->whoTo;
- if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
- mtu = (net->mtu - SCTP_MIN_OVERHEAD);
- } else {
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
+#endif
+ default:
+ /* TSNH */
+ mtu = net->mtu;
+ break;
}
if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
@@ -9182,16 +9539,13 @@ one_chunk_around:
* now are there anymore forward from chk to pick
* up?
*/
- fwd = TAILQ_NEXT(chk, sctp_next);
- while (fwd) {
+ for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
if (fwd->sent != SCTP_DATAGRAM_RESEND) {
/* Nope, not for retran */
- fwd = TAILQ_NEXT(fwd, sctp_next);
continue;
}
if (fwd->whoTo != net) {
/* Nope, not the net in question */
- fwd = TAILQ_NEXT(fwd, sctp_next);
continue;
}
if (data_auth_reqd && (auth == NULL)) {
@@ -9239,7 +9593,6 @@ one_chunk_around:
if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
break;
}
- fwd = TAILQ_NEXT(fwd, sctp_next);
} else {
/* can't fit so we are done */
break;
@@ -9264,14 +9617,16 @@ one_chunk_around:
if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
(struct sockaddr *)&net->ro._l_addr, m,
auth_offset, auth, auth_keyid,
- no_fragmentflg, 0, NULL, 0,
+ no_fragmentflg, 0, 0,
inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
- net->port, so_locked, NULL))) {
+ net->port, NULL,
+ 0, 0,
+ so_locked))) {
/* error, we could not output */
SCTP_STAT_INCR(sctps_lowlevelerr);
return (error);
}
- m = endofchain = NULL;
+ endofchain = NULL;
auth = NULL;
auth_offset = 0;
/* For HB's */
@@ -9399,12 +9754,10 @@ one_chunk_around:
return (0);
}
-
-static int
+static void
sctp_timer_validation(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
- struct sctp_association *asoc,
- int ret)
+ struct sctp_association *asoc)
{
struct sctp_nets *net;
@@ -9412,14 +9765,18 @@ sctp_timer_validation(struct sctp_inpcb *inp,
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
/* Here is a timer */
- return (ret);
+ return;
}
}
SCTP_TCB_LOCK_ASSERT(stcb);
/* Gak, we did not have a timer somewhere */
SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
- sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
- return (ret);
+ if (asoc->alternate) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
+ } else {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
+ }
+ return;
}
void
@@ -9447,22 +9804,26 @@ sctp_chunk_output(struct sctp_inpcb *inp,
*/
struct sctp_association *asoc;
struct sctp_nets *net;
- int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
- burst_cnt = 0, burst_limit = 0;
+ int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
+ unsigned int burst_cnt = 0;
struct timeval now;
int now_filled = 0;
- int nagle_on = 0;
+ int nagle_on;
int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
int un_sent = 0;
- int fr_done, tot_frs = 0;
+ int fr_done;
+ unsigned int tot_frs = 0;
asoc = &stcb->asoc;
+ /* The Nagle algorithm is only applied when handling a send call. */
if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
nagle_on = 0;
} else {
nagle_on = 1;
}
+ } else {
+ nagle_on = 0;
}
SCTP_TCB_LOCK_ASSERT(stcb);
@@ -9480,7 +9841,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
* running, if so piggy-back the sack.
*/
if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
- sctp_send_sack(stcb);
+ sctp_send_sack(stcb, so_locked);
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
}
while (asoc->sent_queue_retran_cnt) {
@@ -9525,7 +9886,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(8, inp, stcb, NULL);
#endif
- (void)sctp_timer_validation(inp, stcb, asoc, ret);
+ sctp_timer_validation(inp, stcb, asoc);
return;
}
if (ret < 0) {
@@ -9551,12 +9912,11 @@ sctp_chunk_output(struct sctp_inpcb *inp,
&now, &now_filled, frag_point, so_locked);
return;
}
- if (tot_frs > asoc->max_burst) {
+ if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
/* Hit FR burst limit */
return;
}
if ((num_out == 0) && (ret == 0)) {
-
/* No more retrans to send */
break;
}
@@ -9565,10 +9925,8 @@ sctp_chunk_output(struct sctp_inpcb *inp,
sctp_auditing(12, inp, stcb, NULL);
#endif
/* Check for bad destinations, if they exist move chunks around. */
- burst_limit = asoc->max_burst;
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
- SCTP_ADDR_NOT_REACHABLE) {
+ if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
/*-
* If possible, move things off of this address; we
* still may send below due to the dormant state, but
* we try to find an alternate address to send to
*/
if (net->ref_count > 1)
sctp_move_chunks_from_net(stcb, net);
- } else if ((asoc->sctp_cmt_on_off == 1) &&
- (asoc->sctp_cmt_pf > 0) &&
- ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
- /*
- * JRS 5/14/07 - If CMT PF is on and the current
- * destination is in PF state, move all queued data
- * to an alternate desination.
- */
- if (net->ref_count > 1)
- sctp_move_chunks_from_net(stcb, net);
} else {
/*-
* if ((asoc->sat_network) || (net->addr_is_local))
* { burst_limit = asoc->max_burst *
* SCTP_SAT_NETWORK_BURST_INCR; }
*/
- if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
- if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
- /*
- * JRS - Use the congestion control
- * given in the congestion control
- * module
- */
- asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, burst_limit);
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
- sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
+ if (asoc->max_burst > 0) {
+ if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
+ if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
+ /*
+ * JRS - Use the congestion
+ * control given in the
+ * congestion control module
+ */
+ asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+ sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
+ }
+ SCTP_STAT_INCR(sctps_maxburstqueued);
+ }
+ net->fast_retran_ip = 0;
+ } else {
+ if (net->flight_size == 0) {
+ /*
+ * Should be decaying the
+ * cwnd here
+ */
+ ;
}
- SCTP_STAT_INCR(sctps_maxburstqueued);
- }
- net->fast_retran_ip = 0;
- } else {
- if (net->flight_size == 0) {
- /* Should be decaying the cwnd here */
- ;
}
}
}
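
The reworked block above applies max burst only when asoc->max_burst is non-zero and, in the cwnd-based variant, asks the congestion-control module to cap cwnd so that no more than max_burst new MTU-sized packets can leave beyond what is already in flight. A sketch of the cap being requested (names are illustrative, not the cc module interface):

#include <stdint.h>

/* Never let the window admit more than 'max_burst' new MTU-sized
 * packets beyond the data already in flight. */
static uint32_t
apply_max_burst(uint32_t cwnd, uint32_t flight, uint32_t mtu,
    uint32_t max_burst)
{
	uint32_t limit = flight + max_burst * mtu;

	return ((cwnd > limit) ? limit : cwnd);
}
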
@@ -9644,21 +9997,24 @@ sctp_chunk_output(struct sctp_inpcb *inp,
}
}
if (nagle_on) {
- /*-
- * When nagle is on, we look at how much is un_sent, then
- * if its smaller than an MTU and we have data in
- * flight we stop.
+ /*
+ * When the Nagle algorithm is used, look at how
+ * much is unsent; if it's smaller than an MTU and
+ * we have data in flight, we stop, unless we are
+ * handling a fragmented user message.
*/
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
- (stcb->asoc.total_flight > 0)) {
+ (stcb->asoc.total_flight > 0) &&
+ ((stcb->asoc.locked_on_sending == NULL) ||
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
break;
}
}
if (TAILQ_EMPTY(&asoc->control_send_queue) &&
TAILQ_EMPTY(&asoc->send_queue) &&
- TAILQ_EMPTY(&asoc->out_wheel)) {
+ stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
/* Nothing left to send */
break;
}
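
The test above implements the Nagle decision for SCTP: hold the packet back when the unsent backlog would not fill an MTU and data is already in flight, unless explicit EOR handling of a partially sent message requires pushing it out. A compact user-space sketch with illustrative field names:

#include <stdbool.h>
#include <stdint.h>

struct assoc_view {
	uint32_t total_queued;	/* bytes queued by the application */
	uint32_t total_flight;	/* bytes currently in flight */
	uint32_t queued_msgs;	/* messages still on stream queues */
	uint32_t mtu_payload;	/* smallest path MTU minus overhead */
};

/* Return true if Nagle says "wait for more data before sending". */
static bool
nagle_holds_packet(const struct assoc_view *a, bool nodelay)
{
	uint32_t un_sent;

	if (nodelay)
		return (false);
	/* Each queued message will also need a DATA chunk header. */
	un_sent = (a->total_queued - a->total_flight) + a->queued_msgs * 16;
	return ((un_sent < a->mtu_payload) && (a->total_flight > 0));
}
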
@@ -9666,11 +10022,13 @@ sctp_chunk_output(struct sctp_inpcb *inp,
/* Nothing left to send */
break;
}
- } while (num_out && (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
- (burst_cnt < burst_limit)));
+ } while (num_out &&
+ ((asoc->max_burst == 0) ||
+ SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
+ (burst_cnt < asoc->max_burst)));
if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
- if (burst_cnt >= burst_limit) {
+ if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
SCTP_STAT_INCR(sctps_maxburstqueued);
asoc->burst_limit_applied = 1;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
@@ -9698,13 +10056,13 @@ sctp_chunk_output(struct sctp_inpcb *inp,
int
-sctp_output(inp, m, addr, control, p, flags)
- struct sctp_inpcb *inp;
- struct mbuf *m;
- struct sockaddr *addr;
- struct mbuf *control;
- struct thread *p;
- int flags;
+sctp_output(
+ struct sctp_inpcb *inp,
+ struct mbuf *m,
+ struct sockaddr *addr,
+ struct mbuf *control,
+ struct thread *p,
+ int flags)
{
if (inp == NULL) {
SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
@@ -9738,10 +10096,9 @@ send_forward_tsn(struct sctp_tcb *stcb,
chk->sent = SCTP_DATAGRAM_UNSENT;
chk->snd_count = 0;
/* Do we correct its output location? */
- if (chk->whoTo != asoc->primary_destination) {
+ if (chk->whoTo) {
sctp_free_remote_addr(chk->whoTo);
- chk->whoTo = asoc->primary_destination;
- atomic_add_int(&chk->whoTo->ref_count, 1);
+ chk->whoTo = NULL;
}
goto sctp_fill_in_rest;
}
@@ -9759,14 +10116,12 @@ send_forward_tsn(struct sctp_tcb *stcb,
chk->whoTo = NULL;
chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
if (chk->data == NULL) {
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
return;
}
SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
chk->sent = SCTP_DATAGRAM_UNSENT;
chk->snd_count = 0;
- chk->whoTo = asoc->primary_destination;
- atomic_add_int(&chk->whoTo->ref_count, 1);
TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
asoc->ctrl_queue_cnt++;
sctp_fill_in_rest:
@@ -9783,7 +10138,8 @@ sctp_fill_in_rest:
unsigned int cnt_of_skipped = 0;
TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
- if (at->sent != SCTP_FORWARD_TSN_SKIP) {
+ if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
+ (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* no more to look at */
break;
}
@@ -9831,12 +10187,14 @@ sctp_fill_in_rest:
* we report.
*/
at = TAILQ_FIRST(&asoc->sent_queue);
- for (i = 0; i < cnt_of_skipped; i++) {
- tp1 = TAILQ_NEXT(at, sctp_next);
- if (tp1 == NULL) {
- break;
+ if (at != NULL) {
+ for (i = 0; i < cnt_of_skipped; i++) {
+ tp1 = TAILQ_NEXT(at, sctp_next);
+ if (tp1 == NULL) {
+ break;
+ }
+ at = tp1;
}
- at = tp1;
}
if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
sctp_misc_ints(SCTP_FWD_TSN_CHECK,
@@ -9900,11 +10258,14 @@ sctp_fill_in_rest:
}
}
return;
-
}
void
-sctp_send_sack(struct sctp_tcb *stcb)
+sctp_send_sack(struct sctp_tcb *stcb, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
{
/*-
* Queue up a SACK or NR-SACK in the control queue.
@@ -9957,8 +10318,10 @@ sctp_send_sack(struct sctp_tcb *stcb)
sctp_m_freem(a_chk->data);
a_chk->data = NULL;
}
- sctp_free_remote_addr(a_chk->whoTo);
- a_chk->whoTo = NULL;
+ if (a_chk->whoTo) {
+ sctp_free_remote_addr(a_chk->whoTo);
+ a_chk->whoTo = NULL;
+ }
break;
}
}
@@ -9990,13 +10353,13 @@ sctp_send_sack(struct sctp_tcb *stcb)
a_chk->whoTo = NULL;
if ((asoc->numduptsns) ||
- (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) {
+ (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
/*-
* Ok, we have some duplicates or the destination for the
* sack is unreachable, let's see if we can select an
* alternate to asoc->last_data_chunk_from
*/
- if ((!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) &&
+ if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
(asoc->used_alt_onsack > asoc->numnets)) {
/* We used an alt last time, don't this time */
a_chk->whoTo = NULL;
@@ -10020,7 +10383,7 @@ sctp_send_sack(struct sctp_tcb *stcb)
if (a_chk->whoTo) {
atomic_add_int(&a_chk->whoTo->ref_count, 1);
}
- if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
highest_tsn = asoc->highest_tsn_inside_map;
} else {
highest_tsn = asoc->highest_tsn_inside_nr_map;
@@ -10046,7 +10409,7 @@ sctp_send_sack(struct sctp_tcb *stcb)
sctp_m_freem(a_chk->data);
a_chk->data = NULL;
}
- sctp_free_a_chunk(stcb, a_chk);
+ sctp_free_a_chunk(stcb, a_chk, so_locked);
/* sa_ignore NO_NULL_CHK */
if (stcb->asoc.delayed_ack) {
sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
@@ -10067,15 +10430,9 @@ sctp_send_sack(struct sctp_tcb *stcb)
limit = mtod(a_chk->data, caddr_t);
limit += space;
- /* 0x01 is used by nonce for ecn */
- if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
- (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
- (asoc->peer_supports_ecn_nonce))
- flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
- else
- flags = 0;
+ flags = 0;
- if ((asoc->sctp_cmt_on_off == 1) &&
+ if ((asoc->sctp_cmt_on_off > 0) &&
SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*-
* CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
@@ -10115,15 +10472,15 @@ sctp_send_sack(struct sctp_tcb *stcb)
}
}
- if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
+ if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
offset = 1;
} else {
offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
}
if (((type == SCTP_SELECTIVE_ACK) &&
- compare_with_wrap(highest_tsn, asoc->cumulative_tsn, MAX_TSN)) ||
+ SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
((type == SCTP_NR_SELECTIVE_ACK) &&
- compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN))) {
+ SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
/* we have a gap .. maybe */
for (i = 0; i < siz; i++) {
tsn_map = asoc->mapping_array[i];
@@ -10195,12 +10552,12 @@ sctp_send_sack(struct sctp_tcb *stcb)
siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
}
- if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
+ if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
offset = 1;
} else {
offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
}
- if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn, MAX_TSN)) {
+ if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
/* we have a gap .. maybe */
for (i = 0; i < siz; i++) {
tsn_map = asoc->nr_mapping_array[i];
@@ -10321,44 +10678,50 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
#endif
)
{
- struct mbuf *m_abort;
- struct mbuf *m_out = NULL, *m_end = NULL;
- struct sctp_abort_chunk *abort = NULL;
- int sz;
- uint32_t auth_offset = 0;
+ struct mbuf *m_abort, *m, *m_last;
+ struct mbuf *m_out, *m_end = NULL;
+ struct sctp_abort_chunk *abort;
struct sctp_auth_chunk *auth = NULL;
+ struct sctp_nets *net;
+ uint32_t auth_offset = 0;
+ uint16_t cause_len, chunk_len, padding_len;
+ SCTP_TCB_LOCK_ASSERT(stcb);
/*-
* Add an AUTH chunk, if chunk requires it and save the offset into
* the chain for AUTH
*/
if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
stcb->asoc.peer_auth_chunks)) {
- m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
+ m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
stcb, SCTP_ABORT_ASSOCIATION);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ } else {
+ m_out = NULL;
}
- SCTP_TCB_LOCK_ASSERT(stcb);
m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
if (m_abort == NULL) {
- /* no mbuf's */
- if (m_out)
+ if (m_out) {
sctp_m_freem(m_out);
+ }
+ if (operr) {
+ sctp_m_freem(operr);
+ }
return;
}
/* link in any error */
SCTP_BUF_NEXT(m_abort) = operr;
- sz = 0;
- if (operr) {
- struct mbuf *n;
-
- n = operr;
- while (n) {
- sz += SCTP_BUF_LEN(n);
- n = SCTP_BUF_NEXT(n);
+ cause_len = 0;
+ m_last = NULL;
+ for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
+ cause_len += (uint16_t) SCTP_BUF_LEN(m);
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ m_last = m;
}
}
- SCTP_BUF_LEN(m_abort) = sizeof(*abort);
+ SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
+ chunk_len = (uint16_t) sizeof(struct sctp_abort_chunk) + cause_len;
+ padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
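The chunk length stored in the header excludes the trailing padding, while the mbuf chain must still end on a 4-byte boundary; a small sketch of the rounding SCTP_SIZE32() is assumed to do, with a worked example using a hypothetical 18-byte cause:

/* chunk_len   = 4 (ABORT header) + 18 = 22
 * SCTP_SIZE32(22)       = 24   (round up to a multiple of 4)
 * padding_len = 24 - 22 =  2   (two zero bytes appended to m_last) */
static inline uint16_t
pad_needed(uint16_t chunk_len)
{
	return ((uint16_t)((chunk_len + 3) & ~3) - chunk_len);
}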
if (m_out == NULL) {
/* NO Auth chunk prepended, so reserve space in front */
SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
@@ -10367,19 +10730,30 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
/* Put AUTH chunk at the front of the chain */
SCTP_BUF_NEXT(m_end) = m_abort;
}
-
- /* fill in the ABORT chunk */
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ /* Fill in the ABORT chunk header. */
abort = mtod(m_abort, struct sctp_abort_chunk *);
abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
abort->ch.chunk_flags = 0;
- abort->ch.chunk_length = htons(sizeof(*abort) + sz);
-
- (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
- stcb->asoc.primary_destination,
- (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
- m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, NULL, 0,
+ abort->ch.chunk_length = htons(chunk_len);
+ /* Add padding, if necessary. */
+ if (padding_len > 0) {
+ if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) {
+ sctp_m_freem(m_out);
+ return;
+ }
+ }
+ (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
- stcb->asoc.primary_destination->port, so_locked, NULL);
+ stcb->asoc.primary_destination->port, NULL,
+ 0, 0,
+ so_locked);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
@@ -10413,159 +10787,217 @@ sctp_send_shutdown_complete(struct sctp_tcb *stcb,
SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
- m_shutdown_comp, 0, NULL, 0, 1, 0, NULL, 0,
+ m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
stcb->sctp_ep->sctp_lport, stcb->rport,
htonl(vtag),
- net->port, SCTP_SO_NOT_LOCKED, NULL);
+ net->port, NULL,
+ 0, 0,
+ SCTP_SO_NOT_LOCKED);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
return;
}
-void
-sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
+static void
+sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, uint32_t vtag,
+ uint8_t type, struct mbuf *cause,
+ uint8_t use_mflowid, uint32_t mflowid,
uint32_t vrf_id, uint16_t port)
{
- /* formulate and SEND a SHUTDOWN-COMPLETE */
struct mbuf *o_pak;
struct mbuf *mout;
- struct ip *iph, *iph_out;
- struct udphdr *udp = NULL;
+ struct sctphdr *shout;
+ struct sctp_chunkhdr *ch;
+ struct udphdr *udp;
+ int len, cause_len, padding_len, ret;
+#ifdef INET
+ struct sockaddr_in *src_sin, *dst_sin;
+ struct ip *ip;
+
+#endif
#ifdef INET6
- struct ip6_hdr *ip6, *ip6_out;
+ struct sockaddr_in6 *src_sin6, *dst_sin6;
+ struct ip6_hdr *ip6;
#endif
- int offset_out, len, mlen;
- struct sctp_shutdown_complete_msg *comp_cp;
- iph = mtod(m, struct ip *);
- switch (iph->ip_v) {
- case IPVERSION:
- len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
+ /* Compute the length of the cause and add final padding. */
+ cause_len = 0;
+ if (cause != NULL) {
+ struct mbuf *m_at, *m_last = NULL;
+
+ for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ if (SCTP_BUF_NEXT(m_at) == NULL)
+ m_last = m_at;
+ cause_len += SCTP_BUF_LEN(m_at);
+ }
+ padding_len = cause_len % 4;
+ if (padding_len != 0) {
+ padding_len = 4 - padding_len;
+ }
+ if (padding_len != 0) {
+ if (sctp_add_pad_tombuf(m_last, padding_len)) {
+ sctp_m_freem(cause);
+ return;
+ }
+ }
+ } else {
+ padding_len = 0;
+ }
+ /* Get an mbuf for the header. */
+ len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ len += sizeof(struct ip);
break;
+#endif
#ifdef INET6
- case IPV6_VERSION >> 4:
- len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
+ case AF_INET6:
+ len += sizeof(struct ip6_hdr);
break;
#endif
default:
- return;
+ break;
}
if (port) {
len += sizeof(struct udphdr);
}
mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
if (mout == NULL) {
+ if (cause) {
+ sctp_m_freem(cause);
+ }
return;
}
SCTP_BUF_RESV_UF(mout, max_linkhdr);
SCTP_BUF_LEN(mout) = len;
- SCTP_BUF_NEXT(mout) = NULL;
- iph_out = NULL;
+ SCTP_BUF_NEXT(mout) = cause;
+ if (use_mflowid != 0) {
+ mout->m_pkthdr.flowid = mflowid;
+ mout->m_flags |= M_FLOWID;
+ }
+#ifdef INET
+ ip = NULL;
+#endif
#ifdef INET6
- ip6_out = NULL;
+ ip6 = NULL;
#endif
- offset_out = 0;
-
- switch (iph->ip_v) {
- case IPVERSION:
- iph_out = mtod(mout, struct ip *);
-
- /* Fill in the IP header for the ABORT */
- iph_out->ip_v = IPVERSION;
- iph_out->ip_hl = (sizeof(struct ip) / 4);
- iph_out->ip_tos = (u_char)0;
- iph_out->ip_id = 0;
- iph_out->ip_off = 0;
- iph_out->ip_ttl = MAXTTL;
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ src_sin = (struct sockaddr_in *)src;
+ dst_sin = (struct sockaddr_in *)dst;
+ ip = mtod(mout, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = (sizeof(struct ip) >> 2);
+ ip->ip_tos = 0;
+ ip->ip_id = ip_newid();
+ ip->ip_off = 0;
+ ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
if (port) {
- iph_out->ip_p = IPPROTO_UDP;
+ ip->ip_p = IPPROTO_UDP;
} else {
- iph_out->ip_p = IPPROTO_SCTP;
+ ip->ip_p = IPPROTO_SCTP;
}
- iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
- iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
-
- /* let IP layer calculate this */
- iph_out->ip_sum = 0;
- offset_out += sizeof(*iph_out);
- comp_cp = (struct sctp_shutdown_complete_msg *)(
- (caddr_t)iph_out + offset_out);
+ ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
+ ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
+ ip->ip_sum = 0;
+ len = sizeof(struct ip);
+ shout = (struct sctphdr *)((caddr_t)ip + len);
break;
+#endif
#ifdef INET6
- case IPV6_VERSION >> 4:
- ip6 = (struct ip6_hdr *)iph;
- ip6_out = mtod(mout, struct ip6_hdr *);
-
- /* Fill in the IPv6 header for the ABORT */
- ip6_out->ip6_flow = ip6->ip6_flow;
- ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
+ case AF_INET6:
+ src_sin6 = (struct sockaddr_in6 *)src;
+ dst_sin6 = (struct sockaddr_in6 *)dst;
+ ip6 = mtod(mout, struct ip6_hdr *);
+ ip6->ip6_flow = htonl(0x60000000);
+ if (V_ip6_auto_flowlabel) {
+ ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
+ }
+ ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
if (port) {
- ip6_out->ip6_nxt = IPPROTO_UDP;
+ ip6->ip6_nxt = IPPROTO_UDP;
} else {
- ip6_out->ip6_nxt = IPPROTO_SCTP;
+ ip6->ip6_nxt = IPPROTO_SCTP;
}
- ip6_out->ip6_src = ip6->ip6_dst;
- ip6_out->ip6_dst = ip6->ip6_src;
- /*
- * ?? The old code had both the iph len + payload, I think
- * this is wrong and would never have worked
- */
- ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
- offset_out += sizeof(*ip6_out);
- comp_cp = (struct sctp_shutdown_complete_msg *)(
- (caddr_t)ip6_out + offset_out);
+ ip6->ip6_src = dst_sin6->sin6_addr;
+ ip6->ip6_dst = src_sin6->sin6_addr;
+ len = sizeof(struct ip6_hdr);
+ shout = (struct sctphdr *)((caddr_t)ip6 + len);
break;
-#endif /* INET6 */
+#endif
default:
- /* Currently not supported. */
- sctp_m_freem(mout);
- return;
+ len = 0;
+ shout = mtod(mout, struct sctphdr *);
+ break;
}
if (port) {
- udp = (struct udphdr *)comp_cp;
+ if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+ sctp_m_freem(mout);
+ return;
+ }
+ udp = (struct udphdr *)shout;
udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
udp->uh_dport = port;
- udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
- if (iph_out)
- udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
- offset_out += sizeof(struct udphdr);
- comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
+ udp->uh_sum = 0;
+ udp->uh_ulen = htons(sizeof(struct udphdr) +
+ sizeof(struct sctphdr) +
+ sizeof(struct sctp_chunkhdr) +
+ cause_len + padding_len);
+ len += sizeof(struct udphdr);
+ shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
+ } else {
+ udp = NULL;
}
+ shout->src_port = sh->dest_port;
+ shout->dest_port = sh->src_port;
+ shout->checksum = 0;
+ if (vtag) {
+ shout->v_tag = htonl(vtag);
+ } else {
+ shout->v_tag = sh->v_tag;
+ }
+ len += sizeof(struct sctphdr);
+ ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
+ ch->chunk_type = type;
+ if (vtag) {
+ ch->chunk_flags = 0;
+ } else {
+ ch->chunk_flags = SCTP_HAD_NO_TCB;
+ }
+ ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
+ len += sizeof(struct sctp_chunkhdr);
+ len += cause_len + padding_len;
+
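Putting the pieces together, the total packet length is accumulated in len; a worked example for a UDP-encapsulated IPv4 ABORT carrying no cause, assuming the usual header sizes on this platform:

/* sizeof(struct ip)            = 20
 * sizeof(struct udphdr)        =  8
 * sizeof(struct sctphdr)       = 12
 * sizeof(struct sctp_chunkhdr) =  4
 * cause_len + padding_len      =  0
 * len                          = 44 bytes handed to SCTP_ATTACH_CHAIN() */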
if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
- /* no mbuf's */
sctp_m_freem(mout);
return;
}
- /* Now copy in and fill in the ABORT tags etc. */
- comp_cp->sh.src_port = sh->dest_port;
- comp_cp->sh.dest_port = sh->src_port;
- comp_cp->sh.checksum = 0;
- comp_cp->sh.v_tag = sh->v_tag;
- comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
- comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
- comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
-
- if (iph_out != NULL) {
- sctp_route_t ro;
- int ret;
-
- mlen = SCTP_BUF_LEN(mout);
- bzero(&ro, sizeof ro);
- /* set IPv4 length */
- iph_out->ip_len = mlen;
-#ifdef SCTP_PACKET_LOGGING
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
- sctp_packet_log(mout, mlen);
-#endif
+ SCTP_ATTACH_CHAIN(o_pak, mout, len);
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (port) {
+ if (V_udp_cksum) {
+ udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+ } else {
+ udp->uh_sum = 0;
+ }
+ }
+ ip->ip_len = len;
if (port) {
#if defined(SCTP_WITH_NO_CSUM)
SCTP_STAT_INCR(sctps_sendnocrc);
#else
- comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
+ shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
SCTP_STAT_INCR(sctps_sendswcrc);
#endif
- SCTP_ENABLE_UDP_CSUM(mout);
+ if (V_udp_cksum) {
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+ }
} else {
#if defined(SCTP_WITH_NO_CSUM)
SCTP_STAT_INCR(sctps_sendnocrc);
@@ -10575,183 +11007,99 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
}
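For the UDP-encapsulated IPv4 case the CRC32c is computed in software starting past the outer headers; the offset arithmetic written out, assuming a 20-byte IPv4 header with no options:

/* sizeof(struct ip) + sizeof(struct udphdr) = 20 + 8 = 28,
 * so sctp_calculate_cksum() starts at the SCTP common header and the
 * outer IP and UDP headers are not covered by the CRC32c. */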
- SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
- /* out it goes */
- SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
-
- /* Free the route if we got one back */
- if (ro.ro_rt)
- RTFREE(ro.ro_rt);
- }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+ sctp_packet_log(o_pak);
+ }
+#endif
+ SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
+ break;
+#endif
#ifdef INET6
- if (ip6_out != NULL) {
- struct route_in6 ro;
- int ret;
- struct ifnet *ifp = NULL;
-
- bzero(&ro, sizeof(ro));
- mlen = SCTP_BUF_LEN(mout);
-#ifdef SCTP_PACKET_LOGGING
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
- sctp_packet_log(mout, mlen);
-#endif
- SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
+ case AF_INET6:
+ ip6->ip6_plen = len - sizeof(struct ip6_hdr);
if (port) {
#if defined(SCTP_WITH_NO_CSUM)
SCTP_STAT_INCR(sctps_sendnocrc);
#else
- comp_cp->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
SCTP_STAT_INCR(sctps_sendswcrc);
#endif
- if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), mlen - sizeof(struct ip6_hdr))) == 0) {
+ if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
udp->uh_sum = 0xffff;
}
} else {
#if defined(SCTP_WITH_NO_CSUM)
SCTP_STAT_INCR(sctps_sendnocrc);
#else
- mout->m_pkthdr.csum_flags = CSUM_SCTP;
- mout->m_pkthdr.csum_data = 0;
- SCTP_STAT_INCR(sctps_sendhwcrc);
+ shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
#endif
}
- SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
-
- /* Free the route if we got one back */
- if (ro.ro_rt)
- RTFREE(ro.ro_rt);
- }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+ sctp_packet_log(o_pak);
+ }
+#endif
+ SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
+ break;
#endif
+ default:
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
+ dst->sa_family);
+ sctp_m_freem(mout);
+ SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ return;
+ }
SCTP_STAT_INCR(sctps_sendpackets);
SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
return;
-
}
-static struct sctp_nets *
-sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
+void
+sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh,
+ uint8_t use_mflowid, uint32_t mflowid,
+ uint32_t vrf_id, uint16_t port)
{
- struct sctp_nets *net, *hnet;
- int ms_goneby, highest_ms, state_overide = 0;
-
- (void)SCTP_GETTIME_TIMEVAL(now);
- highest_ms = 0;
- hnet = NULL;
- SCTP_TCB_LOCK_ASSERT(stcb);
- TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
- if (
- ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
- (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
- ) {
- /*
- * Skip this guy from consideration if HB is off AND
- * it's confirmed
- */
- continue;
- }
- if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
- /* skip this dest net from consideration */
- continue;
- }
- if (net->last_sent_time.tv_sec) {
- /* Sent to so we subtract */
- ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
- } else
- /* Never been sent to */
- ms_goneby = 0x7fffffff;
- /*-
- * When the address state is unconfirmed but still
- * considered reachable, we HB at a higher rate. Once it
- * goes confirmed OR reaches the "unreachable" state, then
- * we cut it back to HB at a more normal pace.
- */
- if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
- state_overide = 1;
- } else {
- state_overide = 0;
- }
-
- if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
- (ms_goneby > highest_ms)) {
- highest_ms = ms_goneby;
- hnet = net;
- }
- }
- if (hnet &&
- ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
- state_overide = 1;
- } else {
- state_overide = 0;
- }
-
- if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
- /*-
- * Found the one with longest delay bounds OR it is
- * unconfirmed and still not marked unreachable.
- */
- SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
-#ifdef SCTP_DEBUG
- if (hnet) {
- SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
- (struct sockaddr *)&hnet->ro._l_addr);
- } else {
- SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
- }
-#endif
- /* update the timer now */
- hnet->last_sent_time = *now;
- return (hnet);
- }
- /* Nothing to HB */
- return (NULL);
+ sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
+ use_mflowid, mflowid,
+ vrf_id, port);
}
-int
-sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
+void
+sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
{
struct sctp_tmit_chunk *chk;
- struct sctp_nets *net;
struct sctp_heartbeat_chunk *hb;
struct timeval now;
- struct sockaddr_in *sin;
- struct sockaddr_in6 *sin6;
SCTP_TCB_LOCK_ASSERT(stcb);
- if (user_req == 0) {
- net = sctp_select_hb_destination(stcb, &now);
- if (net == NULL) {
- /*-
- * All are busy, none to send to; just start the
- * timer again.
- */
- if (stcb->asoc.state == 0) {
- return (0);
- }
- sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
- stcb->sctp_ep,
- stcb,
- net);
- return (0);
- }
- } else {
- net = u_net;
- if (net == NULL) {
- return (0);
- }
- (void)SCTP_GETTIME_TIMEVAL(&now);
+ if (net == NULL) {
+ return;
}
- sin = (struct sockaddr_in *)&net->ro._l_addr;
- if (sin->sin_family != AF_INET) {
- if (sin->sin_family != AF_INET6) {
- /* huh */
- return (0);
- }
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+ default:
+ return;
}
sctp_alloc_a_chunk(stcb, chk);
if (chk == NULL) {
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
- return (0);
+ return;
}
chk->copy_by_ref = 0;
chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
@@ -10761,8 +11109,8 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
if (chk->data == NULL) {
- sctp_free_a_chunk(stcb, chk);
- return (0);
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ return;
}
SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
SCTP_BUF_LEN(chk->data) = chk->send_size;
@@ -10783,9 +11131,8 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
/* Did our user request this one, put it in */
- hb->heartbeat.hb_info.user_req = user_req;
- hb->heartbeat.hb_info.addr_family = sin->sin_family;
- hb->heartbeat.hb_info.addr_len = sin->sin_len;
+ hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
+ hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
/*
* we only take from the entropy pool if the address is not
@@ -10797,64 +11144,30 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
}
- if (sin->sin_family == AF_INET) {
- memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
- } else if (sin->sin_family == AF_INET6) {
- /* We leave the scope the way it is in our lookup table. */
- sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
- memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
- } else {
- /* huh compiler bug */
- return (0);
- }
-
- /*
- * JRS 5/14/07 - In CMT PF, the T3 timer is used to track
- * PF-heartbeats. Because of this, threshold management is done by
- * the t3 timer handler, and does not need to be done upon the send
- * of a PF-heartbeat. If CMT PF is on and the destination to which a
- * heartbeat is being sent is in PF state, do NOT do threshold
- * management.
- */
- if ((stcb->asoc.sctp_cmt_pf == 0) ||
- ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
- /* ok we have a destination that needs a beat */
- /* let's do the threshold management Qiaobing style */
- if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
- stcb->asoc.max_send_times)) {
- /*-
- * we have lost the association, in a way this is
- * quite bad since we really are one less time since
- * we really did not send yet. This is the down side
- * to the Q's style as defined in the RFC and not my
- * alternate style defined in the RFC.
- */
- if (chk->data != NULL) {
- sctp_m_freem(chk->data);
- chk->data = NULL;
- }
- /*
- * Here we do NOT use the macro since the
- * association is now gone.
- */
- if (chk->whoTo) {
- sctp_free_remote_addr(chk->whoTo);
- chk->whoTo = NULL;
- }
- sctp_free_a_chunk((struct sctp_tcb *)NULL, chk);
- return (-1);
- }
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ memcpy(hb->heartbeat.hb_info.address,
+ &net->ro._l_addr.sin.sin_addr,
+ sizeof(net->ro._l_addr.sin.sin_addr));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ memcpy(hb->heartbeat.hb_info.address,
+ &net->ro._l_addr.sin6.sin6_addr,
+ sizeof(net->ro._l_addr.sin6.sin6_addr));
+ break;
+#endif
+ default:
+ return;
+ break;
}
net->hb_responded = 0;
TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
stcb->asoc.ctrl_queue_cnt++;
SCTP_STAT_INCR(sctps_sendheartbeat);
- /*-
- * Call directly med level routine to put out the chunk. It will
- * always tumble out control chunks aka HB but it may even tumble
- * out data too.
- */
- return (1);
+ return;
}
void
@@ -10865,13 +11178,25 @@ sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
struct sctp_ecne_chunk *ecne;
struct sctp_tmit_chunk *chk;
+ if (net == NULL) {
+ return;
+ }
asoc = &stcb->asoc;
SCTP_TCB_LOCK_ASSERT(stcb);
TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
- if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+ if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
/* found a previous ECN_ECHO update it if needed */
+ uint32_t cnt, ctsn;
+
ecne = mtod(chk->data, struct sctp_ecne_chunk *);
- ecne->tsn = htonl(high_tsn);
+ ctsn = ntohl(ecne->tsn);
+ if (SCTP_TSN_GT(high_tsn, ctsn)) {
+ ecne->tsn = htonl(high_tsn);
+ SCTP_STAT_INCR(sctps_queue_upd_ecne);
+ }
+ cnt = ntohl(ecne->num_pkts_since_cwr);
+ cnt++;
+ ecne->num_pkts_since_cwr = htonl(cnt);
return;
}
}
@@ -10881,14 +11206,14 @@ sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
return;
}
chk->copy_by_ref = 0;
- SCTP_STAT_INCR(sctps_sendecne);
+ SCTP_STAT_INCR(sctps_queue_upd_ecne);
chk->rec.chunk_id.id = SCTP_ECN_ECHO;
chk->rec.chunk_id.can_take_data = 0;
chk->asoc = &stcb->asoc;
chk->send_size = sizeof(struct sctp_ecne_chunk);
chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
if (chk->data == NULL) {
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
return;
}
SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
@@ -10897,33 +11222,28 @@ sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
chk->snd_count = 0;
chk->whoTo = net;
atomic_add_int(&chk->whoTo->ref_count, 1);
+
stcb->asoc.ecn_echo_cnt_onq++;
ecne = mtod(chk->data, struct sctp_ecne_chunk *);
ecne->ch.chunk_type = SCTP_ECN_ECHO;
ecne->ch.chunk_flags = 0;
ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
ecne->tsn = htonl(high_tsn);
- TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ ecne->num_pkts_since_cwr = htonl(1);
+ TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
asoc->ctrl_queue_cnt++;
}
void
sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
- struct mbuf *m, int iphlen, int bad_crc)
+ struct mbuf *m, int len, int iphlen, int bad_crc)
{
struct sctp_association *asoc;
struct sctp_pktdrop_chunk *drp;
struct sctp_tmit_chunk *chk;
uint8_t *datap;
- int len;
int was_trunc = 0;
- struct ip *iph;
-
-#ifdef INET6
- struct ip6_hdr *ip6h;
-
-#endif
- int fullsz = 0, extra = 0;
+ int fullsz = 0;
long spc;
int offset;
struct sctp_chunkhdr *ch, chunk_buf;
@@ -10948,26 +11268,8 @@ sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
return;
}
chk->copy_by_ref = 0;
- iph = mtod(m, struct ip *);
- if (iph == NULL) {
- sctp_free_a_chunk(stcb, chk);
- return;
- }
- switch (iph->ip_v) {
- case IPVERSION:
- /* IPv4 */
- len = chk->send_size = iph->ip_len;
- break;
-#ifdef INET6
- case IPV6_VERSION >> 4:
- /* IPv6 */
- ip6h = mtod(m, struct ip6_hdr *);
- len = chk->send_size = htons(ip6h->ip6_plen);
- break;
-#endif
- default:
- return;
- }
+ len -= iphlen;
+ chk->send_size = len;
/* Validate that we do not have an ABORT in here. */
offset = iphlen + sizeof(struct sctphdr);
ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
@@ -10988,7 +11290,7 @@ sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
* INIT-ACK, because we can't know if the initiation
* tag is correct or not.
*/
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
return;
default:
break;
@@ -11003,7 +11305,7 @@ sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
/*
* only send 1 mtu worth, trim off the excess on the end.
*/
- fullsz = len - extra;
+ fullsz = len;
len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
was_trunc = 1;
}
@@ -11011,7 +11313,7 @@ sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
if (chk->data == NULL) {
jump_out:
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
return;
}
SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
@@ -11048,10 +11350,10 @@ jump_out:
if (net) {
/* we should hit here */
chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
} else {
- chk->whoTo = asoc->primary_destination;
+ chk->whoTo = NULL;
}
- atomic_add_int(&chk->whoTo->ref_count, 1);
chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
chk->rec.chunk_id.can_take_data = 1;
drp->ch.chunk_type = SCTP_PACKET_DROPPED;
@@ -11081,26 +11383,37 @@ jump_out:
}
void
-sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
+sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
{
struct sctp_association *asoc;
struct sctp_cwr_chunk *cwr;
struct sctp_tmit_chunk *chk;
- asoc = &stcb->asoc;
SCTP_TCB_LOCK_ASSERT(stcb);
+ if (net == NULL) {
+ return;
+ }
+ asoc = &stcb->asoc;
TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
- if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
- /* found a previous ECN_CWR update it if needed */
+ if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
+ /*
+ * found a previous CWR queued to the same destination;
+ * update it if needed
+ */
+ uint32_t ctsn;
+
cwr = mtod(chk->data, struct sctp_cwr_chunk *);
- if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
- MAX_TSN)) {
+ ctsn = ntohl(cwr->tsn);
+ if (SCTP_TSN_GT(high_tsn, ctsn)) {
cwr->tsn = htonl(high_tsn);
}
+ if (override & SCTP_CWR_REDUCE_OVERRIDE) {
+ /* Make sure override is carried */
+ cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
+ }
return;
}
}
- /* nope could not find one to update so we must build one */
sctp_alloc_a_chunk(stcb, chk);
if (chk == NULL) {
return;
@@ -11112,7 +11425,7 @@ sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
chk->send_size = sizeof(struct sctp_cwr_chunk);
chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
if (chk->data == NULL) {
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
return;
}
SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
@@ -11123,7 +11436,7 @@ sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
atomic_add_int(&chk->whoTo->ref_count, 1);
cwr = mtod(chk->data, struct sctp_cwr_chunk *);
cwr->ch.chunk_type = SCTP_ECN_CWR;
- cwr->ch.chunk_flags = 0;
+ cwr->ch.chunk_flags = override;
cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
cwr->tsn = htonl(high_tsn);
TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
@@ -11135,13 +11448,11 @@ sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
int number_entries, uint16_t * list,
uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
- int len, old_len, i;
+ uint16_t len, old_len, i;
struct sctp_stream_reset_out_request *req_out;
struct sctp_chunkhdr *ch;
ch = mtod(chk->data, struct sctp_chunkhdr *);
-
-
old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
/* get to new offset for the param. */
@@ -11175,19 +11486,16 @@ sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
return;
}
-
-void
+static void
sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
int number_entries, uint16_t * list,
uint32_t seq)
{
- int len, old_len, i;
+ uint16_t len, old_len, i;
struct sctp_stream_reset_in_request *req_in;
struct sctp_chunkhdr *ch;
ch = mtod(chk->data, struct sctp_chunkhdr *);
-
-
old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
/* get to new offset for the param. */
@@ -11219,18 +11527,15 @@ sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
return;
}
-
-void
+static void
sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
uint32_t seq)
{
- int len, old_len;
+ uint16_t len, old_len;
struct sctp_stream_reset_tsn_request *req_tsn;
struct sctp_chunkhdr *ch;
ch = mtod(chk->data, struct sctp_chunkhdr *);
-
-
old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
/* get to new offset for the param. */
@@ -11254,13 +11559,11 @@ void
sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
uint32_t resp_seq, uint32_t result)
{
- int len, old_len;
+ uint16_t len, old_len;
struct sctp_stream_reset_response *resp;
struct sctp_chunkhdr *ch;
ch = mtod(chk->data, struct sctp_chunkhdr *);
-
-
old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
/* get to new offset for the param. */
@@ -11279,22 +11582,18 @@ sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
chk->send_size = SCTP_SIZE32(chk->book_size);
SCTP_BUF_LEN(chk->data) = chk->send_size;
return;
-
}
-
void
sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
uint32_t resp_seq, uint32_t result,
uint32_t send_una, uint32_t recv_next)
{
- int len, old_len;
+ uint16_t len, old_len;
struct sctp_stream_reset_response_tsn *resp;
struct sctp_chunkhdr *ch;
ch = mtod(chk->data, struct sctp_chunkhdr *);
-
-
old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
/* get to new offset for the param. */
@@ -11318,11 +11617,11 @@ sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
}
static void
-sctp_add_a_stream(struct sctp_tmit_chunk *chk,
+sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
uint32_t seq,
uint16_t adding)
{
- int len, old_len;
+ uint16_t len, old_len;
struct sctp_chunkhdr *ch;
struct sctp_stream_reset_add_strm *addstr;
@@ -11335,7 +11634,39 @@ sctp_add_a_stream(struct sctp_tmit_chunk *chk,
len = sizeof(struct sctp_stream_reset_add_strm);
/* Fill it out. */
- addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_STREAMS);
+ addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
+ addstr->ph.param_length = htons(len);
+ addstr->request_seq = htonl(seq);
+ addstr->number_of_streams = htons(adding);
+ addstr->reserved = 0;
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->book_size_scale = 0;
+ SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+ return;
+}
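Each of these helpers appends one more TLV parameter to the pending stream-reset chunk and then fixes up the chunk length; a worked example of the bookkeeping, assuming the chunk already carries a 16-byte out-request behind its 4-byte chunk header:

/* old_len = SCTP_SIZE32(ntohs(ch->chunk_length)) = 4 + 16 = 20
 * addstr  = (caddr_t)ch + 20       (new param starts right after)
 * len     = sizeof(struct sctp_stream_reset_add_strm) = 12
 * ch->chunk_length = htons(20 + 12) = htons(32)
 * chk->send_size   = 32; SCTP_BUF_LEN() is rounded with SCTP_SIZE32(32) = 32 */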
+
+static void
+sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
+ uint32_t seq,
+ uint16_t adding)
+{
+ uint16_t len, old_len;
+ struct sctp_chunkhdr *ch;
+ struct sctp_stream_reset_add_strm *addstr;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_add_strm);
+ /* Fill it out. */
+ addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
addstr->ph.param_length = htons(len);
addstr->request_seq = htonl(seq);
addstr->number_of_streams = htons(adding);
@@ -11354,12 +11685,11 @@ int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
int number_entries, uint16_t * list,
uint8_t send_out_req,
- uint32_t resp_seq,
uint8_t send_in_req,
uint8_t send_tsn_req,
uint8_t add_stream,
- uint16_t adding
-)
+ uint16_t adding_o,
+ uint16_t adding_i, uint8_t peer_asked)
{
struct sctp_association *asoc;
@@ -11401,7 +11731,7 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
if (chk->data == NULL) {
- sctp_free_a_chunk(stcb, chk);
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
return (ENOMEM);
}
@@ -11410,9 +11740,12 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
/* setup chunk parameters */
chk->sent = SCTP_DATAGRAM_UNSENT;
chk->snd_count = 0;
- chk->whoTo = asoc->primary_destination;
+ if (stcb->asoc.alternate) {
+ chk->whoTo = stcb->asoc.alternate;
+ } else {
+ chk->whoTo = stcb->asoc.primary_destination;
+ }
atomic_add_int(&chk->whoTo->ref_count, 1);
-
ch = mtod(chk->data, struct sctp_chunkhdr *);
ch->chunk_type = SCTP_STREAM_RESET;
ch->chunk_flags = 0;
@@ -11422,18 +11755,88 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
seq = stcb->asoc.str_reset_seq_out;
if (send_out_req) {
sctp_add_stream_reset_out(chk, number_entries, list,
- seq, resp_seq, (stcb->asoc.sending_seq - 1));
+ seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
asoc->stream_reset_out_is_outstanding = 1;
seq++;
asoc->stream_reset_outstanding++;
}
- if (add_stream) {
- sctp_add_a_stream(chk, seq, adding);
+ if ((add_stream & 1) &&
+ ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
+ /* Need to allocate more */
+ struct sctp_stream_out *oldstream;
+ struct sctp_stream_queue_pending *sp, *nsp;
+ int i;
+
+ oldstream = stcb->asoc.strmout;
+ /* get some more */
+ SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
+ ((stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out)),
+ SCTP_M_STRMO);
+ if (stcb->asoc.strmout == NULL) {
+ uint8_t x;
+
+ stcb->asoc.strmout = oldstream;
+ /* Turn off the bit */
+ x = add_stream & 0xfe;
+ add_stream = x;
+ goto skip_stuff;
+ }
+ /*
+ * Ok now we proceed with copying the old out stuff and
+ * initializing the new stuff.
+ */
+ SCTP_TCB_SEND_LOCK(stcb);
+ stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+ stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
+ stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
+ stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
+ stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
+ /* now anything on those queues? */
+ TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
+ TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
+ TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
+ }
+ /* Now move assoc pointers too */
+ if (stcb->asoc.last_out_stream == &oldstream[i]) {
+ stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
+ }
+ if (stcb->asoc.locked_on_sending == &oldstream[i]) {
+ stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
+ }
+ }
+ /* now the new streams */
+ stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
+ for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
+ TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+ stcb->asoc.strmout[i].chunks_on_queues = 0;
+ stcb->asoc.strmout[i].next_sequence_send = 0x0;
+ stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.strmout[i].last_msg_incomplete = 0;
+ stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
+ }
+ stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
+ SCTP_FREE(oldstream, SCTP_M_STRMO);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+skip_stuff:
+ if ((add_stream & 1) && (adding_o > 0)) {
+ asoc->strm_pending_add_size = adding_o;
+ asoc->peer_req_out = peer_asked;
+ sctp_add_an_out_stream(chk, seq, adding_o);
+ seq++;
+ asoc->stream_reset_outstanding++;
+ }
+ if ((add_stream & 2) && (adding_i > 0)) {
+ sctp_add_an_in_stream(chk, seq, adding_i);
seq++;
asoc->stream_reset_outstanding++;
}
if (send_in_req) {
sctp_add_stream_reset_in(chk, number_entries, list, seq);
+ seq++;
asoc->stream_reset_outstanding++;
}
if (send_tsn_req) {
@@ -11441,7 +11844,6 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
asoc->stream_reset_outstanding++;
}
asoc->str_reset = chk;
-
/* insert the chunk for sending */
TAILQ_INSERT_TAIL(&asoc->control_send_queue,
chk,
@@ -11452,499 +11854,37 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
}
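The request builder now takes separate out/in stream-add counts selected by bits in add_stream; a hypothetical caller adding two outgoing streams (no resets, no TSN request) might look like this, with all parameter names taken from the signature above:

/* bit 0x1 of add_stream requests outgoing streams, bit 0x2 incoming ones */
error = sctp_send_str_reset_req(stcb, 0, NULL,
    0,          /* send_out_req */
    0,          /* send_in_req */
    0,          /* send_tsn_req */
    1,          /* add_stream: out only */
    2,          /* adding_o */
    0,          /* adding_i */
    0);         /* peer_asked */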
void
-sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
- struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
+sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
+ uint8_t use_mflowid, uint32_t mflowid,
+ uint32_t vrf_id, uint16_t port)
{
- /*-
- * Formulate the abort message, and send it back down.
- */
- struct mbuf *o_pak;
- struct mbuf *mout;
- struct sctp_abort_msg *abm;
- struct ip *iph, *iph_out;
- struct udphdr *udp;
-
-#ifdef INET6
- struct ip6_hdr *ip6, *ip6_out;
-
-#endif
- int iphlen_out, len;
-
- /* don't respond to ABORT with ABORT */
+ /* Don't respond to an ABORT with an ABORT. */
if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
- if (err_cause)
- sctp_m_freem(err_cause);
- return;
- }
- iph = mtod(m, struct ip *);
- switch (iph->ip_v) {
- case IPVERSION:
- len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
- break;
-#ifdef INET6
- case IPV6_VERSION >> 4:
- len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
- break;
-#endif
- default:
- if (err_cause) {
- sctp_m_freem(err_cause);
- }
- return;
- }
- if (port) {
- len += sizeof(struct udphdr);
- }
- mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
- if (mout == NULL) {
- if (err_cause) {
- sctp_m_freem(err_cause);
- }
- return;
- }
- SCTP_BUF_RESV_UF(mout, max_linkhdr);
- SCTP_BUF_LEN(mout) = len;
- SCTP_BUF_NEXT(mout) = err_cause;
- iph_out = NULL;
-#ifdef INET6
- ip6_out = NULL;
-#endif
- switch (iph->ip_v) {
- case IPVERSION:
- iph_out = mtod(mout, struct ip *);
-
- /* Fill in the IP header for the ABORT */
- iph_out->ip_v = IPVERSION;
- iph_out->ip_hl = (sizeof(struct ip) / 4);
- iph_out->ip_tos = (u_char)0;
- iph_out->ip_id = 0;
- iph_out->ip_off = 0;
- iph_out->ip_ttl = MAXTTL;
- if (port) {
- iph_out->ip_p = IPPROTO_UDP;
- } else {
- iph_out->ip_p = IPPROTO_SCTP;
- }
- iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
- iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
- /* let IP layer calculate this */
- iph_out->ip_sum = 0;
-
- iphlen_out = sizeof(*iph_out);
- abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
- break;
-#ifdef INET6
- case IPV6_VERSION >> 4:
- ip6 = (struct ip6_hdr *)iph;
- ip6_out = mtod(mout, struct ip6_hdr *);
-
- /* Fill in the IP6 header for the ABORT */
- ip6_out->ip6_flow = ip6->ip6_flow;
- ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
- if (port) {
- ip6_out->ip6_nxt = IPPROTO_UDP;
- } else {
- ip6_out->ip6_nxt = IPPROTO_SCTP;
- }
- ip6_out->ip6_src = ip6->ip6_dst;
- ip6_out->ip6_dst = ip6->ip6_src;
-
- iphlen_out = sizeof(*ip6_out);
- abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
- break;
-#endif /* INET6 */
- default:
- /* Currently not supported */
- sctp_m_freem(mout);
- return;
- }
-
- udp = (struct udphdr *)abm;
- if (port) {
- udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
- udp->uh_dport = port;
- /* set udp->uh_ulen later */
- udp->uh_sum = 0;
- iphlen_out += sizeof(struct udphdr);
- abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
- }
- abm->sh.src_port = sh->dest_port;
- abm->sh.dest_port = sh->src_port;
- abm->sh.checksum = 0;
- if (vtag == 0) {
- abm->sh.v_tag = sh->v_tag;
- abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
- } else {
- abm->sh.v_tag = htonl(vtag);
- abm->msg.ch.chunk_flags = 0;
- }
- abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
-
- if (err_cause) {
- struct mbuf *m_tmp = err_cause;
- int err_len = 0;
-
- /* get length of the err_cause chain */
- while (m_tmp != NULL) {
- err_len += SCTP_BUF_LEN(m_tmp);
- m_tmp = SCTP_BUF_NEXT(m_tmp);
- }
- len = SCTP_BUF_LEN(mout) + err_len;
- if (err_len % 4) {
- /* need pad at end of chunk */
- uint32_t cpthis = 0;
- int padlen;
-
- padlen = 4 - (len % 4);
- m_copyback(mout, len, padlen, (caddr_t)&cpthis);
- len += padlen;
- }
- abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
- } else {
- len = SCTP_BUF_LEN(mout);
- abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
- }
-
- if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
- /* no mbuf's */
- sctp_m_freem(mout);
+ if (cause)
+ sctp_m_freem(cause);
return;
}
- if (iph_out != NULL) {
- sctp_route_t ro;
- int ret;
-
- /* zap the stack pointer to the route */
- bzero(&ro, sizeof ro);
- if (port) {
- udp->uh_ulen = htons(len - sizeof(struct ip));
- udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
- }
- SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
- SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
- /* set IPv4 length */
- iph_out->ip_len = len;
- /* out it goes */
-#ifdef SCTP_PACKET_LOGGING
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
- sctp_packet_log(mout, len);
-#endif
- SCTP_ATTACH_CHAIN(o_pak, mout, len);
- if (port) {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
- SCTP_STAT_INCR(sctps_sendswcrc);
-#endif
- SCTP_ENABLE_UDP_CSUM(o_pak);
- } else {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- mout->m_pkthdr.csum_flags = CSUM_SCTP;
- mout->m_pkthdr.csum_data = 0;
- SCTP_STAT_INCR(sctps_sendhwcrc);
-#endif
- }
- SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
-
- /* Free the route if we got one back */
- if (ro.ro_rt)
- RTFREE(ro.ro_rt);
- }
-#ifdef INET6
- if (ip6_out != NULL) {
- struct route_in6 ro;
- int ret;
- struct ifnet *ifp = NULL;
-
- /* zap the stack pointer to the route */
- bzero(&ro, sizeof(ro));
- if (port) {
- udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
- }
- SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
- SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
- ip6_out->ip6_plen = len - sizeof(*ip6_out);
-#ifdef SCTP_PACKET_LOGGING
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
- sctp_packet_log(mout, len);
-#endif
- SCTP_ATTACH_CHAIN(o_pak, mout, len);
- if (port) {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- abm->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
- SCTP_STAT_INCR(sctps_sendswcrc);
-#endif
- if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
- udp->uh_sum = 0xffff;
- }
- } else {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- mout->m_pkthdr.csum_flags = CSUM_SCTP;
- mout->m_pkthdr.csum_data = 0;
- SCTP_STAT_INCR(sctps_sendhwcrc);
-#endif
- }
- SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
-
- /* Free the route if we got one back */
- if (ro.ro_rt)
- RTFREE(ro.ro_rt);
- }
-#endif
- SCTP_STAT_INCR(sctps_sendpackets);
- SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
- SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
+ use_mflowid, mflowid,
+ vrf_id, port);
+ return;
}
void
-sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
+sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
+ uint8_t use_mflowid, uint32_t mflowid,
uint32_t vrf_id, uint16_t port)
{
- struct mbuf *o_pak;
- struct sctphdr *sh, *sh_out;
- struct sctp_chunkhdr *ch;
- struct ip *iph, *iph_out;
- struct udphdr *udp = NULL;
- struct mbuf *mout;
-
-#ifdef INET6
- struct ip6_hdr *ip6, *ip6_out;
-
-#endif
- int iphlen_out, len;
-
- iph = mtod(m, struct ip *);
- sh = (struct sctphdr *)((caddr_t)iph + iphlen);
- switch (iph->ip_v) {
- case IPVERSION:
- len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
- break;
-#ifdef INET6
- case IPV6_VERSION >> 4:
- len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
- break;
-#endif
- default:
- if (scm) {
- sctp_m_freem(scm);
- }
- return;
- }
- if (port) {
- len += sizeof(struct udphdr);
- }
- mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
- if (mout == NULL) {
- if (scm) {
- sctp_m_freem(scm);
- }
- return;
- }
- SCTP_BUF_RESV_UF(mout, max_linkhdr);
- SCTP_BUF_LEN(mout) = len;
- SCTP_BUF_NEXT(mout) = scm;
- iph_out = NULL;
-#ifdef INET6
- ip6_out = NULL;
-#endif
- switch (iph->ip_v) {
- case IPVERSION:
- iph_out = mtod(mout, struct ip *);
-
- /* Fill in the IP header for the ABORT */
- iph_out->ip_v = IPVERSION;
- iph_out->ip_hl = (sizeof(struct ip) / 4);
- iph_out->ip_tos = (u_char)0;
- iph_out->ip_id = 0;
- iph_out->ip_off = 0;
- iph_out->ip_ttl = MAXTTL;
- if (port) {
- iph_out->ip_p = IPPROTO_UDP;
- } else {
- iph_out->ip_p = IPPROTO_SCTP;
- }
- iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
- iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
- /* let IP layer calculate this */
- iph_out->ip_sum = 0;
-
- iphlen_out = sizeof(struct ip);
- sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
- break;
-#ifdef INET6
- case IPV6_VERSION >> 4:
- ip6 = (struct ip6_hdr *)iph;
- ip6_out = mtod(mout, struct ip6_hdr *);
-
- /* Fill in the IP6 header for the ABORT */
- ip6_out->ip6_flow = ip6->ip6_flow;
- ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
- if (port) {
- ip6_out->ip6_nxt = IPPROTO_UDP;
- } else {
- ip6_out->ip6_nxt = IPPROTO_SCTP;
- }
- ip6_out->ip6_src = ip6->ip6_dst;
- ip6_out->ip6_dst = ip6->ip6_src;
-
- iphlen_out = sizeof(struct ip6_hdr);
- sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
- break;
-#endif /* INET6 */
- default:
- /* Currently not supported */
- sctp_m_freem(mout);
- return;
- }
-
- udp = (struct udphdr *)sh_out;
- if (port) {
- udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
- udp->uh_dport = port;
- /* set udp->uh_ulen later */
- udp->uh_sum = 0;
- iphlen_out += sizeof(struct udphdr);
- sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
- }
- sh_out->src_port = sh->dest_port;
- sh_out->dest_port = sh->src_port;
- sh_out->v_tag = vtag;
- sh_out->checksum = 0;
-
- ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
- ch->chunk_type = SCTP_OPERATION_ERROR;
- ch->chunk_flags = 0;
-
- if (scm) {
- struct mbuf *m_tmp = scm;
- int cause_len = 0;
-
- /* get length of the err_cause chain */
- while (m_tmp != NULL) {
- cause_len += SCTP_BUF_LEN(m_tmp);
- m_tmp = SCTP_BUF_NEXT(m_tmp);
- }
- len = SCTP_BUF_LEN(mout) + cause_len;
- if (cause_len % 4) {
- /* need pad at end of chunk */
- uint32_t cpthis = 0;
- int padlen;
-
- padlen = 4 - (len % 4);
- m_copyback(mout, len, padlen, (caddr_t)&cpthis);
- len += padlen;
- }
- ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
- } else {
- len = SCTP_BUF_LEN(mout);
- ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
- }
-
- if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
- /* no mbuf's */
- sctp_m_freem(mout);
- return;
- }
- if (iph_out != NULL) {
- sctp_route_t ro;
- int ret;
-
- /* zap the stack pointer to the route */
- bzero(&ro, sizeof ro);
- if (port) {
- udp->uh_ulen = htons(len - sizeof(struct ip));
- udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
- }
- /* set IPv4 length */
- iph_out->ip_len = len;
- /* out it goes */
-#ifdef SCTP_PACKET_LOGGING
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
- sctp_packet_log(mout, len);
-#endif
- SCTP_ATTACH_CHAIN(o_pak, mout, len);
- if (port) {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
- SCTP_STAT_INCR(sctps_sendswcrc);
-#endif
- SCTP_ENABLE_UDP_CSUM(o_pak);
- } else {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- mout->m_pkthdr.csum_flags = CSUM_SCTP;
- mout->m_pkthdr.csum_data = 0;
- SCTP_STAT_INCR(sctps_sendhwcrc);
-#endif
- }
- SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
-
- /* Free the route if we got one back */
- if (ro.ro_rt)
- RTFREE(ro.ro_rt);
- }
-#ifdef INET6
- if (ip6_out != NULL) {
- struct route_in6 ro;
- int ret;
- struct ifnet *ifp = NULL;
-
- /* zap the stack pointer to the route */
- bzero(&ro, sizeof(ro));
- if (port) {
- udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
- }
- ip6_out->ip6_plen = len - sizeof(*ip6_out);
-#ifdef SCTP_PACKET_LOGGING
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
- sctp_packet_log(mout, len);
-#endif
- SCTP_ATTACH_CHAIN(o_pak, mout, len);
- if (port) {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- sh_out->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
- SCTP_STAT_INCR(sctps_sendswcrc);
-#endif
- if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
- udp->uh_sum = 0xffff;
- }
- } else {
-#if defined(SCTP_WITH_NO_CSUM)
- SCTP_STAT_INCR(sctps_sendnocrc);
-#else
- mout->m_pkthdr.csum_flags = CSUM_SCTP;
- mout->m_pkthdr.csum_data = 0;
- SCTP_STAT_INCR(sctps_sendhwcrc);
-#endif
- }
- SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
-
- /* Free the route if we got one back */
- if (ro.ro_rt)
- RTFREE(ro.ro_rt);
- }
-#endif
- SCTP_STAT_INCR(sctps_sendpackets);
- SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
- SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
+ use_mflowid, mflowid,
+ vrf_id, port);
+ return;
}
static struct mbuf *
-sctp_copy_resume(struct sctp_stream_queue_pending *sp,
- struct uio *uio,
- struct sctp_sndrcvinfo *srcv,
+sctp_copy_resume(struct uio *uio,
int max_send_len,
int user_marks_eor,
int *error,
@@ -11993,8 +11933,7 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
struct sctp_nets *net,
int max_send_len,
int user_marks_eor,
- int *error,
- int non_blocking)
+ int *error)
{
/*-
* This routine must be very careful in its work. Protocol
@@ -12029,7 +11968,6 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
sp->timetolive = srcv->sinfo_timetolive;
sp->ppid = srcv->sinfo_ppid;
sp->context = srcv->sinfo_context;
- sp->strseq = 0;
(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
sp->stream = srcv->sinfo_stream;
@@ -12051,15 +11989,19 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
*error = 0;
goto skip_copy;
}
- sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+ if (srcv->sinfo_keynumber_valid) {
+ sp->auth_keyid = srcv->sinfo_keynumber;
+ } else {
+ sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+ }
if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
- sctp_auth_key_acquire(stcb, stcb->asoc.authinfo.active_keyid);
+ sctp_auth_key_acquire(stcb, sp->auth_keyid);
sp->holds_key_ref = 1;
}
*error = sctp_copy_one(sp, uio, resv_in_first);
skip_copy:
if (*error) {
- sctp_free_a_strmoq(stcb, sp);
+ sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
sp = NULL;
} else {
if (sp->sinfo_flags & SCTP_ADDR_OVER) {
@@ -12085,8 +12027,8 @@ sctp_sosend(struct socket *so,
struct thread *p
)
{
- int error, use_rcvinfo = 0;
- struct sctp_sndrcvinfo srcv;
+ int error, use_sndinfo = 0;
+ struct sctp_sndrcvinfo sndrcvninfo;
struct sockaddr *addr_to_use;
#if defined(INET) && defined(INET6)
@@ -12096,10 +12038,10 @@ sctp_sosend(struct socket *so,
if (control) {
/* process cmsg snd/rcv info (maybe a assoc-id) */
- if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
- sizeof(srcv))) {
+ if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
+ sizeof(sndrcvninfo))) {
/* got one */
- use_rcvinfo = 1;
+ use_sndinfo = 1;
}
}
addr_to_use = addr;
@@ -12117,7 +12059,7 @@ sctp_sosend(struct socket *so,
error = sctp_lower_sosend(so, addr_to_use, uio, top,
control,
flags,
- use_rcvinfo ? &srcv : NULL
+ use_sndinfo ? &sndrcvninfo : NULL
,p
);
return (error);
@@ -12194,7 +12136,7 @@ sctp_lower_sosend(struct socket *so,
sndlen = SCTP_HEADER_LEN(i_pak);
}
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
- addr,
+ (void *)addr,
sndlen);
if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
(inp->sctp_socket->so_qlimit)) {
@@ -12211,7 +12153,7 @@ sctp_lower_sosend(struct socket *so,
union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
switch (raddr->sa.sa_family) {
-#if defined(INET)
+#ifdef INET
case AF_INET:
if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
@@ -12221,7 +12163,7 @@ sctp_lower_sosend(struct socket *so,
port = raddr->sin.sin_port;
break;
#endif
-#if defined(INET6)
+#ifdef INET6
case AF_INET6:
if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
@@ -12270,14 +12212,10 @@ sctp_lower_sosend(struct socket *so,
(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
- if (stcb == NULL) {
- SCTP_INP_RUNLOCK(inp);
- SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
- error = ENOTCONN;
- goto out_unlocked;
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
}
- SCTP_TCB_LOCK(stcb);
- hold_tcblock = 1;
SCTP_INP_RUNLOCK(inp);
} else if (sinfo_assoc_id) {
stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
@@ -12322,6 +12260,9 @@ sctp_lower_sosend(struct socket *so,
SCTP_INP_WUNLOCK(inp);
/* With the lock applied look again */
stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
+ if ((stcb == NULL) && (control != NULL) && (port > 0)) {
+ stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
+ }
if (stcb == NULL) {
SCTP_INP_WLOCK(inp);
SCTP_INP_DECR_REF(inp);
@@ -12329,6 +12270,9 @@ sctp_lower_sosend(struct socket *so,
} else {
hold_tcblock = 1;
}
+ if (error) {
+ goto out_unlocked;
+ }
if (t_inp != inp) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
error = ENOTCONN;
@@ -12336,21 +12280,12 @@ sctp_lower_sosend(struct socket *so,
}
}
if (stcb == NULL) {
- if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
- (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
- SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
- error = ENOTCONN;
- goto out_unlocked;
- }
if (addr == NULL) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
error = ENOENT;
goto out_unlocked;
} else {
- /*
- * UDP style, we must go ahead and start the INIT
- * process
- */
+ /* We must go ahead and start the INIT process */
uint32_t vrf_id;
if ((sinfo_flags & SCTP_ABORT) ||
@@ -12377,6 +12312,15 @@ sctp_lower_sosend(struct socket *so,
/* Error is setup for us in the call */
goto out_unlocked;
}
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /*
+ * Set the connected flag so we can queue
+ * data
+ */
+ soisconnecting(so);
+ }
+ hold_tcblock = 1;
if (create_lock_applied) {
SCTP_ASOC_CREATE_UNLOCK(inp);
create_lock_applied = 0;
@@ -12396,85 +12340,13 @@ sctp_lower_sosend(struct socket *so,
sctp_initialize_auth_params(inp, stcb);
if (control) {
- /*
- * see if a init structure exists in cmsg
- * headers
- */
- struct sctp_initmsg initm;
- int i;
-
- if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
- sizeof(initm))) {
- /*
- * we have an INIT override of the
- * default
- */
- if (initm.sinit_max_attempts)
- asoc->max_init_times = initm.sinit_max_attempts;
- if (initm.sinit_num_ostreams)
- asoc->pre_open_streams = initm.sinit_num_ostreams;
- if (initm.sinit_max_instreams)
- asoc->max_inbound_streams = initm.sinit_max_instreams;
- if (initm.sinit_max_init_timeo)
- asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
- if (asoc->streamoutcnt < asoc->pre_open_streams) {
- struct sctp_stream_out *tmp_str;
- int had_lock = 0;
-
- /* Default is NOT correct */
- SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n",
- asoc->streamoutcnt, asoc->pre_open_streams);
- /*
- * What happens if this
- * fails? we panic ...
- */
-
- if (hold_tcblock) {
- had_lock = 1;
- SCTP_TCB_UNLOCK(stcb);
- }
- SCTP_MALLOC(tmp_str,
- struct sctp_stream_out *,
- (asoc->pre_open_streams *
- sizeof(struct sctp_stream_out)),
- SCTP_M_STRMO);
- if (had_lock) {
- SCTP_TCB_LOCK(stcb);
- }
- if (tmp_str != NULL) {
- SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
- asoc->strmout = tmp_str;
- asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
- } else {
- asoc->pre_open_streams = asoc->streamoutcnt;
- }
- for (i = 0; i < asoc->streamoutcnt; i++) {
- /*-
- * inbound side must be set
- * to 0xffff, also NOTE when
- * we get the INIT-ACK back
- * (for INIT sender) we MUST
- * reduce the count
- * (streamoutcnt) but first
- * check if we sent to any
- * of the upper streams that
- * were dropped (if some
- * were). Those that were
- * dropped must be notified
- * to the upper layer as
- * failed to send.
- */
- asoc->strmout[i].next_sequence_sent = 0x0;
- TAILQ_INIT(&asoc->strmout[i].outqueue);
- asoc->strmout[i].stream_no = i;
- asoc->strmout[i].last_msg_incomplete = 0;
- asoc->strmout[i].next_spoke.tqe_next = 0;
- asoc->strmout[i].next_spoke.tqe_prev = 0;
- }
- }
+ if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
+ sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
+ hold_tcblock = 0;
+ stcb = NULL;
+ goto out_unlocked;
}
}
- hold_tcblock = 1;
/* out with the INIT */
queue_only_for_init = 1;
/*-
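Note: the hunk above folds the old inline SCTP_INIT cmsg parsing into sctp_process_cmsgs_for_init(). For context, a minimal userland sketch of how such ancillary data reaches this path on a send, following the RFC 6458 socket API and the sctp(4) header conventions; the helper name and the stream/attempt values are illustrative, not taken from this file:

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/sctp.h>
#include <string.h>

/* Illustrative sender: ask for 10 outbound streams on the implicit setup. */
static ssize_t
send_with_init_params(int sd, struct sockaddr *to, socklen_t tolen,
    void *data, size_t len)
{
	struct sctp_initmsg init;
	struct iovec iov = { .iov_base = data, .iov_len = len };
	char cbuf[CMSG_SPACE(sizeof(init))];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&init, 0, sizeof(init));
	init.sinit_num_ostreams = 10;
	init.sinit_max_attempts = 4;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = to;
	msg.msg_namelen = tolen;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_INIT;
	cmsg->cmsg_len = CMSG_LEN(sizeof(init));
	memcpy(CMSG_DATA(cmsg), &init, sizeof(init));

	return (sendmsg(sd, &msg, 0));
}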
@@ -12500,7 +12372,11 @@ sctp_lower_sosend(struct socket *so,
goto out_unlocked;
}
} else {
- net = stcb->asoc.primary_destination;
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
}
atomic_add_int(&stcb->total_sends, 1);
/* Keep the stcb from being freed under our feet */
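Note: the change above makes the send path prefer stcb->asoc.alternate over the configured primary destination. Restated as a stand-alone sketch (this helper does not exist in the file; it only expresses the selection rule, assuming alternate is non-NULL whenever an alternate transport is in use):

static inline struct sctp_nets *
select_destination(struct sctp_tcb *stcb)
{
	if (stcb->asoc.alternate != NULL) {
		return (stcb->asoc.alternate);
	}
	return (stcb->asoc.primary_destination);
}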
@@ -12514,9 +12390,9 @@ sctp_lower_sosend(struct socket *so,
goto out_unlocked;
}
}
- if ((SCTP_SO_IS_NBIO(so)
+ if (SCTP_SO_IS_NBIO(so)
|| (flags & MSG_NBIO)
- )) {
+ ) {
non_blocking = 1;
}
/* would we block? */
@@ -12607,15 +12483,12 @@ sctp_lower_sosend(struct socket *so,
if (top) {
struct mbuf *cntm = NULL;
- mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
+ mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAIT, 1, MT_DATA);
if (sndlen != 0) {
- cntm = top;
- while (cntm) {
+ for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
tot_out += SCTP_BUF_LEN(cntm);
- cntm = SCTP_BUF_NEXT(cntm);
}
}
- tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
} else {
/* Must fit in a MTU */
tot_out = sndlen;
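Note: the rewritten loop a few lines up is the usual mbuf-chain accounting walk. In isolation it looks like the hypothetical helper below (SCTP_BUF_LEN()/SCTP_BUF_NEXT() are the SCTP wrappers around m_len/m_next):

static int
chain_length(struct mbuf *top)
{
	struct mbuf *m;
	int total = 0;

	/* Sum the data length of every mbuf in the chain. */
	for (m = top; m != NULL; m = SCTP_BUF_NEXT(m)) {
		total += SCTP_BUF_LEN(m);
	}
	return (total);
}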
@@ -12644,7 +12517,7 @@ sctp_lower_sosend(struct socket *so,
/* now move forward the data pointer */
ph = mtod(mm, struct sctp_paramhdr *);
ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
- ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
+ ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out);
ph++;
SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
if (top == NULL) {
@@ -12666,14 +12539,11 @@ sctp_lower_sosend(struct socket *so,
}
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
- hold_tcblock = 1;
}
atomic_add_int(&stcb->asoc.refcnt, -1);
free_cnt_applied = 0;
/* release this lock, otherwise we hang on ourselves */
- sctp_abort_an_association(stcb->sctp_ep, stcb,
- SCTP_RESPONSE_TO_USER_REQ,
- mm, SCTP_SO_LOCKED);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
/* now relock the stcb so everything is sane */
hold_tcblock = 0;
stcb = NULL;
@@ -12763,7 +12633,7 @@ sctp_lower_sosend(struct socket *so,
stcb->asoc.chunks_on_out_queue,
SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
- sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, so, asoc, sndlen);
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
}
be.error = 0;
stcb->block_entry = &be;
@@ -12782,7 +12652,7 @@ sctp_lower_sosend(struct socket *so,
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
- so, asoc, stcb->asoc.total_output_queue_size);
+ asoc, stcb->asoc.total_output_queue_size);
}
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
goto out_unlocked;
@@ -12832,7 +12702,7 @@ skip_preblock:
strm = &stcb->asoc.strmout[srcv->sinfo_stream];
if (strm->last_msg_incomplete == 0) {
do_a_copy_in:
- sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
+ sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
if ((sp == NULL) || (error)) {
goto out;
}
@@ -12852,23 +12722,11 @@ skip_preblock:
}
sctp_snd_sb_alloc(stcb, sp->length);
atomic_add_int(&asoc->stream_queue_cnt, 1);
- if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
- sp->strseq = strm->next_sequence_sent;
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
- sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
- (uintptr_t) stcb, sp->length,
- (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
- }
- strm->next_sequence_sent++;
- } else {
+ if (srcv->sinfo_flags & SCTP_UNORDERED) {
SCTP_STAT_INCR(sctps_sends_with_unord);
}
TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
- if ((strm->next_spoke.tqe_next == NULL) &&
- (strm->next_spoke.tqe_prev == NULL)) {
- /* Not on wheel, insert */
- sctp_insert_on_wheel(stcb, asoc, strm, 1);
- }
+ stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
SCTP_TCB_SEND_UNLOCK(stcb);
} else {
SCTP_TCB_SEND_LOCK(stcb);
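Note: the "wheel" bookkeeping removed above now lives behind stcb->asoc.ss_functions.sctp_ss_add_to_stream(), i.e. the association carries a table of stream-scheduler callbacks. A rough, hypothetical sketch of that shape (member names and signatures are illustrative, not the real sctp_ss_functions definition):

struct example_ss_functions {
	/* Enqueue a pending message and mark the stream as having data. */
	void	(*ss_add_to_stream)(struct sctp_tcb *stcb,
		    struct sctp_association *asoc, struct sctp_stream_out *strm,
		    struct sctp_stream_queue_pending *sp, int holds_lock);
	/* Pick the next stream to drain when filling a packet. */
	struct sctp_stream_out *(*ss_select_stream)(struct sctp_tcb *stcb,
		    struct sctp_nets *net, struct sctp_association *asoc);
};

Swapping the table (round-robin versus priority scheduling, for instance) changes the ordering policy without touching sctp_lower_sosend() itself.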
@@ -12904,7 +12762,7 @@ skip_preblock:
SCTP_TCB_UNLOCK(stcb);
hold_tcblock = 0;
}
- mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
+ mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
if ((mm == NULL) || error) {
if (mm) {
sctp_m_freem(mm);
@@ -13086,7 +12944,7 @@ skip_preblock:
min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
- so, asoc, uio->uio_resid);
+ asoc, uio->uio_resid);
}
be.error = 0;
stcb->block_entry = &be;
@@ -13106,7 +12964,7 @@ skip_preblock:
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
- so, asoc, stcb->asoc.total_output_queue_size);
+ asoc, stcb->asoc.total_output_queue_size);
}
}
SOCKBUF_UNLOCK(&so->so_snd);
@@ -13154,8 +13012,7 @@ skip_preblock:
dataless_eof:
/* EOF thing ? */
if ((srcv->sinfo_flags & SCTP_EOF) &&
- (got_all_of_the_send == 1) &&
- (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ (got_all_of_the_send == 1)) {
int cnt;
SCTP_STAT_INCR(sctps_sends_with_eof);
@@ -13164,7 +13021,7 @@ dataless_eof:
SCTP_TCB_LOCK(stcb);
hold_tcblock = 1;
}
- cnt = sctp_is_there_unsent_data(stcb);
+ cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
if (TAILQ_EMPTY(&asoc->send_queue) &&
TAILQ_EMPTY(&asoc->sent_queue) &&
(cnt == 0)) {
@@ -13175,15 +13032,23 @@ dataless_eof:
if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ struct sctp_nets *netp;
+
/* only send SHUTDOWN the first time through */
- sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
}
SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ if (stcb->asoc.alternate) {
+ netp = stcb->asoc.alternate;
+ } else {
+ netp = stcb->asoc.primary_destination;
+ }
+ sctp_send_shutdown(stcb, netp);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
- asoc->primary_destination);
+ netp);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
asoc->primary_destination);
}
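Note: this is the dataless-EOF path; with the hunk above the SHUTDOWN also goes out the alternate transport when one is in use, and the shutdown-related timers are stopped first. From userland it is reached with a zero-length send carrying SCTP_EOF, roughly as in this sketch (error handling omitted; see sctp_sendmsg(3)):

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Ask the stack to SHUTDOWN the association once queued data is delivered. */
static int
graceful_close(int sd, const struct sockaddr *to, socklen_t tolen)
{
	if (sctp_sendmsg(sd, NULL, 0, to, tolen,
	    0 /* ppid */, SCTP_EOF, 0 /* stream */, 0 /* ttl */, 0 /* context */) < 0) {
		return (-1);
	}
	return (0);
}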
@@ -13225,7 +13090,6 @@ dataless_eof:
free_cnt_applied = 0;
}
sctp_abort_an_association(stcb->sctp_ep, stcb,
- SCTP_RESPONSE_TO_USER_REQ,
NULL, SCTP_SO_LOCKED);
/*
* now relock the stcb so everything
@@ -13301,8 +13165,6 @@ skip_out_eof:
stcb->asoc.total_flight,
stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
}
- if (queue_only_for_init)
- queue_only_for_init = 0;
if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
/* we can attempt to send too. */
if (hold_tcblock == 0) {
@@ -13348,11 +13210,9 @@ out_unlocked:
if (local_soresv && stcb) {
atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
- local_soresv = 0;
}
if (create_lock_applied) {
SCTP_ASOC_CREATE_UNLOCK(inp);
- create_lock_applied = 0;
}
if ((stcb) && hold_tcblock) {
SCTP_TCB_UNLOCK(stcb);
@@ -13374,7 +13234,7 @@ out_unlocked:
if (inp) {
sctp_validate_no_locks(inp);
} else {
- printf("Warning - inp is NULL so cant validate locks\n");
+ SCTP_PRINTF("Warning - inp is NULL so cant validate locks\n");
}
#endif
if (top) {
@@ -13398,6 +13258,7 @@ sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
struct mbuf *m_auth;
struct sctp_auth_chunk *auth;
int chunk_len;
+ struct mbuf *cn;
if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
(stcb == NULL))
@@ -13435,17 +13296,10 @@ sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
/* key id and hmac digest will be computed and filled in upon send */
/* save the offset where the auth was inserted into the chain */
- if (m != NULL) {
- struct mbuf *cn;
-
- *offset = 0;
- cn = m;
- while (cn) {
- *offset += SCTP_BUF_LEN(cn);
- cn = SCTP_BUF_NEXT(cn);
- }
- } else
- *offset = 0;
+ *offset = 0;
+ for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
+ *offset += SCTP_BUF_LEN(cn);
+ }
/* update length and return pointer to the auth chunk */
SCTP_BUF_LEN(m_auth) = chunk_len;
@@ -13485,8 +13339,7 @@ sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
/* search installed gateway from prefix entry */
- for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
- pfxrtr->pfr_next) {
+ LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
memset(&gw6, 0, sizeof(struct sockaddr_in6));
gw6.sin6_family = AF_INET6;
gw6.sin6_len = sizeof(struct sockaddr_in6);
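Note: LIST_FOREACH() above replaces the hand-rolled lh_first/pfr_next walk with the standard <sys/queue.h> iteration macro. A self-contained illustration of the idiom (the types are made up for the example):

#include <sys/queue.h>

struct advertised_router {
	int				id;
	LIST_ENTRY(advertised_router)	entries;	/* plays the role of pfr_entry */
};
LIST_HEAD(router_list, advertised_router);

static int
count_routers(struct router_list *head)
{
	struct advertised_router *r;
	int n = 0;

	LIST_FOREACH(r, head, entries) {
		n++;
	}
	return (n);
}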
@@ -13511,6 +13364,7 @@ sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
+#ifdef INET
struct sockaddr_in *sin, *mask;
struct ifaddr *ifa;
struct in_addr srcnetaddr, gwnetaddr;
@@ -13535,5 +13389,6 @@ sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
return (1);
}
+#endif
return (0);
}
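Note: the IPv4 next-hop check above reduces to masking both the source address and the gateway with the interface netmask and comparing the network parts. Isolated into a hypothetical helper (byte order does not matter here because both operands are masked identically):

#include <netinet/in.h>

static int
same_ipv4_prefix(struct in_addr src, struct in_addr gw, struct in_addr mask)
{
	return ((src.s_addr & mask.s_addr) == (gw.s_addr & mask.s_addr));
}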