xref: /xnu-10002.1.13/bsd/netinet/tcp_output.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
61  * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.10 2001/07/07 04:30:38 silby Exp $
62  */
63 /*
64  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65  * support for mandatory and extensible security protections.  This notice
66  * is included in support of clause 2.2 (b) of the Apple Public License,
67  * Version 2.0.
68  */
69 
70 #define _IP_VHL
71 
72 #include "tcp_includes.h"
73 
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/kernel.h>
77 #include <sys/sysctl.h>
78 #include <sys/mbuf.h>
79 #include <sys/domain.h>
80 #include <sys/protosw.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 
84 #include <net/route.h>
85 #include <net/ntstat.h>
86 #include <net/if_var.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/dlil.h>
90 
91 #include <netinet/in.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/in_var.h>
94 #include <netinet/in_tclass.h>
95 #include <netinet/ip.h>
96 #include <netinet/in_pcb.h>
97 #include <netinet/ip_var.h>
98 #include <mach/sdt.h>
99 #include <netinet6/in6_pcb.h>
100 #include <netinet/ip6.h>
101 #include <netinet6/ip6_var.h>
102 #include <netinet/tcp.h>
103 #include <netinet/tcp_cache.h>
104 #include <netinet/tcp_fsm.h>
105 #include <netinet/tcp_seq.h>
106 #include <netinet/tcp_timer.h>
107 #include <netinet/tcp_var.h>
108 #include <netinet/tcpip.h>
109 #include <netinet/tcp_cc.h>
110 #if TCPDEBUG
111 #include <netinet/tcp_debug.h>
112 #endif
113 #include <netinet/tcp_log.h>
114 #include <sys/kdebug.h>
115 #include <mach/sdt.h>
116 
117 #if IPSEC
118 #include <netinet6/ipsec.h>
119 #endif /*IPSEC*/
120 
121 #if MPTCP
122 #include <netinet/mptcp_var.h>
123 #include <netinet/mptcp.h>
124 #include <netinet/mptcp_opt.h>
125 #include <netinet/mptcp_seq.h>
126 #endif
127 
128 #include <corecrypto/ccaes.h>
129 
130 #define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETTCP, 1)
131 #define DBG_LAYER_END           NETDBG_CODE(DBG_NETTCP, 3)
132 #define DBG_FNC_TCP_OUTPUT      NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1)
133 
134 SYSCTL_SKMEM_TCP_INT(OID_AUTO, path_mtu_discovery,
135     CTLFLAG_RW | CTLFLAG_LOCKED, int, path_mtu_discovery, 1,
136     "Enable Path MTU Discovery");
137 
138 SYSCTL_SKMEM_TCP_INT(OID_AUTO, local_slowstart_flightsize,
139     CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz_local, 8,
140     "Slow start flight size for local networks");
141 
142 SYSCTL_SKMEM_TCP_INT(OID_AUTO, tso, CTLFLAG_RW | CTLFLAG_LOCKED,
143     int, tcp_do_tso, 1, "Enable TCP Segmentation Offload");
144 
145 SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_setup_percentage,
146     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ecn_setup_percentage, 100,
147     "Max ECN setup percentage");
148 
149 SYSCTL_SKMEM_TCP_INT(OID_AUTO, accurate_ecn,
150     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_ecn, 0,
151     "Accurate ECN mode (0: disable, 1: enable ACE feedback");
152 
153 // TO BE REMOVED
154 SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_ack_compression,
155     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_ack_compression, 1,
156     "Enable TCP ACK compression (on (cell only): 1, off: 0, on (all interfaces): 2)");
157 
158 SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_compression_rate,
159     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ack_compression_rate, TCP_COMP_CHANGE_RATE,
160     "Rate at which we force sending new ACKs (in ms)");
161 
162 SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_timestamps,
163     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_randomize_timestamps, 1,
164     "Randomize TCP timestamps to prevent tracking (on: 1, off: 0)");
165 
166 static int
167 sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS
168 {
169 #pragma unused(oidp, arg1, arg2)
170 	int i, err = 0, changed = 0;
171 	struct ifnet *ifp;
172 
173 	err = sysctl_io_number(req, tcp_ecn_outbound, sizeof(int32_t),
174 	    &i, &changed);
175 	if (err != 0 || req->newptr == USER_ADDR_NULL) {
176 		return err;
177 	}
178 
179 	if (changed) {
180 		if ((tcp_ecn_outbound == 0 || tcp_ecn_outbound == 1) &&
181 		    (i == 0 || i == 1)) {
182 			tcp_ecn_outbound = i;
183 			SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_initiate_out, tcp_ecn_outbound);
184 			return err;
185 		}
186 		if (tcp_ecn_outbound == 2 && (i == 0 || i == 1)) {
187 			/*
188 			 * Reset ECN enable flags on non-cellular
189 			 * interfaces so that the system default will take
190 			 * over
191 			 */
192 			ifnet_head_lock_shared();
193 			TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
194 				if (!IFNET_IS_CELLULAR(ifp)) {
195 					if_clear_eflags(ifp,
196 					    IFEF_ECN_ENABLE |
197 					    IFEF_ECN_DISABLE);
198 				}
199 			}
200 			ifnet_head_done();
201 		} else {
202 			/*
203 			 * Set ECN enable flags on non-cellular
204 			 * interfaces
205 			 */
206 			ifnet_head_lock_shared();
207 			TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
208 				if (!IFNET_IS_CELLULAR(ifp)) {
209 					if_set_eflags(ifp, IFEF_ECN_ENABLE);
210 					if_clear_eflags(ifp, IFEF_ECN_DISABLE);
211 				}
212 			}
213 			ifnet_head_done();
214 		}
215 		tcp_ecn_outbound = i;
216 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_initiate_out, tcp_ecn_outbound);
217 	}
218 	/* Change the other setting (tcp_ecn_inbound) too, as the work is already done */
219 	if (i == 2 || tcp_ecn_inbound == 2) {
220 		tcp_ecn_inbound = i;
221 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_negotiate_in, tcp_ecn_inbound);
222 	}
223 	return err;
224 }
225 
226 int     tcp_ecn_outbound = 2;
227 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, ecn_initiate_out,
228     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_ecn_outbound, 0,
229     sysctl_change_ecn_setting, "IU",
230     "Initiate ECN for outbound connections");
231 
232 int     tcp_ecn_inbound = 2;
233 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, ecn_negotiate_in,
234     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_ecn_inbound, 0,
235     sysctl_change_ecn_setting, "IU",
236     "Initiate ECN for inbound connections");
237 
238 SYSCTL_SKMEM_TCP_INT(OID_AUTO, packetchain,
239     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_packet_chaining, 50,
240     "Enable TCP output packet chaining");
241 
242 SYSCTL_SKMEM_TCP_INT(OID_AUTO, socket_unlocked_on_output,
243     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_output_unlocked, 1,
244     "Unlock TCP when sending packets down to IP");
245 
246 SYSCTL_SKMEM_TCP_INT(OID_AUTO, min_iaj_win,
247     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_min_iaj_win, MIN_IAJ_WIN,
248     "Minimum recv win based on inter-packet arrival jitter");
249 
250 SYSCTL_SKMEM_TCP_INT(OID_AUTO, acc_iaj_react_limit,
251     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_iaj_react_limit,
252     ACC_IAJ_REACT_LIMIT, "Accumulated IAJ when receiver starts to react");
253 
254 SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufinc,
255     CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_inc,
256     8 * 1024, "Increment in send socket buffer size");
257 
258 SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufmax,
259     CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, uint32_t, tcp_autosndbuf_max, 2 * 1024 * 1024,
260     "Maximum send socket buffer size");
261 
262 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_recvbg,
263     CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_use_rtt_recvbg, 1,
264     "Use RTT for bg recv algorithm");
265 
266 SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_throttle_minwin,
267     CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_recv_throttle_minwin, 16 * 1024,
268     "Minimum recv win for throttling");
269 
270 SYSCTL_SKMEM_TCP_INT(OID_AUTO, enable_tlp,
271     CTLFLAG_RW | CTLFLAG_LOCKED,
272     int32_t, tcp_enable_tlp, 1, "Enable Tail loss probe");
273 
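/* Statistics for the TCP->IP packet-chain batching path. */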
274 static int32_t packchain_newlist = 0;
275 static int32_t packchain_looped = 0;
276 static int32_t packchain_sent = 0;
277 
278 /* temporary: for testing */
279 #if IPSEC
280 extern int ipsec_bypass;
281 #endif
282 
283 extern int slowlink_wsize;      /* window correction for slow links */
284 
285 extern u_int32_t kipf_count;
286 
287 static int tcp_ip_output(struct socket *, struct tcpcb *, struct mbuf *,
288     int, struct mbuf *, int, int, boolean_t);
289 static int tcp_recv_throttle(struct tcpcb *tp);
290 
291 __attribute__((noinline))
292 static int32_t
293 tcp_tfo_check(struct tcpcb *tp, int32_t len)
294 {
295 	struct socket *so = tp->t_inpcb->inp_socket;
296 	unsigned int optlen = 0;
297 	unsigned int cookie_len;
298 
299 	if (tp->t_flags & TF_NOOPT) {
300 		goto fallback;
301 	}
302 
303 	if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
304 	    !tcp_heuristic_do_tfo(tp)) {
305 		tp->t_tfo_stats |= TFO_S_HEURISTICS_DISABLE;
306 		tcpstat.tcps_tfo_heuristics_disable++;
307 		goto fallback;
308 	}
309 
310 	if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) {
311 		return len;
312 	}
313 
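	/*
	 * Tally the TCP options a SYN from this connection will already
	 * carry (MSS, window scale, MPTCP MP_CAPABLE, timestamps,
	 * SACK-permitted) so we can tell whether a TFO cookie still fits.
	 */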
314 	optlen += TCPOLEN_MAXSEG;
315 
316 	if (tp->t_flags & TF_REQ_SCALE) {
317 		optlen += 4;
318 	}
319 
320 #if MPTCP
321 	if ((so->so_flags & SOF_MP_SUBFLOW) && mptcp_enable &&
322 	    (tp->t_rxtshift <= mptcp_mpcap_retries ||
323 	    (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE))) {
324 		optlen += sizeof(struct mptcp_mpcapable_opt_common) + sizeof(mptcp_key_t);
325 	}
326 #endif /* MPTCP */
327 
328 	if (tp->t_flags & TF_REQ_TSTMP) {
329 		optlen += TCPOLEN_TSTAMP_APPA;
330 	}
331 
332 	if (SACK_ENABLED(tp)) {
333 		optlen += TCPOLEN_SACK_PERMITTED;
334 	}
335 
336 	/* Now, decide whether to use TFO or not */
337 
338 	/* Don't even bother trying if there is no space at all... */
339 	if (MAX_TCPOPTLEN - optlen < TCPOLEN_FASTOPEN_REQ) {
340 		goto fallback;
341 	}
342 
343 	cookie_len = tcp_cache_get_cookie_len(tp);
344 	if (cookie_len == 0) {
345 		/* No cookie, so we request one */
346 		return 0;
347 	}
348 
349 	/* There is not enough space for the cookie, so we cannot do TFO */
350 	if (MAX_TCPOPTLEN - optlen < cookie_len) {
351 		goto fallback;
352 	}
353 
354 	/* Do not send SYN+data if there is more in the queue than MSS */
355 	if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN)) {
356 		goto fallback;
357 	}
358 
359 	/* Ok, everything looks good. We can go on and do TFO */
360 	return len;
361 
362 fallback:
363 	tcp_disable_tfo(tp);
364 	return 0;
365 }
366 
367 /* Returns the number of bytes written to the TCP option-space */
368 __attribute__((noinline))
369 static unsigned int
370 tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned int optlen, u_char *opt)
371 {
372 	u_char out[CCAES_BLOCK_SIZE];
373 	unsigned ret = 0;
374 	u_char *bp;
375 
376 	if (MAX_TCPOPTLEN - optlen <
377 	    TCPOLEN_FASTOPEN_REQ + TFO_COOKIE_LEN_DEFAULT) {
378 		return ret;
379 	}
380 
381 	tcp_tfo_gen_cookie(tp->t_inpcb, out, sizeof(out));
382 
383 	bp = opt + optlen;
384 
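	/*
	 * Fast Open cookie reply: option kind, length (2 + cookie bytes),
	 * followed by the generated cookie itself.
	 */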
385 	*bp++ = TCPOPT_FASTOPEN;
386 	*bp++ = 2 + TFO_COOKIE_LEN_DEFAULT;
387 	memcpy(bp, out, TFO_COOKIE_LEN_DEFAULT);
388 	ret += 2 + TFO_COOKIE_LEN_DEFAULT;
389 
390 	tp->t_tfo_stats |= TFO_S_COOKIE_SENT;
391 	tcpstat.tcps_tfo_cookie_sent++;
392 
393 	return ret;
394 }
395 
396 __attribute__((noinline))
397 static unsigned int
398 tcp_tfo_write_cookie(struct tcpcb *tp, unsigned int optlen, int32_t len,
399     u_char *opt)
400 {
401 	uint8_t tfo_len;
402 	struct socket *so = tp->t_inpcb->inp_socket;
403 	unsigned ret = 0;
404 	int res;
405 	u_char *bp;
406 
407 	if (TCPOLEN_FASTOPEN_REQ > MAX_TCPOPTLEN - optlen) {
408 		return 0;
409 	}
410 	tfo_len = (uint8_t)(MAX_TCPOPTLEN - optlen - TCPOLEN_FASTOPEN_REQ);
411 
412 	if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) {
413 		/* If there is some data, let's track it */
414 		if (len > 0) {
415 			tp->t_tfo_stats |= TFO_S_SYN_DATA_SENT;
416 			tcpstat.tcps_tfo_syn_data_sent++;
417 		}
418 
419 		return 0;
420 	}
421 
422 	bp = opt + optlen;
423 
424 	/*
425 	 * The cookie will be copied in the appropriate place within the
426 	 * TCP-option space. That way we avoid the need for an intermediate
427 	 * variable.
428 	 */
429 	res = tcp_cache_get_cookie(tp, bp + TCPOLEN_FASTOPEN_REQ, &tfo_len);
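	/*
	 * res == 0 means no cookie is cached for this peer: send an empty
	 * Fast Open option to request one. Otherwise the cached cookie was
	 * copied in place above and is sent, possibly alongside data.
	 */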
430 	if (res == 0) {
431 		*bp++ = TCPOPT_FASTOPEN;
432 		*bp++ = TCPOLEN_FASTOPEN_REQ;
433 		ret += TCPOLEN_FASTOPEN_REQ;
434 
435 		tp->t_tfo_flags |= TFO_F_COOKIE_REQ;
436 
437 		tp->t_tfo_stats |= TFO_S_COOKIE_REQ;
438 		tcpstat.tcps_tfo_cookie_req++;
439 	} else {
440 		*bp++ = TCPOPT_FASTOPEN;
441 		*bp++ = TCPOLEN_FASTOPEN_REQ + tfo_len;
442 
443 		ret += TCPOLEN_FASTOPEN_REQ + tfo_len;
444 
445 		tp->t_tfo_flags |= TFO_F_COOKIE_SENT;
446 
447 		/* If there is some data, let's track it */
448 		if (len > 0) {
449 			tp->t_tfo_stats |= TFO_S_SYN_DATA_SENT;
450 			tcpstat.tcps_tfo_syn_data_sent++;
451 		}
452 	}
453 
454 	return ret;
455 }
456 
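/*
 * Should this (re)transmitted SYN carry the ECN/AccECN setup flags?
 * True for the first SYN and, for Accurate ECN, also for the first
 * retransmission.
 */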
457 static inline bool
458 tcp_send_ecn_flags_on_syn(struct tcpcb *tp)
459 {
460 	/* We allow Accurate ECN negotiation on first retransmission as well */
461 	bool send_on_first_retrans = (tp->ecn_flags & TE_ACE_SETUPSENT) &&
462 	    (tp->t_rxtshift <= 1);
463 
464 	return !(tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT)) || send_on_first_retrans;
465 }
466 
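/*
 * Decide whether ECN should be enabled on this connection. Precedence:
 * per-socket option first, then per-interface flags, then the system-wide
 * sysctls; heuristics and random sampling may still veto classic ECN.
 */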
467 void
468 tcp_set_ecn(struct tcpcb *tp, struct ifnet *ifp)
469 {
470 	boolean_t inbound;
471 
472 	/*
473 	 * Socket option has precedence
474 	 */
475 	if (tp->ecn_flags & TE_ECN_MODE_ENABLE) {
476 		tp->ecn_flags |= TE_ENABLE_ECN;
477 		goto check_heuristic;
478 	}
479 
480 	if (tp->ecn_flags & TE_ECN_MODE_DISABLE) {
481 		tp->ecn_flags &= ~TE_ENABLE_ECN;
482 		return;
483 	}
484 	/*
485 	 * Per interface setting comes next
486 	 */
487 	if (ifp != NULL) {
488 		if (ifp->if_eflags & IFEF_ECN_ENABLE) {
489 			tp->ecn_flags |= TE_ENABLE_ECN;
490 			goto check_heuristic;
491 		}
492 
493 		if (ifp->if_eflags & IFEF_ECN_DISABLE) {
494 			tp->ecn_flags &= ~TE_ENABLE_ECN;
495 			return;
496 		}
497 	}
498 	/*
499 	 * System wide settings come last
500 	 */
501 	inbound = (tp->t_inpcb->inp_socket->so_head != NULL);
502 	if ((inbound && tcp_ecn_inbound == 1) ||
503 	    (!inbound && tcp_ecn_outbound == 1)) {
504 		tp->ecn_flags |= TE_ENABLE_ECN;
505 		goto check_heuristic;
506 	} else {
507 		tp->ecn_flags &= ~TE_ENABLE_ECN;
508 	}
509 
510 	return;
511 
512 check_heuristic:
513 	if (TCP_ACC_ECN_ENABLED(tp)) {
514 		/* Allow ECN when Accurate ECN is enabled until heuristics are fixed */
515 		tp->ecn_flags |= TE_ENABLE_ECN;
516 		/* Set the accurate ECN state */
517 		if (tp->t_client_accecn_state == tcp_connection_client_accurate_ecn_feature_disabled) {
518 			tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_feature_enabled;
519 		}
520 		if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_feature_disabled) {
521 			tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_feature_enabled;
522 		}
523 	}
524 	if (!tcp_heuristic_do_ecn(tp) && !TCP_ACC_ECN_ENABLED(tp)) {
525 		/* Allow ECN when Accurate ECN is enabled until heuristics are fixed */
526 		tp->ecn_flags &= ~TE_ENABLE_ECN;
527 	}
528 	/*
529 	 * If the interface setting, system-level setting and heuristics
530 	 * allow to enable ECN, randomly select 5% of connections to
531 	 * allow ECN to be enabled, randomly select a percentage of
532 	 * connections (tcp_ecn_setup_percentage) to enable it
533 	if ((tp->ecn_flags & (TE_ECN_MODE_ENABLE | TE_ECN_MODE_DISABLE
534 	    | TE_ENABLE_ECN)) == TE_ENABLE_ECN) {
535 		/*
536 		 * Use the random value in iss for randomizing
537 		 * this selection
538 		 */
539 		if ((tp->iss % 100) >= tcp_ecn_setup_percentage && !TCP_ACC_ECN_ENABLED(tp)) {
540 			/* Don't disable Accurate ECN randomly */
541 			tp->ecn_flags &= ~TE_ENABLE_ECN;
542 		}
543 	}
544 }
545 
546 int
547 tcp_flight_size(struct tcpcb *tp)
548 {
549 	int ret;
550 
551 	VERIFY(tp->sackhint.sack_bytes_acked >= 0);
552 	VERIFY(tp->sackhint.sack_bytes_rexmit >= 0);
553 
554 	/*
555 	 * Per RFC 6675 SetPipe(): SACK'd bytes are discounted; all the rest is still in flight.
556 	 */
557 	ret = tp->snd_nxt - tp->snd_una - tp->sackhint.sack_bytes_acked;
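	/*
	 * For example, with snd_una = 1000, snd_nxt = 5000 and 1460 bytes
	 * reported as SACKed, 2540 bytes are considered in flight.
	 */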
558 
559 	if (ret < 0) {
560 		/*
561 		 * This happens when the RTO-timer fires because snd_nxt gets artificially
562 		 * decreased. If we then receive some SACK-blogs, sack_bytes_acked is
563 		 * going to be high.
564 		 */
565 		ret = 0;
566 	}
567 
568 	return ret;
569 }
570 
571 /*
572  * Either of ECT0 or ECT1 flag should be set
573  * when this function is called
574  */
575 static void
576 tcp_add_accecn_option(struct tcpcb *tp, uint16_t flags, uint32_t *lp, uint8_t *optlen)
577 {
578 	uint8_t max_len = TCP_MAXOLEN - *optlen;
579 	uint8_t len = TCPOLEN_ACCECN_EMPTY;
580 
581 	uint32_t e1b = (uint32_t)(tp->t_rcv_ect1_bytes & TCP_ACO_MASK);
582 	uint32_t e0b = (uint32_t)(tp->t_rcv_ect0_bytes & TCP_ACO_MASK);
583 	uint32_t ceb =  (uint32_t)(tp->t_rcv_ce_bytes & TCP_ACO_MASK);
584 
585 	if (max_len < TCPOLEN_ACCECN_EMPTY) {
586 		TCP_LOG(tp, "not enough space to add any AccECN option");
587 		return;
588 	}
589 
590 	if (!(flags & TH_SYN || (tp->ecn_flags & TE_ACE_FINAL_ACK_3WHS) ||
591 	    tp->snd_una == tp->iss + 1 ||
592 	    tp->ecn_flags & (TE_ACO_ECT1 | TE_ACO_ECT0))) {
593 		/*
594 		 * Since this is neither a SYN-ACK packet, the final ACK of
595 		 * the 3WHS, nor the first acked data segment, and none of the
596 		 * ECT byte-counter flags are set, there is no need to send the option.
597 		 */
598 		return;
599 	}
600 
601 	if ((flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
602 	    tp->t_rxtshift >= 1) {
603 		/*
604 		 * If this is the first SYN-ACK retransmission,
605 		 * retry without AccECN option and just with ACE fields.
606 		 * From second retransmission onwards, we don't send any
607 		 * Accurate ECN state.
608 		 */
609 		return;
610 	}
611 
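	/*
	 * The option is assembled one 32-bit word at a time. For the full
	 * three-counter form the wire layout is, byte by byte:
	 *
	 *   kind | len | EF1[23:16] | EF1[15:8]          (first word)
	 *   EF1[7:0] | CE[23:16] | CE[15:8] | CE[7:0]    (second word)
	 *   EF0[23:16] | EF0[15:8] | EF0[7:0] | NOP      (third word)
	 *
	 * where EF1/EF0/CE are the received ECT(1)/ECT(0)/CE byte counters;
	 * EF0 and EF1 swap places when the other option kind is used. The
	 * shorter forms below drop trailing counters and pad with NOPs.
	 */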
612 	if (max_len < (TCPOLEN_ACCECN_EMPTY + 1 * TCPOLEN_ACCECN_COUNTER)) {
613 		/* Can carry EMPTY option which can be used to test path in SYN-ACK packet */
614 		if (flags & TH_SYN) {
615 			*lp++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) |
616 			    (TCPOPT_NOP << 8) | TCPOPT_NOP);
617 			*optlen += len + 2; /* 2 NOPs */
618 			TCP_LOG(tp, "add empty AccECN option, optlen=%u", *optlen);
619 		}
620 	} else if (max_len < (TCPOLEN_ACCECN_EMPTY + 2 * TCPOLEN_ACCECN_COUNTER)) {
621 		/* Can carry one option */
622 		len += 1 * TCPOLEN_ACCECN_COUNTER;
623 		if (tp->ecn_flags & TE_ACO_ECT1) {
624 			*lp++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | ((e1b >> 8) & 0xffff));
625 			*lp++ = htonl(((e1b & 0xff) << 24) | (TCPOPT_NOP << 16) | (TCPOPT_NOP << 8) | TCPOPT_NOP);
626 		} else {
627 			*lp++ = htonl((TCPOPT_ACCECN0 << 24) | (len << 16) | ((e0b >> 8) & 0xffff));
628 			*lp++ = htonl(((e0b & 0xff) << 24) | (TCPOPT_NOP << 16) | (TCPOPT_NOP << 8) | TCPOPT_NOP);
629 		}
630 		*optlen += len + 3; /* 3 NOPs */
631 		TCP_LOG(tp, "add single counter for AccECN option, optlen=%u", *optlen);
632 	} else if (max_len < (TCPOLEN_ACCECN_EMPTY + 3 * TCPOLEN_ACCECN_COUNTER)) {
633 		/* Can carry two options */
634 		len += 2 * TCPOLEN_ACCECN_COUNTER;
635 		if (tp->ecn_flags & TE_ACO_ECT1) {
636 			*lp++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | ((e1b >> 8) & 0xffff));
637 			*lp++ = htonl(((e1b & 0xff) << 24) | (ceb & 0xffffff));
638 		} else {
639 			*lp++ = htonl((TCPOPT_ACCECN0 << 24) | (len << 16) | ((e0b >> 8) & 0xffff));
640 			*lp++ = htonl(((e0b & 0xff) << 24) | (ceb & 0xffffff));
641 		}
642 		*optlen += len; /* 0 NOPs */
643 		TCP_LOG(tp, "add 2 counters for AccECN option, optlen=%u", *optlen);
644 	} else {
645 		/*
646 		 * The TCP option space is sufficient to hold the full AccECN
647 		 * option, i.e. all the counters that changed during the connection.
648 		 */
649 		len += 3 * TCPOLEN_ACCECN_COUNTER;
650 		/* Can carry all three options */
651 		if (tp->ecn_flags & TE_ACO_ECT1) {
652 			*lp++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | ((e1b >> 8) & 0xffff));
653 			*lp++ = htonl(((e1b & 0xff) << 24) | (ceb & 0xffffff));
654 			*lp++ = htonl(((e0b & 0xffffff) << 8) | TCPOPT_NOP);
655 		} else {
656 			*lp++ = htonl((TCPOPT_ACCECN0 << 24) | (len << 16) | ((e0b >> 8) & 0xffff));
657 			*lp++ = htonl(((e0b & 0xff) << 24) | (ceb & 0xffffff));
658 			*lp++ = htonl(((e1b & 0xffffff) << 8) | TCPOPT_NOP);
659 		}
660 		*optlen += len + 1; /* 1 NOP */
661 		TCP_LOG(tp, "add all 3 counters for AccECN option, optlen=%u", *optlen);
662 	}
663 }
664 
665 /*
666  * TCP output routine: figure out what should be sent and send it.
667  *
668  * Returns:	0			Success
669  *		EADDRNOTAVAIL
670  *		ENOBUFS
671  *		EMSGSIZE
672  *		EHOSTUNREACH
673  *		ENETDOWN
674  *	ip_output_list:ENOMEM
675  *	ip_output_list:EADDRNOTAVAIL
676  *	ip_output_list:ENETUNREACH
677  *	ip_output_list:EHOSTUNREACH
678  *	ip_output_list:EACCES
679  *	ip_output_list:EMSGSIZE
680  *	ip_output_list:ENOBUFS
681  *	ip_output_list:???		[ignorable: mostly IPSEC/firewall/DLIL]
682  *	ip6_output_list:EINVAL
683  *	ip6_output_list:EOPNOTSUPP
684  *	ip6_output_list:EHOSTUNREACH
685  *	ip6_output_list:EADDRNOTAVAIL
686  *	ip6_output_list:ENETUNREACH
687  *	ip6_output_list:EMSGSIZE
688  *	ip6_output_list:ENOBUFS
689  *	ip6_output_list:???		[ignorable: mostly IPSEC/firewall/DLIL]
690  */
691 int
692 tcp_output(struct tcpcb *tp)
693 {
694 	struct inpcb *inp = tp->t_inpcb;
695 	struct socket *so = inp->inp_socket;
696 	int32_t len, recwin, sendwin, off;
697 	uint32_t max_len = 0;
698 	uint16_t flags;
699 	int error;
700 	struct mbuf *m;
701 	struct ip *ip = NULL;
702 	struct ip6_hdr *ip6 = NULL;
703 	struct tcphdr *th;
704 	u_char opt[TCP_MAXOLEN];
705 	unsigned int ipoptlen, optlen, hdrlen;
706 	int idle, sendalot, lost = 0;
707 	int sendalot_cnt = 0;
708 	int i, sack_rxmit;
709 	int tso = 0;
710 	int sack_bytes_rxmt;
711 	tcp_seq old_snd_nxt = 0;
712 	struct sackhole *p;
713 #if IPSEC
714 	size_t ipsec_optlen = 0;
715 #endif /* IPSEC */
716 	int    idle_time = 0;
717 	struct mbuf *packetlist = NULL;
718 	struct mbuf *tp_inp_options = inp->inp_depend4.inp4_options;
719 	int isipv6 = inp->inp_vflag & INP_IPV6;
720 	int packchain_listadd = 0;
721 	int so_options = so->so_options;
722 	struct rtentry *rt;
723 	u_int32_t svc_flags = 0, allocated_len;
724 #if MPTCP
725 	boolean_t mptcp_acknow;
726 #endif /* MPTCP */
727 	boolean_t cell = FALSE;
728 	boolean_t wifi = FALSE;
729 	boolean_t wired = FALSE;
730 	boolean_t sack_rescue_rxt = FALSE;
731 	int sotc = so->so_traffic_class;
732 	boolean_t do_not_compress = FALSE;
733 	boolean_t sack_rxmted = FALSE;
734 
735 	/*
736 	 * Determine length of data that should be transmitted,
737 	 * and flags that will be used.
738 	 * If there is some data or critical controls (SYN, RST)
739 	 * to send, then transmit; otherwise, investigate further.
740 	 */
741 	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
742 
743 	/* Since idle_time is a signed integer, the following integer subtraction
744 	 * will take care of wrap around of tcp_now
745 	 */
746 	idle_time = tcp_now - tp->t_rcvtime;
747 	if (idle && idle_time >= TCP_IDLETIMEOUT(tp)) {
748 		if (CC_ALGO(tp)->after_idle != NULL &&
749 		    (tp->tcp_cc_index != TCP_CC_ALGO_CUBIC_INDEX ||
750 		    idle_time >= TCP_CC_CWND_NONVALIDATED_PERIOD)) {
751 			CC_ALGO(tp)->after_idle(tp);
752 			tcp_ccdbg_trace(tp, NULL, TCP_CC_IDLE_TIMEOUT);
753 		}
754 
755 		/*
756 		 * Do some other tasks that need to be done after
757 		 * idle time
758 		 */
759 		if (!SLIST_EMPTY(&tp->t_rxt_segments)) {
760 			tcp_rxtseg_clean(tp);
761 		}
762 
763 		/* If stretch ack was auto-disabled, re-evaluate it */
764 		tcp_cc_after_idle_stretchack(tp);
765 		tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
766 	}
767 	tp->t_flags &= ~TF_LASTIDLE;
768 	if (idle) {
769 		if (tp->t_flags & TF_MORETOCOME) {
770 			tp->t_flags |= TF_LASTIDLE;
771 			idle = 0;
772 		}
773 	}
774 #if MPTCP
775 	if (tp->t_mpflags & TMPF_RESET) {
776 		tcp_check_timer_state(tp);
777 		/*
778 		 * Once a RST has been sent for an MPTCP subflow,
779 		 * the subflow socket stays around until deleted.
780 		 * No further packets, such as FINs, may be sent after the RST.
781 		 */
782 		return 0;
783 	}
784 #endif /* MPTCP */
785 
786 again:
787 #if MPTCP
788 	mptcp_acknow = FALSE;
789 
790 	if (so->so_flags & SOF_MP_SUBFLOW && SEQ_LT(tp->snd_nxt, tp->snd_una)) {
791 		os_log_error(mptcp_log_handle, "%s - %lx: snd_nxt is %u and snd_una is %u, cnt %d\n",
792 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(tp->t_mpsub->mpts_mpte),
793 		    tp->snd_nxt, tp->snd_una, sendalot_cnt);
794 	}
795 #endif
796 	do_not_compress = FALSE;
797 	sendalot_cnt++;
798 
799 	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
800 
801 	if (isipv6) {
802 		KERNEL_DEBUG(DBG_LAYER_BEG,
803 		    ((inp->inp_fport << 16) | inp->inp_lport),
804 		    (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
805 		    (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
806 		    sendalot, 0, 0);
807 	} else {
808 		KERNEL_DEBUG(DBG_LAYER_BEG,
809 		    ((inp->inp_fport << 16) | inp->inp_lport),
810 		    (((inp->inp_laddr.s_addr & 0xffff) << 16) |
811 		    (inp->inp_faddr.s_addr & 0xffff)),
812 		    sendalot, 0, 0);
813 	}
814 	/*
815 	 * If the route generation id changed, we need to check that our
816 	 * local (source) IP address is still valid. If it isn't, either
817 	 * return an error or silently do nothing (assuming the address will
818 	 * come back before the TCP connection times out).
819 	 */
820 	rt = inp->inp_route.ro_rt;
821 	if (rt != NULL && ROUTE_UNUSABLE(&tp->t_inpcb->inp_route)) {
822 		struct ifnet *ifp;
823 		struct in_ifaddr *ia = NULL;
824 		struct in6_ifaddr *ia6 = NULL;
825 		int found_srcaddr = 0;
826 
827 		/* disable multipages at the socket */
828 		somultipages(so, FALSE);
829 
830 		/* Disable TSO for the socket until we know more */
831 		tp->t_flags &= ~TF_TSO;
832 
833 		soif2kcl(so, FALSE);
834 
835 		if (isipv6) {
836 			ia6 = ifa_foraddr6(&inp->in6p_laddr);
837 			if (ia6 != NULL) {
838 				found_srcaddr = 1;
839 			}
840 		} else {
841 			ia = ifa_foraddr(inp->inp_laddr.s_addr);
842 			if (ia != NULL) {
843 				found_srcaddr = 1;
844 			}
845 		}
846 
847 		/* check that the source address is still valid */
848 		if (found_srcaddr == 0) {
849 			soevent(so,
850 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOSRCADDR));
851 
852 			if (tp->t_state >= TCPS_CLOSE_WAIT) {
853 				tcp_drop(tp, EADDRNOTAVAIL);
854 				return EADDRNOTAVAIL;
855 			}
856 
857 			/*
858 			 * Set the retransmit timer if it wasn't set,
859 			 * reset Persist timer and shift register as the
860 			 * advertised peer window may not be valid anymore
861 			 */
862 			if (tp->t_timer[TCPT_REXMT] == 0) {
863 				tp->t_timer[TCPT_REXMT] =
864 				    OFFSET_FROM_START(tp, tp->t_rxtcur);
865 				if (tp->t_timer[TCPT_PERSIST] != 0) {
866 					tp->t_timer[TCPT_PERSIST] = 0;
867 					tp->t_persist_stop = 0;
868 					TCP_RESET_REXMT_STATE(tp);
869 				}
870 			}
871 
872 			if (tp->t_pktlist_head != NULL) {
873 				m_freem_list(tp->t_pktlist_head);
874 			}
875 			TCP_PKTLIST_CLEAR(tp);
876 
877 			/* drop connection if source address isn't available */
878 			if (so->so_flags & SOF_NOADDRAVAIL) {
879 				tcp_drop(tp, EADDRNOTAVAIL);
880 				return EADDRNOTAVAIL;
881 			} else {
882 				TCP_LOG_OUTPUT(tp, "no source address silently ignored");
883 				tcp_check_timer_state(tp);
884 				return 0; /* silently ignore, keep data in socket: address may be back */
885 			}
886 		}
887 		if (ia != NULL) {
888 			IFA_REMREF(&ia->ia_ifa);
889 		}
890 
891 		if (ia6 != NULL) {
892 			IFA_REMREF(&ia6->ia_ifa);
893 		}
894 
895 		/*
896 		 * Address is still valid; check for multipages capability
897 		 * again in case the outgoing interface has changed.
898 		 */
899 		RT_LOCK(rt);
900 		if ((ifp = rt->rt_ifp) != NULL) {
901 			somultipages(so, (ifp->if_hwassist & IFNET_MULTIPAGES));
902 			tcp_set_tso(tp, ifp);
903 			soif2kcl(so, (ifp->if_eflags & IFEF_2KCL));
904 			tcp_set_ecn(tp, ifp);
905 		}
906 		if (rt->rt_flags & RTF_UP) {
907 			RT_GENID_SYNC(rt);
908 		}
909 		/*
910 		 * See if we should do MTU discovery. Don't do it if:
911 		 *	1) it is disabled via the sysctl
912 		 *	2) the route isn't up
913 		 *	3) the MTU is locked (if it is, then discovery
914 		 *         has been disabled)
915 		 */
916 
917 		if (!path_mtu_discovery || ((rt != NULL) &&
918 		    (!(rt->rt_flags & RTF_UP) ||
919 		    (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
920 			tp->t_flags &= ~TF_PMTUD;
921 		} else {
922 			tp->t_flags |= TF_PMTUD;
923 		}
924 
925 		RT_UNLOCK(rt);
926 	}
927 
928 	if (rt != NULL) {
929 		cell = IFNET_IS_CELLULAR(rt->rt_ifp);
930 		wifi = (!cell && IFNET_IS_WIFI(rt->rt_ifp));
931 		wired = (!wifi && IFNET_IS_WIRED(rt->rt_ifp));
932 	}
933 
934 	/*
935 	 * If we've recently taken a timeout, snd_max will be greater than
936 	 * snd_nxt.  There may be SACK information that allows us to avoid
937 	 * resending already delivered data.  Adjust snd_nxt accordingly.
938 	 */
939 	if (SACK_ENABLED(tp) && SEQ_LT(tp->snd_nxt, tp->snd_max)) {
940 		max_len = tcp_sack_adjust(tp);
941 	}
942 	sendalot = 0;
943 	off = tp->snd_nxt - tp->snd_una;
944 	sendwin = min(tp->snd_wnd, tp->snd_cwnd);
945 
946 	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) {
947 		sendwin = min(sendwin, slowlink_wsize);
948 	}
949 
950 	flags = tcp_outflags[tp->t_state];
951 	/*
952 	 * Send any SACK-generated retransmissions.  If we're explicitly
953 	 * trying to send out new data (when sendalot is 1), bypass this
954 	 * function. If we retransmit in fast recovery mode, decrement
955 	 * snd_cwnd, since we're replacing a (future) new transmission
956 	 * with a retransmission now, and we previously incremented
957 	 * snd_cwnd in tcp_input().
958 	 */
959 	/*
960 	 * Still in SACK recovery, reset the rxmit flag to zero.
961 	 */
962 	sack_rxmit = 0;
963 	sack_bytes_rxmt = 0;
964 	len = 0;
965 	p = NULL;
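	/*
	 * When SACK is in use and we are in fast recovery, prefer filling
	 * holes in the scoreboard over sending new data; cwin bounds how
	 * much may be (re)transmitted in this pass.
	 */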
966 	if (SACK_ENABLED(tp) && IN_FASTRECOVERY(tp) &&
967 	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
968 		int32_t cwin;
969 
970 		if (tcp_do_better_lr) {
971 			cwin = min(tp->snd_wnd, tp->snd_cwnd) - tcp_flight_size(tp);
972 			if (cwin <= 0 && sack_rxmted == FALSE) {
973 				/* Allow clocking out at least one segment per period */
974 				cwin = tp->t_maxseg;
975 			}
976 
977 			sack_rxmted = TRUE;
978 		} else {
979 			cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt;
980 		}
981 		if (cwin < 0) {
982 			cwin = 0;
983 		}
984 		/* Do not retransmit SACK segments beyond snd_recover */
985 		if (SEQ_GT(p->end, tp->snd_recover)) {
986 			/*
987 			 * (At least) part of sack hole extends beyond
988 			 * snd_recover. Check to see if we can rexmit data
989 			 * for this hole.
990 			 */
991 			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
992 				/*
993 				 * Can't rexmit any more data for this hole.
994 				 * That data will be rexmitted in the next
995 				 * sack recovery episode, when snd_recover
996 				 * moves past p->rxmit.
997 				 */
998 				p = NULL;
999 				goto after_sack_rexmit;
1000 			} else {
1001 				/* Can rexmit part of the current hole */
1002 				len = ((int32_t)min(cwin,
1003 				    tp->snd_recover - p->rxmit));
1004 			}
1005 		} else {
1006 			len = ((int32_t)min(cwin, p->end - p->rxmit));
1007 		}
1008 		if (len > 0) {
1009 			off = p->rxmit - tp->snd_una;
1010 			sack_rxmit = 1;
1011 			sendalot = 1;
1012 			/* Everything sent after snd_nxt will allow us to account for fast-retransmit of the retransmitted segment */
1013 			tp->send_highest_sack = tp->snd_nxt;
1014 			tp->t_new_dupacks = 0;
1015 			tcpstat.tcps_sack_rexmits++;
1016 			tcpstat.tcps_sack_rexmit_bytes +=
1017 			    min(len, tp->t_maxseg);
1018 		} else {
1019 			len = 0;
1020 		}
1021 	}
1022 after_sack_rexmit:
1023 	/*
1024 	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
1025 	 * state flags.
1026 	 */
1027 	if (tp->t_flags & TF_NEEDFIN) {
1028 		flags |= TH_FIN;
1029 	}
1030 
1031 	/*
1032 	 * If in persist timeout with window of 0, send 1 byte.
1033 	 * Otherwise, if window is small but nonzero
1034 	 * and timer expired, we will send what we can
1035 	 * and go to transmit state.
1036 	 */
1037 	if (tp->t_flagsext & TF_FORCE) {
1038 		if (sendwin == 0) {
1039 			/*
1040 			 * If we still have some data to send, then
1041 			 * clear the FIN bit.  Usually this would
1042 			 * happen below when it realizes that we
1043 			 * aren't sending all the data.  However,
1044 			 * if we have exactly 1 byte of unsent data,
1045 			 * then it won't clear the FIN bit below,
1046 			 * and if we are in persist state, we wind
1047 			 * up sending the packet without recording
1048 			 * that we sent the FIN bit.
1049 			 *
1050 			 * We can't just blindly clear the FIN bit,
1051 			 * because if we don't have any more data
1052 			 * to send then the probe will be the FIN
1053 			 * itself.
1054 			 */
1055 			if (off < so->so_snd.sb_cc) {
1056 				flags &= ~TH_FIN;
1057 			}
1058 			sendwin = 1;
1059 		} else {
1060 			tp->t_timer[TCPT_PERSIST] = 0;
1061 			tp->t_persist_stop = 0;
1062 			TCP_RESET_REXMT_STATE(tp);
1063 		}
1064 	}
1065 
1066 	/*
1067 	 * If snd_nxt == snd_max and we have transmitted a FIN, the
1068 	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
1069 	 * a negative length.  This can also occur when TCP opens up
1070 	 * its congestion window while receiving additional duplicate
1071 	 * acks after fast-retransmit because TCP will reset snd_nxt
1072 	 * to snd_max after the fast-retransmit.
1073 	 *
1074 	 * In the normal retransmit-FIN-only case, however, snd_nxt will
1075 	 * be set to snd_una, the offset will be 0, and the length may
1076 	 * wind up 0.
1077 	 *
1078 	 * If sack_rxmit is true we are retransmitting from the scoreboard
1079 	 * in which case len is already set.
1080 	 */
1081 	if (sack_rxmit == 0) {
1082 		if (sack_bytes_rxmt == 0) {
1083 			len = min(so->so_snd.sb_cc, sendwin) - off;
1084 		} else {
1085 			int32_t cwin;
1086 
1087 			if (tcp_do_better_lr) {
1088 				cwin = tp->snd_cwnd - tcp_flight_size(tp);
1089 			} else {
1090 				cwin = tp->snd_cwnd -
1091 				    (tp->snd_nxt - tp->sack_newdata) -
1092 				    sack_bytes_rxmt;
1093 			}
1094 			if (cwin < 0) {
1095 				cwin = 0;
1096 			}
1097 			/*
1098 			 * We are inside of a SACK recovery episode and are
1099 			 * sending new data, having retransmitted all the
1100 			 * data possible in the scoreboard.
1101 			 */
1102 			len = min(so->so_snd.sb_cc, tp->snd_wnd) - off;
1103 			/*
1104 			 * Don't remove this (len > 0) check !
1105 			 * We explicitly check for len > 0 here (although it
1106 			 * isn't really necessary), to work around a gcc
1107 			 * optimization issue - to force gcc to compute
1108 			 * len above. Without this check, the computation
1109 			 * of len is bungled by the optimizer.
1110 			 */
1111 			if (len > 0) {
1112 				len = imin(len, cwin);
1113 			} else {
1114 				len = 0;
1115 			}
1116 			/*
1117 			 * At this point SACK recovery can not send any
1118 			 * data from scoreboard or any new data. Check
1119 			 * if we can do a rescue retransmit towards the
1120 			 * tail end of recovery window.
1121 			 */
1122 			if (len == 0 && cwin > 0 &&
1123 			    SEQ_LT(tp->snd_fack, tp->snd_recover) &&
1124 			    !(tp->t_flagsext & TF_RESCUE_RXT)) {
1125 				len = min((tp->snd_recover - tp->snd_fack),
1126 				    tp->t_maxseg);
1127 				len = imin(len, cwin);
1128 				old_snd_nxt = tp->snd_nxt;
1129 				sack_rescue_rxt = TRUE;
1130 				tp->snd_nxt = tp->snd_recover - len;
1131 				/*
1132 				 * If FIN has been sent, snd_max
1133 				 * must have been advanced to cover it.
1134 				 */
1135 				if ((tp->t_flags & TF_SENTFIN) &&
1136 				    tp->snd_max == tp->snd_recover) {
1137 					tp->snd_nxt--;
1138 				}
1139 
1140 				off = tp->snd_nxt - tp->snd_una;
1141 				sendalot = 0;
1142 				tp->t_flagsext |= TF_RESCUE_RXT;
1143 			}
1144 		}
1145 	}
1146 
1147 	if (max_len != 0 && len > 0) {
1148 		len = min(len, max_len);
1149 	}
1150 
1151 	/*
1152 	 * Lop off SYN bit if it has already been sent.  However, if this
1153 	 * is SYN-SENT state and if segment contains data and if we don't
1154 	 * know that foreign host supports TAO, suppress sending segment.
1155 	 */
1156 	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
1157 		if (tp->t_state == TCPS_SYN_RECEIVED && tfo_enabled(tp) && tp->snd_nxt == tp->snd_una + 1) {
1158 			/* We are sending the SYN again! */
1159 			off--;
1160 			len++;
1161 		} else {
1162 			if (tp->t_state != TCPS_SYN_RECEIVED || tfo_enabled(tp)) {
1163 				flags &= ~TH_SYN;
1164 			}
1165 
1166 			off--;
1167 			len++;
1168 			if (len > 0 && tp->t_state == TCPS_SYN_SENT) {
1169 				while (inp->inp_sndinprog_cnt == 0 &&
1170 				    tp->t_pktlist_head != NULL) {
1171 					packetlist = tp->t_pktlist_head;
1172 					packchain_listadd = tp->t_lastchain;
1173 					packchain_sent++;
1174 					TCP_PKTLIST_CLEAR(tp);
1175 
1176 					error = tcp_ip_output(so, tp, packetlist,
1177 					    packchain_listadd, tp_inp_options,
1178 					    (so_options & SO_DONTROUTE),
1179 					    (sack_rxmit || (sack_bytes_rxmt != 0)),
1180 					    isipv6);
1181 				}
1182 
1183 				/*
1184 				 * tcp was closed while we were in ip,
1185 				 * resume close
1186 				 */
1187 				if (inp->inp_sndinprog_cnt == 0 &&
1188 				    (tp->t_flags & TF_CLOSING)) {
1189 					tp->t_flags &= ~TF_CLOSING;
1190 					(void) tcp_close(tp);
1191 				} else {
1192 					tcp_check_timer_state(tp);
1193 				}
1194 				KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END,
1195 				    0, 0, 0, 0, 0);
1196 				return 0;
1197 			}
1198 		}
1199 	}
1200 
1201 	/*
1202 	 * Be careful not to send data and/or FIN on SYN segments.
1203 	 * This measure is needed to prevent interoperability problems
1204 	 * with not fully conformant TCP implementations.
1205 	 *
1206 	 * In case of TFO, we handle the setting of the len in
1207 	 * tcp_tfo_check. In case TFO is not enabled, never ever send
1208 	 * SYN+data.
1209 	 */
1210 	if ((flags & TH_SYN) && !tfo_enabled(tp)) {
1211 		len = 0;
1212 		flags &= ~TH_FIN;
1213 	}
1214 
1215 	/*
1216 	 * Don't send a RST with data.
1217 	 */
1218 	if (flags & TH_RST) {
1219 		len = 0;
1220 	}
1221 
1222 	if ((flags & TH_SYN) && tp->t_state <= TCPS_SYN_SENT && tfo_enabled(tp)) {
1223 		len = tcp_tfo_check(tp, len);
1224 	}
1225 
1226 	/*
1227 	 * The check here used to be (len < 0). Sometimes len is zero
1228 	 * when the congestion window is closed and we need to check
1229 	 * if persist timer has to be set in that case. But don't set
1230 	 * persist until connection is established.
1231 	 */
1232 	if (len <= 0 && !(flags & TH_SYN)) {
1233 		/*
1234 		 * If FIN has been sent but not acked,
1235 		 * but we haven't been called to retransmit,
1236 		 * len will be < 0.  Otherwise, window shrank
1237 		 * after we sent into it.  If window shrank to 0,
1238 		 * cancel pending retransmit, pull snd_nxt back
1239 		 * to (closed) window, and set the persist timer
1240 		 * if it isn't already going.  If the window didn't
1241 		 * close completely, just wait for an ACK.
1242 		 */
1243 		len = 0;
1244 		if (sendwin == 0) {
1245 			tp->t_timer[TCPT_REXMT] = 0;
1246 			tp->t_timer[TCPT_PTO] = 0;
1247 			TCP_RESET_REXMT_STATE(tp);
1248 			tp->snd_nxt = tp->snd_una;
1249 			off = 0;
1250 			if (tp->t_timer[TCPT_PERSIST] == 0) {
1251 				tcp_setpersist(tp);
1252 			}
1253 		}
1254 	}
1255 
1256 	/*
1257 	 * Automatic sizing of send socket buffer. Increase the send
1258 	 * socket buffer size if all of the following criteria are met
1259 	 *	1. the receiver has enough buffer space for this data
1260 	 *	2. send buffer is filled to 7/8th with data (so we actually
1261 	 *	   have data to make use of it);
1262 	 *	3. our send window (slow start and congestion controlled) is
1263 	 *	   larger than sent but unacknowledged data in send buffer.
1264 	 */
1265 	if (!INP_WAIT_FOR_IF_FEEDBACK(inp) && !IN_FASTRECOVERY(tp) &&
1266 	    (so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE &&
1267 	    tcp_cansbgrow(&so->so_snd)) {
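		/*
		 * Grow when the peer's window covers at least ~80% of the
		 * current buffer, the buffer is at least 7/8 full, and the
		 * usable send window covers all data not yet sent.
		 */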
1268 		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
1269 		    so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
1270 		    sendwin >= (so->so_snd.sb_cc - (tp->snd_nxt - tp->snd_una))) {
1271 			if (sbreserve(&so->so_snd,
1272 			    min(so->so_snd.sb_hiwat + tcp_autosndbuf_inc,
1273 			    tcp_autosndbuf_max)) == 1) {
1274 				so->so_snd.sb_idealsize = so->so_snd.sb_hiwat;
1275 			}
1276 		}
1277 	}
1278 
1279 	/*
1280 	 * Truncate to the maximum segment length or enable TCP Segmentation
1281 	 * Offloading (if supported by hardware) and ensure that FIN is removed
1282 	 * if the length no longer contains the last data byte.
1283 	 *
1284 	 * TSO may only be used if we are in a pure bulk sending state.
1285 	 * The presence of TCP-MD5, SACK retransmits, SACK advertisements,
1286 	 * filters and IP options, as well as disabling hardware checksum
1287 	 * offload prevent using TSO.  With TSO the TCP header is the same
1288 	 * (except for the sequence number) for all generated packets.  This
1289 	 * makes it impossible to transmit any options which vary per generated
1290 	 * segment or packet.
1291 	 *
1292 	 * The length of TSO bursts is limited to TCP_MAXWIN.  That limit and
1293 	 * removal of FIN (if not already caught here) are handled later after
1294 	 * the exact length of the TCP options is known.
1295 	 */
1296 #if IPSEC
1297 	/*
1298 	 * Pre-calculate here as we save another lookup into the darknesses
1299 	 * of IPsec that way and can actually decide if TSO is ok.
1300 	 */
1301 	if (ipsec_bypass == 0) {
1302 		ipsec_optlen = ipsec_hdrsiz_tcp(tp);
1303 	}
1304 #endif
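	/*
	 * Use TSO for a burst larger than one MSS only when hardware
	 * checksumming is available, no packet filters are loaded, there is
	 * no SACK state to convey or retransmit, and no IP options or IPsec
	 * headers are needed; otherwise clamp to one MSS and loop.
	 */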
1305 	if (len > tp->t_maxseg) {
1306 		if ((tp->t_flags & TF_TSO) && tcp_do_tso && hwcksum_tx &&
1307 		    kipf_count == 0 &&
1308 		    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
1309 		    sack_bytes_rxmt == 0 &&
1310 		    inp->inp_options == NULL &&
1311 		    inp->in6p_options == NULL
1312 #if IPSEC
1313 		    && ipsec_optlen == 0
1314 #endif
1315 		    ) {
1316 			tso = 1;
1317 			sendalot = 0;
1318 		} else {
1319 			len = tp->t_maxseg;
1320 			sendalot = 1;
1321 			tso = 0;
1322 		}
1323 	} else {
1324 		tso = 0;
1325 	}
1326 
1327 	/* Send one segment or less as a tail loss probe */
1328 	if (tp->t_flagsext & TF_SENT_TLPROBE) {
1329 		len = min(len, tp->t_maxseg);
1330 		sendalot = 0;
1331 		tso = 0;
1332 	}
1333 
1334 #if MPTCP
1335 	if (so->so_flags & SOF_MP_SUBFLOW && off < 0) {
1336 		os_log_error(mptcp_log_handle, "%s - %lx: offset is negative! len %d off %d\n",
1337 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(tp->t_mpsub->mpts_mpte),
1338 		    len, off);
1339 	}
1340 
1341 	if ((so->so_flags & SOF_MP_SUBFLOW) &&
1342 	    !(tp->t_mpflags & TMPF_TCP_FALLBACK)) {
1343 		int newlen = len;
1344 		struct mptcb *mp_tp = tptomptp(tp);
1345 		if (tp->t_state >= TCPS_ESTABLISHED &&
1346 		    (tp->t_mpflags & TMPF_SND_MPPRIO ||
1347 		    tp->t_mpflags & TMPF_SND_REM_ADDR ||
1348 		    tp->t_mpflags & TMPF_SND_MPFAIL ||
1349 		    (tp->t_mpflags & TMPF_SND_KEYS &&
1350 		    mp_tp->mpt_version == MPTCP_VERSION_0) ||
1351 		    tp->t_mpflags & TMPF_SND_JACK ||
1352 		    tp->t_mpflags & TMPF_MPTCP_ECHO_ADDR)) {
1353 			if (len > 0) {
1354 				len = 0;
1355 				tso = 0;
1356 			}
1357 			/*
1358 			 * On a new subflow, don't try to send again, because
1359 			 * we are still waiting for the fourth ack.
1360 			 */
1361 			if (!(tp->t_mpflags & TMPF_PREESTABLISHED)) {
1362 				sendalot = 1;
1363 			}
1364 			mptcp_acknow = TRUE;
1365 		} else {
1366 			mptcp_acknow = FALSE;
1367 		}
1368 		/*
1369 		 * The contiguous bytes in the subflow socket buffer can be
1370 		 * discontiguous at the MPTCP level. Since only one DSS
1371 		 * option can be sent in one packet, reduce length to match
1372 		 * the contiguous MPTCP level. Set sendalot to send remainder.
1373 		 */
1374 		if (len > 0 && off >= 0) {
1375 			newlen = mptcp_adj_sendlen(so, off);
1376 		}
1377 
1378 		if (newlen < len) {
1379 			len = newlen;
1380 			if (len <= tp->t_maxseg) {
1381 				tso = 0;
1382 			}
1383 		}
1384 	}
1385 #endif /* MPTCP */
1386 
1387 	if (sack_rxmit) {
1388 		if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc)) {
1389 			flags &= ~TH_FIN;
1390 		}
1391 	} else {
1392 		if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) {
1393 			flags &= ~TH_FIN;
1394 		}
1395 	}
1396 	/*
1397 	 * Compare available window to amount of window
1398 	 * known to peer (as advertised window less
1399 	 * next expected input).  If the difference is at least two
1400 	 * max size segments, or at least 25% of the maximum possible
1401 	 * window, then we want to send a window update to the peer.
1402 	 */
1403 	recwin = tcp_sbspace(tp);
1404 
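	/*
	 * Avoid advertising a silly window: if the remaining space is less
	 * than a quarter of the receive buffer and also less than one MSS,
	 * advertise zero instead (for MPTCP subflows, judge against the
	 * MPTCP-level receive buffer).
	 */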
1405 	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
1406 		if (recwin < (int32_t)(so->so_rcv.sb_hiwat / 4) &&
1407 		    recwin < (int)tp->t_maxseg) {
1408 			recwin = 0;
1409 		}
1410 	} else {
1411 		struct mptcb *mp_tp = tptomptp(tp);
1412 		struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
1413 
1414 		if (recwin < (int32_t)(mp_so->so_rcv.sb_hiwat / 4) &&
1415 		    recwin < (int)tp->t_maxseg) {
1416 			recwin = 0;
1417 		}
1418 	}
1419 
1420 #if TRAFFIC_MGT
1421 	if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
1422 		/*
1423 		 * Timestamps MUST be supported to use rledbat, except before
1424 		 * they have been negotiated (i.e. before ESTABLISHED).
1425 		 */
1426 		if (TCP_RLEDBAT_ENABLED(tp) || (tcp_rledbat && tp->t_state <
1427 		    TCPS_ESTABLISHED)) {
1428 			if (recwin > 0 && tcp_cc_rledbat.get_rlwin != NULL) {
1429 				/* Min of flow control window and rledbat window */
1430 				recwin = imin(recwin, tcp_cc_rledbat.get_rlwin(tp));
1431 			}
1432 		} else if (recwin > 0 && tcp_recv_throttle(tp)) {
1433 			uint32_t min_iaj_win = tcp_min_iaj_win * tp->t_maxseg;
1434 			uint32_t bg_rwintop = tp->rcv_adv;
1435 			if (SEQ_LT(bg_rwintop, tp->rcv_nxt + min_iaj_win)) {
1436 				bg_rwintop =  tp->rcv_nxt + min_iaj_win;
1437 			}
1438 			recwin = imin((int32_t)(bg_rwintop - tp->rcv_nxt),
1439 			    recwin);
1440 			if (recwin < 0) {
1441 				recwin = 0;
1442 			}
1443 		}
1444 	}
1445 #endif /* TRAFFIC_MGT */
1446 
1447 	if (recwin > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) {
1448 		recwin = (int32_t)(TCP_MAXWIN << tp->rcv_scale);
1449 	}
1450 
1451 	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
1452 		if (recwin < (int32_t)(tp->rcv_adv - tp->rcv_nxt)) {
1453 			recwin = (int32_t)(tp->rcv_adv - tp->rcv_nxt);
1454 		}
1455 	} else {
1456 		struct mptcb *mp_tp = tptomptp(tp);
1457 		int64_t recwin_announced = (int64_t)(mp_tp->mpt_rcvadv - mp_tp->mpt_rcvnxt);
1458 
1459 		/* Don't remove what we announced at the MPTCP-layer */
1460 		VERIFY(recwin_announced < INT32_MAX && recwin_announced > INT32_MIN);
1461 		if (recwin < (int32_t)recwin_announced) {
1462 			recwin = (int32_t)recwin_announced;
1463 		}
1464 	}
1465 
1466 	/*
1467 	 * Sender silly window avoidance.   We transmit under the following
1468 	 * conditions when len is non-zero:
1469 	 *
1470 	 *	- we've timed out (e.g. persist timer)
1471 	 *	- we need to retransmit
1472 	 *	- We have a full segment (or more with TSO)
1473 	 *	- This is the last buffer in a write()/send() and we are
1474 	 *	  either idle or running NODELAY
1475 	 *	- we have more than 1/2 the maximum send window's worth of
1476 	 *	  data (the receiver may be limiting the window size)
1477 	 */
1478 	if (len) {
1479 		if (tp->t_flagsext & TF_FORCE) {
1480 			goto send;
1481 		}
1482 		if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
1483 			goto send;
1484 		}
1485 		if (sack_rxmit) {
1486 			goto send;
1487 		}
1488 
1489 		/*
1490 		 * If this is the first segment after SYN/ACK and TFO is being
1491 		 * used, then we always send it, regardless of Nagle, ...
1492 		 */
1493 		if (tp->t_state == TCPS_SYN_RECEIVED &&
1494 		    tfo_enabled(tp) &&
1495 		    (tp->t_tfo_flags & TFO_F_COOKIE_VALID) &&
1496 		    tp->snd_nxt == tp->iss + 1) {
1497 			goto send;
1498 		}
1499 
1500 		/*
1501 		 * Send new data on the connection only if it is
1502 		 * not flow controlled
1503 		 */
1504 		if (!INP_WAIT_FOR_IF_FEEDBACK(inp) ||
1505 		    tp->t_state != TCPS_ESTABLISHED) {
1506 			if (len >= tp->t_maxseg) {
1507 				goto send;
1508 			}
1509 
1510 			if (!(tp->t_flags & TF_MORETOCOME) &&
1511 			    (idle || tp->t_flags & TF_NODELAY ||
1512 			    (tp->t_flags & TF_MAXSEGSNT) ||
1513 			    ALLOW_LIMITED_TRANSMIT(tp)) &&
1514 			    (tp->t_flags & TF_NOPUSH) == 0 &&
1515 			    (len + off >= so->so_snd.sb_cc ||
1516 			    /*
1517 			     * MPTCP needs to respect the DSS-mappings. So, it
1518 			     * may be sending data that *could* have been
1519 			     * coalesced, but cannot because of
1520 			     * mptcp_adj_sendlen().
1521 			     */
1522 			    so->so_flags & SOF_MP_SUBFLOW)) {
1523 				goto send;
1524 			}
1525 			if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
1526 				goto send;
1527 			}
1528 		} else {
1529 			tcpstat.tcps_fcholdpacket++;
1530 		}
1531 	}
1532 
1533 	if (recwin > 0) {
1534 		/*
1535 		 * "adv" is the amount we can increase the window,
1536 		 * taking into account that we are limited by
1537 		 * TCP_MAXWIN << tp->rcv_scale.
1538 		 */
1539 		int32_t adv, oldwin = 0;
1540 		adv = imin(recwin, (int)TCP_MAXWIN << tp->rcv_scale) -
1541 		    (tp->rcv_adv - tp->rcv_nxt);
1542 
1543 		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
1544 			oldwin = tp->rcv_adv - tp->rcv_nxt;
1545 		}
1546 
1547 		if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY) {
1548 			if (adv >= (int32_t) (2 * tp->t_maxseg)) {
1549 				/*
1550 				 * Update only if the resulting scaled value of
1551 				 * the window changed, or if there is a change in
1552 				 * the sequence since the last ack. This avoids
1553 				 * what appears as dupe ACKS (see rdar://5640997)
1554 				 *
1555 				 * If streaming is detected avoid sending too many
1556 				 * window updates. We will depend on the delack
1557 				 * timer to send a window update when needed.
1558 				 *
1559 				 * If there is more data to read, don't send an ACK.
1560 				 * Otherwise we will end up sending many ACKs if the
1561 				 * application is doing micro-reads.
1562 				 */
1563 				if (!(tp->t_flags & TF_STRETCHACK) &&
1564 				    (tp->last_ack_sent != tp->rcv_nxt ||
1565 				    ((oldwin + adv) >> tp->rcv_scale) >
1566 				    (oldwin >> tp->rcv_scale))) {
1567 					goto send;
1568 				}
1569 			}
1570 		} else {
1571 			if (adv >= (int32_t) (2 * tp->t_maxseg)) {
1572 				/*
1573 				 * ACK every second full-sized segment, if the
1574 				 * ACK is advancing or the window becomes bigger
1575 				 */
1576 				if (so->so_rcv.sb_cc < so->so_rcv.sb_lowat &&
1577 				    (tp->last_ack_sent != tp->rcv_nxt ||
1578 				    ((oldwin + adv) >> tp->rcv_scale) >
1579 				    (oldwin >> tp->rcv_scale))) {
1580 					goto send;
1581 				}
1582 			} else if (tp->t_flags & TF_DELACK) {
1583 				/*
1584 				 * If we delayed the ACK and the window
1585 				 * is not advancing by a lot (< 2MSS), ACK
1586 				 * immediately if the last incoming packet had
1587 				 * the push flag set and we emptied the buffer.
1588 				 *
1589 				 * This takes care of a sender doing small
1590 				 * repeated writes with Nagle enabled.
1591 				 */
1592 				if (so->so_rcv.sb_cc == 0 &&
1593 				    tp->last_ack_sent != tp->rcv_nxt &&
1594 				    (tp->t_flagsext & TF_LAST_IS_PSH)) {
1595 					goto send;
1596 				}
1597 			}
1598 		}
1599 		if (4 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
1600 			goto send;
1601 		}
1602 
1603 		/*
1604 		 * Make sure that the delayed ack timer is set if
1605 		 * we delayed sending a window update because of
1606 		 * streaming detection.
1607 		 */
1608 		if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY &&
1609 		    (tp->t_flags & TF_STRETCHACK) &&
1610 		    !(tp->t_flags & TF_DELACK)) {
1611 			tp->t_flags |= TF_DELACK;
1612 			tp->t_timer[TCPT_DELACK] =
1613 			    OFFSET_FROM_START(tp, tcp_delack);
1614 		}
1615 	}
1616 
1617 	/*
1618 	 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
1619 	 * is also a catch-all for the retransmit timer timeout case.
1620 	 */
1621 	if (tp->t_flags & TF_ACKNOW) {
1622 		if (tp->t_forced_acks > 0) {
1623 			tp->t_forced_acks--;
1624 		}
1625 		goto send;
1626 	}
1627 	if ((flags & TH_RST) || (flags & TH_SYN)) {
1628 		goto send;
1629 	}
1630 	if (SEQ_GT(tp->snd_up, tp->snd_una)) {
1631 		goto send;
1632 	}
1633 #if MPTCP
1634 	if (mptcp_acknow) {
1635 		goto send;
1636 	}
1637 #endif /* MPTCP */
1638 	/*
1639 	 * If our state indicates that FIN should be sent
1640 	 * and we have not yet done so, then we need to send.
1641 	 */
1642 	if ((flags & TH_FIN) &&
1643 	    (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una)) {
1644 		goto send;
1645 	}
1646 	/*
1647 	 * In SACK, it is possible for tcp_output to fail to send a segment
1648 	 * after the retransmission timer has been turned off.  Make sure
1649 	 * that the retransmission timer is set.
1650 	 */
1651 	if (SACK_ENABLED(tp) && (tp->t_state >= TCPS_ESTABLISHED) &&
1652 	    SEQ_GT(tp->snd_max, tp->snd_una) &&
1653 	    tp->t_timer[TCPT_REXMT] == 0 &&
1654 	    tp->t_timer[TCPT_PERSIST] == 0) {
1655 		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp,
1656 		    tp->t_rxtcur);
1657 		goto just_return;
1658 	}
1659 	/*
1660 	 * TCP window updates are not reliable, rather a polling protocol
1661 	 * using ``persist'' packets is used to ensure receipt of window
1662 	 * updates.  The three ``states'' for the output side are:
1663 	 *	idle			not doing retransmits or persists
1664 	 *	persisting		to move a small or zero window
1665 	 *	(re)transmitting	and thereby not persisting
1666 	 *
1667 	 * tp->t_timer[TCPT_PERSIST]
1668 	 *	is set when we are in persist state.
1669 	 * tp->t_force
1670 	 *	is set when we are called to send a persist packet.
1671 	 * tp->t_timer[TCPT_REXMT]
1672 	 *	is set when we are retransmitting
1673 	 * The output side is idle when both timers are zero.
1674 	 *
1675 	 * If send window is too small, there is data to transmit, and no
1676 	 * retransmit or persist is pending, then go to persist state.
1677 	 * If nothing happens soon, send when timer expires:
1678 	 * if window is nonzero, transmit what we can,
1679 	 * otherwise force out a byte.
1680 	 */
1681 	if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
1682 	    tp->t_timer[TCPT_PERSIST] == 0) {
1683 		TCP_RESET_REXMT_STATE(tp);
1684 		tcp_setpersist(tp);
1685 	}
1686 just_return:
1687 	/*
1688 	 * If there is no reason to send a segment, just return,
1689 	 * but if there are packets left in the packet list, send them now.
1690 	 */
1691 	while (inp->inp_sndinprog_cnt == 0 &&
1692 	    tp->t_pktlist_head != NULL) {
1693 		packetlist = tp->t_pktlist_head;
1694 		packchain_listadd = tp->t_lastchain;
1695 		packchain_sent++;
1696 		TCP_PKTLIST_CLEAR(tp);
1697 
1698 		error = tcp_ip_output(so, tp, packetlist,
1699 		    packchain_listadd,
1700 		    tp_inp_options, (so_options & SO_DONTROUTE),
1701 		    (sack_rxmit || (sack_bytes_rxmt != 0)), isipv6);
1702 	}
1703 	/* tcp was closed while we were in ip; resume close */
1704 	if (inp->inp_sndinprog_cnt == 0 &&
1705 	    (tp->t_flags & TF_CLOSING)) {
1706 		tp->t_flags &= ~TF_CLOSING;
1707 		(void) tcp_close(tp);
1708 	} else {
1709 		tcp_check_timer_state(tp);
1710 	}
1711 	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
1712 	return 0;
1713 
1714 send:
1715 	/*
1716 	 * Set the TF_MAXSEGSNT flag if the segment size is at least
1717 	 * the maximum segment size.
1718 	 */
1719 	if (len > 0) {
1720 		do_not_compress = TRUE;
1721 
1722 		if (len >= tp->t_maxseg) {
1723 			tp->t_flags |= TF_MAXSEGSNT;
1724 		} else {
1725 			tp->t_flags &= ~TF_MAXSEGSNT;
1726 		}
1727 	}
1728 	/*
1729 	 * If we are connected and no segment has been ACKed or SACKed yet and we
1730 	 * hit a retransmission timeout, then we should disable the AccECN option
1731 	 * for the rest of the connection.
1732 	 */
1733 	if (TCP_ACC_ECN_ON(tp) && tp->t_state == TCPS_ESTABLISHED &&
1734 	    tp->snd_una == tp->iss + 1 && (tp->snd_fack == 0)
1735 	    && tp->t_rxtshift > 0) {
1736 		if ((tp->ecn_flags & TE_RETRY_WITHOUT_ACO) == 0) {
1737 			tp->ecn_flags |= TE_RETRY_WITHOUT_ACO;
1738 		}
1739 	}
1740 	/*
1741 	 * Before ESTABLISHED, force sending of initial options
1742 	 * unless TCP set not to do any options.
1743 	 * NOTE: we assume that the IP/TCP header plus TCP options
1744 	 * always fit in a single mbuf, leaving room for a maximum
1745 	 * link header, i.e.
1746 	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
1747 	 */
1748 	optlen = 0;
1749 	if (isipv6) {
1750 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1751 	} else {
1752 		hdrlen = sizeof(struct tcpiphdr);
1753 	}
1754 	if (flags & TH_SYN) {
1755 		tp->snd_nxt = tp->iss;
1756 		if ((tp->t_flags & TF_NOOPT) == 0) {
1757 			u_short mss;
1758 
1759 			opt[0] = TCPOPT_MAXSEG;
1760 			opt[1] = TCPOLEN_MAXSEG;
1761 			mss = htons((u_short) tcp_mssopt(tp));
1762 			(void)memcpy(opt + 2, &mss, sizeof(mss));
1763 			optlen = TCPOLEN_MAXSEG;
1764 
1765 			if ((tp->t_flags & TF_REQ_SCALE) &&
1766 			    ((flags & TH_ACK) == 0 ||
1767 			    (tp->t_flags & TF_RCVD_SCALE))) {
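				/*
				 * The window scale option is packed into one
				 * 32-bit word: a leading NOP for alignment,
				 * then kind (TCPOPT_WINDOW), length
				 * (TCPOLEN_WINDOW) and our shift count.
				 */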
1768 				*((u_int32_t *)(void *)(opt + optlen)) = htonl(
1769 					TCPOPT_NOP << 24 |
1770 					        TCPOPT_WINDOW << 16 |
1771 					        TCPOLEN_WINDOW << 8 |
1772 					        tp->request_r_scale);
1773 				optlen += 4;
1774 			}
1775 #if MPTCP
1776 			if (mptcp_enable && (so->so_flags & SOF_MP_SUBFLOW)) {
1777 				optlen = mptcp_setup_syn_opts(so, opt, optlen);
1778 			}
1779 #endif /* MPTCP */
1780 		}
1781 	}
1782 
1783 	/*
1784 	 * Send a timestamp and echo-reply if this is a SYN and our side
1785 	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
1786 	 * and our peer have sent timestamps in our SYNs.
1787 	 */
1788 	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
1789 	    (flags & TH_RST) == 0 &&
1790 	    ((flags & TH_ACK) == 0 ||
1791 	    (tp->t_flags & TF_RCVD_TSTMP))) {
1792 		u_int32_t *lp = (u_int32_t *)(void *)(opt + optlen);
1793 
1794 		/* Form timestamp option as shown in appendix A of RFC 1323. */
1795 		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
1796 		*lp++ = htonl(tcp_now + tp->t_ts_offset);
1797 		*lp   = htonl(tp->ts_recent);
1798 		optlen += TCPOLEN_TSTAMP_APPA;
1799 	}
1800 
1801 	if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) {
1802 		/*
1803 		 * Tack on the SACK permitted option *last*,
1804 		 * and pad the options after tacking this on.
1805 		 * This is because if MSS, TS, WinScale and Signatures are
1806 		 * all present, we have just 2 bytes left for the SACK
1807 		 * permitted option, which is just enough.
1808 		 */
1809 		/*
1810 		 * If this is the first SYN of the connection (not a SYN
1811 		 * ACK), include the SACK permitted option.  If this is a
1812 		 * SYN ACK, include the SACK permitted option only if the peer
1813 		 * has already done so. This is only for active connect,
1814 		 * since the syncache takes care of the passive connect.
1815 		 */
1816 		if ((flags & TH_SYN) &&
1817 		    (!(flags & TH_ACK) || (tp->t_flags & TF_SACK_PERMIT))) {
1818 			u_char *bp;
1819 			bp = (u_char *)opt + optlen;
1820 
1821 			*bp++ = TCPOPT_SACK_PERMITTED;
1822 			*bp++ = TCPOLEN_SACK_PERMITTED;
1823 			optlen += TCPOLEN_SACK_PERMITTED;
1824 		}
1825 	}
1826 #if MPTCP
1827 	if (so->so_flags & SOF_MP_SUBFLOW) {
1828 		/*
1829 		 * It's important to piggyback ACKs with data, as ACK-only packets
1830 		 * may get lost and data packets that don't send Data ACKs
1831 		 * still advance the subflow-level ACK and therefore make it
1832 		 * hard for the remote end to recover in low-cwnd situations.
1833 		 */
1834 		if (len != 0) {
1835 			tp->t_mpflags |= (TMPF_SEND_DSN |
1836 			    TMPF_MPTCP_ACKNOW);
1837 		} else {
1838 			tp->t_mpflags |= TMPF_MPTCP_ACKNOW;
1839 		}
1840 		optlen = mptcp_setup_opts(tp, off, &opt[0], optlen, flags,
1841 		    len, &mptcp_acknow, &do_not_compress);
1842 		tp->t_mpflags &= ~TMPF_SEND_DSN;
1843 	}
1844 #endif /* MPTCP */
1845 
1846 	if (tfo_enabled(tp) && !(tp->t_flags & TF_NOOPT) &&
1847 	    (flags & (TH_SYN | TH_ACK)) == TH_SYN) {
1848 		optlen += tcp_tfo_write_cookie(tp, optlen, len, opt);
1849 	}
1850 
1851 	if (tfo_enabled(tp) &&
1852 	    (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
1853 	    (tp->t_tfo_flags & TFO_F_OFFER_COOKIE)) {
1854 		optlen += tcp_tfo_write_cookie_rep(tp, optlen, opt);
1855 	}
1856 
1857 	if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) {
1858 		/*
1859 		 * Send SACKs if necessary.  This should be the last
1860 		 * option processed.  Only as many SACKs are sent as
1861 		 * are permitted by the maximum options size.
1862 		 *
1863 		 * In general, SACK blocks consume 8*n+2 bytes.
1864 		 * So a full-size SACK-blocks option is 34 bytes
1865 		 * (to generate 4 SACK blocks).  At a minimum,
1866 		 * we need 10 bytes (to generate 1 SACK block).
1867 		 * If TCP Timestamps (12 bytes) and TCP Signatures
1868 		 * (18 bytes) are both present, we'll have just
1869 		 * 10 bytes left for SACK options: 40 - (12 + 18).
1870 		 */
1871 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
1872 		    (tp->t_flags & TF_SACK_PERMIT) &&
1873 		    (tp->rcv_numsacks > 0 || TCP_SEND_DSACK_OPT(tp)) &&
1874 		    MAX_TCPOPTLEN - optlen >= TCPOLEN_SACK + 2) {
1875 			unsigned int sackoptlen = 0;
1876 			int nsack, padlen;
1877 			u_char *bp = (u_char *)opt + optlen;
1878 			u_int32_t *lp;
1879 
1880 			nsack = (MAX_TCPOPTLEN - optlen - 2) / TCPOLEN_SACK;
1881 			nsack = min(nsack, (tp->rcv_numsacks +
1882 			    (TCP_SEND_DSACK_OPT(tp) ? 1 : 0)));
1883 			sackoptlen = (2 + nsack * TCPOLEN_SACK);
1884 			VERIFY(sackoptlen < UINT8_MAX);
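			/*
			 * Worked example (hypothetical): with only a 12-byte
			 * timestamp option present, optlen is 12, so up to
			 * nsack = (40 - 12 - 2) / 8 = 3 blocks fit and
			 * sackoptlen = 2 + 3 * 8 = 26 bytes.
			 */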
1885 
1886 			/*
1887 			 * First we need to pad options so that the
1888 			 * SACK blocks can start at a 4-byte boundary
1889 			 * (sack option kind and length are at a 2-byte offset).
1890 			 */
1891 			padlen = (MAX_TCPOPTLEN - optlen - sackoptlen) % 4;
1892 			optlen += padlen;
1893 			while (padlen-- > 0) {
1894 				*bp++ = TCPOPT_NOP;
1895 			}
1896 
1897 			tcpstat.tcps_sack_send_blocks++;
1898 			*bp++ = TCPOPT_SACK;
1899 			*bp++ = (uint8_t)sackoptlen;
1900 			lp = (u_int32_t *)(void *)bp;
1901 
1902 			/*
1903 			 * First block of SACK option should represent
1904 			 * DSACK. Prefer to send SACK information if there
1905 			 * is space for only one SACK block. This will
1906 			 * allow for faster recovery.
1907 			 */
1908 			if (TCP_SEND_DSACK_OPT(tp) && nsack > 0 &&
1909 			    (tp->rcv_numsacks == 0 || nsack > 1)) {
1910 				*lp++ = htonl(tp->t_dsack_lseq);
1911 				*lp++ = htonl(tp->t_dsack_rseq);
1912 				tcpstat.tcps_dsack_sent++;
1913 				tp->t_dsack_sent++;
1914 				nsack--;
1915 			}
1916 			VERIFY(nsack == 0 || tp->rcv_numsacks >= nsack);
1917 			for (i = 0; i < nsack; i++) {
1918 				struct sackblk sack = tp->sackblks[i];
1919 				*lp++ = htonl(sack.start);
1920 				*lp++ = htonl(sack.end);
1921 			}
1922 			optlen += sackoptlen;
1923 
1924 			/* Make sure we didn't write too much */
1925 			VERIFY((u_char *)lp - opt <= MAX_TCPOPTLEN);
1926 		}
1927 	}
1928 
1929 	/*
1930 	 * AccECN option - after SACK
1931 	 * Don't send it on a <SYN>;
1932 	 * send it only on a <SYN,ACK> before AccECN is negotiated, or
1933 	 * when doing an AccECN session. Don't send the AccECN option
1934 	 * when retransmitting a SYN-ACK or a data segment.
1935 	 */
1936 	if ((TCP_ACC_ECN_ON(tp) ||
1937 	    (TCP_ACC_ECN_ENABLED(tp) && (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)))
1938 	    && ((tp->ecn_flags & TE_RETRY_WITHOUT_ACO) == 0)) {
1939 		uint32_t *lp = (uint32_t *)(void *)(opt + optlen);
1940 		/* lp will become outdated after options are added */
1941 		tcp_add_accecn_option(tp, flags, lp, (uint8_t *)&optlen);
1942 	}
1943 	/* Pad TCP options to a 4 byte boundary */
1944 	if (optlen < MAX_TCPOPTLEN && (optlen % sizeof(u_int32_t))) {
1945 		int pad = sizeof(u_int32_t) - (optlen % sizeof(u_int32_t));
1946 		u_char *bp = (u_char *)opt + optlen;
1947 
1948 		optlen += pad;
1949 		while (pad) {
1950 			*bp++ = TCPOPT_EOL;
1951 			pad--;
1952 		}
1953 	}
1954 
1955 	/*
1956 	 * For Accurate ECN, send the ACE flags based on r.cep if
1957 	 * we have completed the handshake and are in ESTABLISHED state, and
1958 	 * this is not the final ACK of the 3WHS.
1959 	 */
1960 	if (TCP_ACC_ECN_ON(tp) && TCPS_HAVEESTABLISHED(tp->t_state) &&
1961 	    (tp->ecn_flags & TE_ACE_FINAL_ACK_3WHS) == 0) {
1962 		uint8_t ace = tp->t_rcv_ce_packets & TCP_ACE_MASK;
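		/*
		 * The ACE field (the AE, CWR and ECE bits together) carries
		 * the low three bits of our count of received CE-marked
		 * packets, as specified by the Accurate ECN draft.
		 */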
1963 		if (ace & 0x01) {
1964 			flags |= TH_ECE;
1965 		} else {
1966 			flags &= ~TH_ECE;
1967 		}
1968 		if (ace & 0x02) {
1969 			flags |= TH_CWR;
1970 		} else {
1971 			flags &= ~TH_CWR;
1972 		}
1973 		if (ace & 0x04) {
1974 			flags |= TH_AE;
1975 		} else {
1976 			flags &= ~TH_AE;
1977 		}
1978 	}
1979 
1980 	/*
1981 	 * RFC 3168 states that:
1982 	 * - If you ever sent an ECN-setup SYN/SYN-ACK you must be prepared
1983 	 * to handle the TCP ECE flag, even if you also later send a
1984 	 * non-ECN-setup SYN/SYN-ACK.
1985 	 * - If you ever send a non-ECN-setup SYN/SYN-ACK, you must not set
1986 	 * the ip ECT flag.
1987 	 *
1988 	 * It is not clear how the ECE flag would ever be set if you never
1989 	 * set the IP ECT flag on outbound packets. All the same, we use
1990 	 * TE_SETUPSENT to indicate that we have committed to handling
1991 	 * the TCP ECE flag correctly. We use TE_SENDIPECT to indicate
1992 	 * whether or not we should set the IP ECT flag on outbound packets.
1993 	 *
1994 	 * For a SYN-ACK, send an ECN-setup SYN-ACK.
1995 	 *
1996 	 * Below we send ECN for three different handshake states:
1997 	 * 1. Server received SYN and is sending a SYN-ACK (state->TCPS_SYN_RECEIVED)
1998 	 *    - both classic and Accurate ECN have special encoding
1999 	 * 2. Client is sending SYN packet (state->SYN_SENT)
2000 	 *    - both classic and Accurate ECN have special encoding
2001 	 * 3. Client is sending final ACK of 3WHS (state->ESTABLISHED)
2002 	 *    - Only Accurate ECN has special encoding
2003 	 */
2004 	if ((flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
2005 	    (tp->ecn_flags & TE_ENABLE_ECN)) {
2006 		/* Server received either legacy or Accurate ECN setup SYN */
2007 		if (tp->ecn_flags & (TE_SETUPRECEIVED | TE_ACE_SETUPRECEIVED)) {
2008 			if (tcp_send_ecn_flags_on_syn(tp)) {
2009 				if (TCP_ACC_ECN_ENABLED(tp) && (tp->ecn_flags & TE_ACE_SETUPRECEIVED)) {
2010 					/*
2011 					 * Accurate ECN mode is on. Initialize packet and byte counters
2012 					 * for the server sending SYN-ACK. Although s_cep will be initialized
2013 					 * during input processing of ACK of SYN-ACK, initialize here as well
2014 					 * in case ACK gets lost.
2015 					 *
2016 					 * Non-zero initial values are used to
2017 					 * support a stateless handshake (see
2018 					 * Section 5.1 of AccECN draft) and to be
2019 					 * distinct from cases where the fields
2020 					 * are incorrectly zeroed.
2021 					 */
2022 					tp->t_rcv_ce_packets = 5;
2023 					tp->t_snd_ce_packets = 5;
2024 
2025 					/* Initialize CE byte counter to 0 */
2026 					tp->t_rcv_ce_bytes = tp->t_snd_ce_bytes = 0;
2027 
2028 					if (tp->ecn_flags & TE_ACE_SETUP_NON_ECT) {
2029 						tp->t_prev_ace_flags = TH_CWR;
2030 						flags |= tp->t_prev_ace_flags;
2031 						/* Remove the setup flag as it is also used for final ACK */
2032 						tp->ecn_flags &= ~TE_ACE_SETUP_NON_ECT;
2033 						tcpstat.tcps_ecn_ace_syn_not_ect++;
2034 					} else if (tp->ecn_flags & TE_ACE_SETUP_ECT1) {
2035 						tp->t_prev_ace_flags = (TH_CWR | TH_ECE);
2036 						flags |= tp->t_prev_ace_flags;
2037 						tp->ecn_flags &= ~TE_ACE_SETUP_ECT1;
2038 						tcpstat.tcps_ecn_ace_syn_ect1++;
2039 					} else if (tp->ecn_flags & TE_ACE_SETUP_ECT0) {
2040 						tp->t_prev_ace_flags = TH_AE;
2041 						flags |= tp->t_prev_ace_flags;
2042 						tp->ecn_flags &= ~TE_ACE_SETUP_ECT0;
2043 						tcpstat.tcps_ecn_ace_syn_ect0++;
2044 					} else if (tp->ecn_flags & TE_ACE_SETUP_CE) {
2045 						tp->t_prev_ace_flags = (TH_AE | TH_CWR);
2046 						flags |= tp->t_prev_ace_flags;
2047 						tp->ecn_flags &= ~TE_ACE_SETUP_CE;
2048 						/*
2049 						 * Receive counter is updated on
2050 						 * all acceptable packets except
2051 						 * CE on SYN packets (SYN=1, ACK=0)
2052 						 */
2053 						tcpstat.tcps_ecn_ace_syn_ce++;
2054 					} else {
2055 						if (tp->t_prev_ace_flags != 0) {
2056 							/* Set the flags for the retransmitted SYN-ACK the same as the previous one */
2057 							flags |= tp->t_prev_ace_flags;
2058 						} else {
2059 							/* We shouldn't come here */
2060 							panic("ECN flags (0x%x) not set correctly", tp->ecn_flags);
2061 						}
2062 					}
2063 					/*
2064 					 * We are not yet committing to send IP ECT packets when
2065 					 * Accurate ECN mode is on
2066 					 */
2067 					tp->ecn_flags |= (TE_ACE_SETUPSENT);
2068 				} else if (tp->ecn_flags & TE_SETUPRECEIVED) {
2069 					/*
2070 					 * Setting TH_ECE makes this an ECN-setup
2071 					 * SYN-ACK
2072 					 */
2073 					flags |= TH_ECE;
2074 					/*
2075 					 * Record that we sent the ECN-setup and
2076 					 * default to setting IP ECT.
2077 					 */
2078 					tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT);
2079 				}
2080 				tcpstat.tcps_ecn_server_setup++;
2081 				tcpstat.tcps_ecn_server_success++;
2082 			} else {
2083 				/*
2084 				 * For classic ECN, we sent an ECN-setup SYN-ACK but it was
2085 				 * dropped. Fall back to a non-ECN-setup
2086 				 * SYN-ACK and clear the flag to indicate that
2087 				 * we should not send data with IP ECT set.
2088 				 *
2089 				 * Pretend we didn't receive an
2090 				 * ECN-setup SYN.
2091 				 *
2092 				 * We already incremented the counter
2093 				 * assuming that the ECN setup will
2094 				 * succeed. Decrement
2095 				 * tcps_ecn_server_success here to correct it.
2096 				 *
2097 				 * For Accurate ECN, we don't yet remove TE_ACE_SETUPRECEIVED
2098 				 * as the client might have received Accurate ECN SYN-ACK.
2099 				 * We decide Accurate ECN's state on processing last ACK from the client.
2100 				 */
2101 				if (tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT)) {
2102 					tcpstat.tcps_ecn_lost_synack++;
2103 					tcpstat.tcps_ecn_server_success--;
2104 					tp->ecn_flags |= TE_LOST_SYNACK;
2105 				}
2106 
2107 				tp->ecn_flags &=
2108 				    ~(TE_SETUPRECEIVED | TE_SENDIPECT |
2109 				    TE_SENDCWR);
2110 			}
2111 		}
2112 	} else if ((flags & (TH_SYN | TH_ACK)) == TH_SYN &&
2113 	    (tp->ecn_flags & TE_ENABLE_ECN)) {
2114 		if (tcp_send_ecn_flags_on_syn(tp)) {
2115 			if (TCP_ACC_ECN_ENABLED(tp)) {
2116 				/* We are negotiating AccECN in SYN */
2117 				flags |= TH_ACE;
2118 				/*
2119 				 * For AccECN, we only set the ECN-setup sent
2120 				 * flag as we are not committing to set ECT yet.
2121 				 */
2122 				tp->ecn_flags |= (TE_ACE_SETUPSENT);
2123 			} else {
2124 				/*
2125 				 * Setting TH_ECE and TH_CWR makes this an
2126 				 * ECN-setup SYN
2127 				 */
2128 				flags |= (TH_ECE | TH_CWR);
2129 				/*
2130 				 * Record that we sent the ECN-setup and default to
2131 				 * setting IP ECT.
2132 				 */
2133 				tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT);
2134 			}
2135 			tcpstat.tcps_ecn_client_setup++;
2136 			tp->ecn_flags |= TE_CLIENT_SETUP;
2137 		} else {
2138 			/*
2139 			 * We sent an ECN-setup SYN but it was dropped.
2140 			 * Fall back to non-ECN and clear flag indicating
2141 			 * we should send data with IP ECT set.
2142 			 */
2143 			if (tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT)) {
2144 				tcpstat.tcps_ecn_lost_syn++;
2145 				tp->ecn_flags |= TE_LOST_SYN;
2146 			}
2147 			tp->ecn_flags &= ~TE_SENDIPECT;
2148 		}
2149 	} else if (TCP_ACC_ECN_ON(tp) && (tp->ecn_flags & TE_ACE_FINAL_ACK_3WHS) &&
2150 	    len == 0 && (flags & (TH_FLAGS_ALL)) == TH_ACK) {
2151 		/*
2152 		 * Client has processed SYN-ACK and moved to ESTABLISHED.
2153 		 * This is the final ACK of the 3WHS. If AccECN has been negotiated,
2154 		 * then send the handshake encoding as per Table 3 of the Accurate ECN draft.
2155 		 * We clear the ACE flags first, just in case they were set before.
2156 		 * TODO: if the client has to carry data in the 3WHS ACK, then we need to send a pure ACK first.
2157 		 */
2158 		flags &= ~(TH_AE | TH_CWR | TH_ECE);
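		/*
		 * The flag combinations below encode which IP-ECN codepoint
		 * was observed on the SYN-ACK: CWR alone for Not-ECT,
		 * CWR|ECE for ECT(1), AE for ECT(0), and AE|CWR for CE,
		 * matching the TE_ACE_SETUP_* flag that was recorded.
		 */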
2159 		if (tp->ecn_flags & TE_ACE_SETUP_NON_ECT) {
2160 			flags |= TH_CWR;
2161 			tp->ecn_flags &= ~TE_ACE_SETUP_NON_ECT;
2162 		} else if (tp->ecn_flags & TE_ACE_SETUP_ECT1) {
2163 			flags |= (TH_CWR | TH_ECE);
2164 			tp->ecn_flags &= ~TE_ACE_SETUP_ECT1;
2165 		} else if (tp->ecn_flags & TE_ACE_SETUP_ECT0) {
2166 			flags |= TH_AE;
2167 			tp->ecn_flags &= ~TE_ACE_SETUP_ECT0;
2168 		} else if (tp->ecn_flags & TE_ACE_SETUP_CE) {
2169 			flags |= (TH_AE | TH_CWR);
2170 			tp->ecn_flags &= ~TE_ACE_SETUP_CE;
2171 		}
2172 		tp->ecn_flags &= ~(TE_ACE_FINAL_ACK_3WHS);
2173 	}
2174 
2175 	/*
2176 	 * Check if we should set the TCP CWR flag.
2177 	 * CWR flag is sent when we reduced the congestion window because
2178 	 * we received a TCP ECE or we performed a fast retransmit. We
2179 	 * never set the CWR flag on retransmitted packets. We only set
2180 	 * the CWR flag on data packets. Pure ACKs don't have this set.
2181 	 */
2182 	if ((tp->ecn_flags & TE_SENDCWR) != 0 && len != 0 &&
2183 	    !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) {
2184 		flags |= TH_CWR;
2185 		tp->ecn_flags &= ~TE_SENDCWR;
2186 	}
2187 
2188 	/*
2189 	 * Check if we should set the TCP ECE flag.
2190 	 */
2191 	if ((tp->ecn_flags & TE_SENDECE) != 0 && len == 0) {
2192 		flags |= TH_ECE;
2193 		tcpstat.tcps_ecn_sent_ece++;
2194 	}
2195 
2196 	hdrlen += optlen;
2197 
2198 	/* Reset DSACK sequence numbers */
2199 	tp->t_dsack_lseq = 0;
2200 	tp->t_dsack_rseq = 0;
2201 
2202 	if (isipv6) {
2203 		ipoptlen = ip6_optlen(inp);
2204 	} else {
2205 		if (tp_inp_options) {
2206 			ipoptlen = tp_inp_options->m_len -
2207 			    offsetof(struct ipoption, ipopt_list);
2208 		} else {
2209 			ipoptlen = 0;
2210 		}
2211 	}
2212 #if IPSEC
2213 	ipoptlen += ipsec_optlen;
2214 #endif
2215 
2216 	/*
2217 	 * Adjust data length if insertion of options will
2218 	 * bump the packet length beyond the t_maxopd length.
2219 	 * Clear the FIN bit because we cut off the tail of
2220 	 * the segment.
2221 	 *
2222 	 * When doing TSO, limit a burst to TCP_MAXWIN minus the
2223 	 * IP, TCP and options length to keep ip->ip_len from
2224 	 * overflowing.  Prevent the last segment from being
2225 	 * fractional, thus making them all equal-sized, and set
2226 	 * the flag to continue sending.  TSO is disabled when
2227 	 * IP options or IPSEC are present.
2228 	 */
2229 	if (len + optlen + ipoptlen > tp->t_maxopd) {
2230 		/*
2231 		 * If there is still more to send,
2232 		 * don't close the connection.
2233 		 */
2234 		flags &= ~TH_FIN;
2235 		if (tso) {
2236 			int32_t tso_maxlen;
2237 
2238 			tso_maxlen = tp->tso_max_segment_size ?
2239 			    tp->tso_max_segment_size : TCP_MAXWIN;
2240 
2241 			/* hdrlen includes optlen */
2242 			if (len > tso_maxlen - hdrlen) {
2243 				len = tso_maxlen - hdrlen;
2244 				sendalot = 1;
2245 			} else if (tp->t_flags & TF_NEEDFIN) {
2246 				sendalot = 1;
2247 			}
2248 
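			/*
			 * Trim len to a multiple of the per-segment payload
			 * (t_maxopd - optlen) so every segment produced by
			 * TSO is full-sized; any remainder is sent on a
			 * later pass (sendalot).
			 */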
2249 			if (len % (tp->t_maxopd - optlen) != 0) {
2250 				len = len - (len % (tp->t_maxopd - optlen));
2251 				sendalot = 1;
2252 			}
2253 		} else {
2254 			len = tp->t_maxopd - optlen - ipoptlen;
2255 			sendalot = 1;
2256 		}
2257 	}
2258 
2259 	if (max_linkhdr + hdrlen > MCLBYTES) {
2260 		panic("tcphdr too big");
2261 	}
2262 
2263 	/* Check if there is enough data in the send socket
2264 	 * buffer to start measuring bandwidth
2265 	 */
2266 	if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
2267 	    (tp->t_bwmeas != NULL) &&
2268 	    (tp->t_flagsext & TF_BWMEAS_INPROGRESS) == 0) {
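		/*
		 * The measurement window is the smallest of: the data still
		 * unsent in the socket buffer, the congestion window and the
		 * peer's advertised send window.
		 */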
2269 		tp->t_bwmeas->bw_size = min(min(
2270 			    (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)),
2271 			    tp->snd_cwnd), tp->snd_wnd);
2272 		if (tp->t_bwmeas->bw_minsize > 0 &&
2273 		    tp->t_bwmeas->bw_size < tp->t_bwmeas->bw_minsize) {
2274 			tp->t_bwmeas->bw_size = 0;
2275 		}
2276 		if (tp->t_bwmeas->bw_maxsize > 0) {
2277 			tp->t_bwmeas->bw_size = min(tp->t_bwmeas->bw_size,
2278 			    tp->t_bwmeas->bw_maxsize);
2279 		}
2280 		if (tp->t_bwmeas->bw_size > 0) {
2281 			tp->t_flagsext |= TF_BWMEAS_INPROGRESS;
2282 			tp->t_bwmeas->bw_start = tp->snd_max;
2283 			tp->t_bwmeas->bw_ts = tcp_now;
2284 		}
2285 	}
2286 
2287 	VERIFY(inp->inp_flowhash != 0);
2288 	/*
2289 	 * Grab a header mbuf, attaching a copy of data to
2290 	 * be transmitted, and initialize the header from
2291 	 * the template for sends on this connection.
2292 	 */
2293 	if (len) {
2294 		/* Remember what the last head-of-line packet-size was */
2295 		if (tp->t_pmtud_lastseg_size == 0 && tp->snd_nxt == tp->snd_una) {
2296 			ASSERT(len + optlen + ipoptlen <= IP_MAXPACKET);
2297 			tp->t_pmtud_lastseg_size = (uint16_t)(len + optlen + ipoptlen);
2298 		}
2299 		if ((tp->t_flagsext & TF_FORCE) && len == 1) {
2300 			tcpstat.tcps_sndprobe++;
2301 		} else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
2302 			tcpstat.tcps_sndrexmitpack++;
2303 			tcpstat.tcps_sndrexmitbyte += len;
2304 			if (nstat_collect) {
2305 				nstat_route_tx(inp->inp_route.ro_rt, 1,
2306 				    len, NSTAT_TX_FLAG_RETRANSMIT);
2307 				INP_ADD_STAT(inp, cell, wifi, wired,
2308 				    txpackets, 1);
2309 				INP_ADD_STAT(inp, cell, wifi, wired,
2310 				    txbytes, len);
2311 				tp->t_stat.txretransmitbytes += len;
2312 				tp->t_stat.rxmitpkts++;
2313 			}
2314 			if (tp->ecn_flags & TE_SENDIPECT) {
2315 				tp->t_ecn_capable_packets_lost++;
2316 			}
2317 		} else {
2318 			tcpstat.tcps_sndpack++;
2319 			tcpstat.tcps_sndbyte += len;
2320 
2321 			if (nstat_collect) {
2322 				INP_ADD_STAT(inp, cell, wifi, wired,
2323 				    txpackets, 1);
2324 				INP_ADD_STAT(inp, cell, wifi, wired,
2325 				    txbytes, len);
2326 			}
2327 			if (tp->ecn_flags & TE_SENDIPECT) {
2328 				tp->t_ecn_capable_packets_sent++;
2329 			}
2330 			inp_decr_sndbytes_unsent(so, len);
2331 		}
2332 		inp_set_activity_bitmap(inp);
2333 #if MPTCP
2334 		if (tp->t_mpflags & TMPF_MPTCP_TRUE) {
2335 			tcpstat.tcps_mp_sndpacks++;
2336 			tcpstat.tcps_mp_sndbytes += len;
2337 		}
2338 #endif /* MPTCP */
2339 		/*
2340 		 * try to use the new interface that allocates all
2341 		 * the necessary mbuf hdrs under 1 mbuf lock and
2342 		 * avoids rescanning the socket mbuf list if
2343 		 * certain conditions are met.  This routine can't
2344 		 * be used in the following cases...
2345 		 * 1) the protocol headers exceed the capacity
2346 		 * of a single mbuf header's data area (no cluster attached)
2347 		 * 2) the length of the data being transmitted plus
2348 		 * the protocol headers fits into a single mbuf header's
2349 		 * data area (no cluster attached)
2350 		 */
2351 		m = NULL;
2352 
2353 		/* minimum length we are going to allocate */
2354 		allocated_len = MHLEN;
2355 		if (MHLEN < hdrlen + max_linkhdr) {
2356 			MGETHDR(m, M_DONTWAIT, MT_HEADER);
2357 			if (m == NULL) {
2358 				error = ENOBUFS;
2359 				goto out;
2360 			}
2361 			MCLGET(m, M_DONTWAIT);
2362 			if ((m->m_flags & M_EXT) == 0) {
2363 				m_freem(m);
2364 				error = ENOBUFS;
2365 				goto out;
2366 			}
2367 			m->m_data += max_linkhdr;
2368 			m->m_len = hdrlen;
2369 			allocated_len = MCLBYTES;
2370 		}
2371 		if (len <= allocated_len - hdrlen - max_linkhdr) {
2372 			if (m == NULL) {
2373 				VERIFY(allocated_len <= MHLEN);
2374 				MGETHDR(m, M_DONTWAIT, MT_HEADER);
2375 				if (m == NULL) {
2376 					error = ENOBUFS;
2377 					goto out;
2378 				}
2379 				m->m_data += max_linkhdr;
2380 				m->m_len = hdrlen;
2381 			}
2382 			/* makes sure we still have data left to be sent at this point */
2383 			if (so->so_snd.sb_mb == NULL || off < 0) {
2384 				if (m != NULL) {
2385 					m_freem(m);
2386 				}
2387 				error = 0; /* should we return an error? */
2388 				goto out;
2389 			}
2390 			m_copydata(so->so_snd.sb_mb, off, (int) len,
2391 			    mtod(m, caddr_t) + hdrlen);
2392 			m->m_len += len;
2393 		} else {
2394 			uint32_t copymode;
2395 			/*
2396 			 * Retain packet header metadata at the socket
2397 			 * buffer if this is an MPTCP subflow,
2398 			 * otherwise move it.
2399 			 */
2400 			copymode = M_COPYM_MOVE_HDR;
2401 #if MPTCP
2402 			if (so->so_flags & SOF_MP_SUBFLOW) {
2403 				copymode = M_COPYM_NOOP_HDR;
2404 			}
2405 #endif /* MPTCP */
2406 			if (m != NULL) {
2407 				m->m_next = m_copym_mode(so->so_snd.sb_mb,
2408 				    off, (int)len, M_DONTWAIT, copymode);
2409 				if (m->m_next == NULL) {
2410 					(void) m_free(m);
2411 					error = ENOBUFS;
2412 					goto out;
2413 				}
2414 			} else {
2415 				/*
2416 				 * make sure we still have data left
2417 				 * to be sent at this point
2418 				 */
2419 				if (so->so_snd.sb_mb == NULL) {
2420 					error = 0; /* should we return an error? */
2421 					goto out;
2422 				}
2423 
2424 				/*
2425 				 * m_copym_with_hdrs will always return the
2426 				 * last mbuf pointer and the offset into it that
2427 				 * it acted on to fulfill the current request,
2428 				 * whether a valid 'hint' was passed in or not.
2429 				 */
2430 				if ((m = m_copym_with_hdrs(so->so_snd.sb_mb,
2431 				    off, len, M_DONTWAIT, NULL, NULL,
2432 				    copymode)) == NULL) {
2433 					error = ENOBUFS;
2434 					goto out;
2435 				}
2436 				m->m_data += max_linkhdr;
2437 				m->m_len = hdrlen;
2438 			}
2439 		}
2440 		/*
2441 		 * If we're sending everything we've got, set PUSH.
2442 		 * (This will keep happy those implementations which only
2443 		 * give data to the user when a buffer fills or
2444 		 * a PUSH comes in.)
2445 		 *
2446 		 * On SYN-segments we should not add the PUSH-flag.
2447 		 */
2448 		if (off + len == so->so_snd.sb_cc && !(flags & TH_SYN)) {
2449 			flags |= TH_PUSH;
2450 		}
2451 	} else {
2452 		if (tp->t_flags & TF_ACKNOW) {
2453 			tcpstat.tcps_sndacks++;
2454 		} else if (flags & (TH_SYN | TH_FIN | TH_RST)) {
2455 			tcpstat.tcps_sndctrl++;
2456 		} else if (SEQ_GT(tp->snd_up, tp->snd_una)) {
2457 			tcpstat.tcps_sndurg++;
2458 		} else {
2459 			tcpstat.tcps_sndwinup++;
2460 		}
2461 
2462 		MGETHDR(m, M_DONTWAIT, MT_HEADER);      /* MAC-OK */
2463 		if (m == NULL) {
2464 			error = ENOBUFS;
2465 			goto out;
2466 		}
2467 		if (MHLEN < (hdrlen + max_linkhdr)) {
2468 			MCLGET(m, M_DONTWAIT);
2469 			if ((m->m_flags & M_EXT) == 0) {
2470 				m_freem(m);
2471 				error = ENOBUFS;
2472 				goto out;
2473 			}
2474 		}
2475 		m->m_data += max_linkhdr;
2476 		m->m_len = hdrlen;
2477 	}
2478 	m->m_pkthdr.rcvif = 0;
2479 	m_add_crumb(m, PKT_CRUMB_TCP_OUTPUT);
2480 
2481 	/* Any flag other than pure-ACK: Do not compress! */
2482 	if (flags & ~(TH_ACK)) {
2483 		do_not_compress = TRUE;
2484 	}
2485 
2486 	if (tp->rcv_scale == 0) {
2487 		do_not_compress = TRUE;
2488 	}
2489 
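	/*
	 * Tag outgoing pure ACKs with a generation count for ACK
	 * compression: the count is bumped at most once every
	 * tcp_ack_compression_rate ticks, and a value of 0 means this
	 * packet must not be compressed.
	 */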
2490 	if (do_not_compress) {
2491 		m->m_pkthdr.comp_gencnt = 0;
2492 	} else {
2493 		if (TSTMP_LT(tp->t_comp_lastinc + tcp_ack_compression_rate, tcp_now)) {
2494 			tp->t_comp_gencnt++;
2495 			/* 0 means no compression, thus jump this */
2496 			if (tp->t_comp_gencnt <= TCP_ACK_COMPRESSION_DUMMY) {
2497 				tp->t_comp_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
2498 			}
2499 			tp->t_comp_lastinc = tcp_now;
2500 		}
2501 		m->m_pkthdr.comp_gencnt = tp->t_comp_gencnt;
2502 	}
2503 
2504 	if (isipv6) {
2505 		ip6 = mtod(m, struct ip6_hdr *);
2506 		th = (struct tcphdr *)(void *)(ip6 + 1);
2507 		tcp_fillheaders(m, tp, ip6, th);
2508 		if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len &&
2509 		    !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) {
2510 			ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
2511 		}
2512 		svc_flags |= PKT_SCF_IPV6;
2513 #if PF_ECN
2514 		m_pftag(m)->pftag_hdr = (void *)ip6;
2515 		m_pftag(m)->pftag_flags |= PF_TAG_HDR_INET6;
2516 #endif /* PF_ECN */
2517 	} else {
2518 		ip = mtod(m, struct ip *);
2519 		th = (struct tcphdr *)(void *)(ip + 1);
2520 		/* this picks up the pseudo header (w/o the length) */
2521 		tcp_fillheaders(m, tp, ip, th);
2522 		if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len &&
2523 		    !SEQ_LT(tp->snd_nxt, tp->snd_max) &&
2524 		    !sack_rxmit && !(flags & TH_SYN)) {
2525 			ip->ip_tos |= IPTOS_ECN_ECT0;
2526 		}
2527 #if PF_ECN
2528 		m_pftag(m)->pftag_hdr = (void *)ip;
2529 		m_pftag(m)->pftag_flags |= PF_TAG_HDR_INET;
2530 #endif /* PF_ECN */
2531 	}
2532 
2533 	/*
2534 	 * Fill in fields, remembering maximum advertised
2535 	 * window for use in delaying messages about window sizes.
2536 	 * If resending a FIN, be sure not to use a new sequence number.
2537 	 */
2538 	if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) &&
2539 	    tp->snd_nxt == tp->snd_max) {
2540 		tp->snd_nxt--;
2541 	}
2542 	/*
2543 	 * If we are doing retransmissions, then snd_nxt will
2544 	 * not reflect the first unsent octet.  For ACK only
2545 	 * packets, we do not want the sequence number of the
2546 	 * retransmitted packet, we want the sequence number
2547 	 * of the next unsent octet.  So, if there is no data
2548 	 * (and no SYN or FIN), use snd_max instead of snd_nxt
2549 	 * when filling in ti_seq.  But if we are in persist
2550 	 * state, snd_max might reflect one byte beyond the
2551 	 * right edge of the window, so use snd_nxt in that
2552 	 * case, since we know we aren't doing a retransmission.
2553 	 * (retransmit and persist are mutually exclusive...)
2554 	 *
2555 	 * Note the state of this retransmit segment to detect spurious
2556 	 * retransmissions.
2557 	 */
2558 	if (sack_rxmit == 0) {
2559 		if (len || (flags & (TH_SYN | TH_FIN)) ||
2560 		    tp->t_timer[TCPT_PERSIST]) {
2561 			th->th_seq = htonl(tp->snd_nxt);
2562 			if (len > 0) {
2563 				m->m_pkthdr.tx_start_seq = tp->snd_nxt;
2564 				m->m_pkthdr.pkt_flags |= PKTF_START_SEQ;
2565 			}
2566 			if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
2567 				if (SACK_ENABLED(tp) && len > 1 &&
2568 				    !(tp->t_flagsext & TF_SENT_TLPROBE)) {
2569 					tcp_rxtseg_insert(tp, tp->snd_nxt,
2570 					    (tp->snd_nxt + len - 1));
2571 				}
2572 				if (len > 0) {
2573 					m->m_pkthdr.pkt_flags |=
2574 					    PKTF_TCP_REXMT;
2575 				}
2576 			}
2577 		} else {
2578 			th->th_seq = htonl(tp->snd_max);
2579 		}
2580 	} else {
2581 		th->th_seq = htonl(p->rxmit);
2582 		if (len > 0) {
2583 			m->m_pkthdr.pkt_flags |=
2584 			    (PKTF_TCP_REXMT | PKTF_START_SEQ);
2585 			m->m_pkthdr.tx_start_seq = p->rxmit;
2586 		}
2587 		tcp_rxtseg_insert(tp, p->rxmit, (p->rxmit + len - 1));
2588 		p->rxmit += len;
2589 		tp->sackhint.sack_bytes_rexmit += len;
2590 	}
2591 	th->th_ack = htonl(tp->rcv_nxt);
2592 	tp->last_ack_sent = tp->rcv_nxt;
2593 	if (optlen) {
2594 		bcopy(opt, th + 1, optlen);
2595 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
2596 	}
2597 	/* Separate AE from flags */
2598 	th->th_flags = (flags & (TH_FLAGS_ALL));
2599 	th->th_x2 = (flags & (TH_AE)) >> 8;
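	/*
	 * TH_AE does not fit into the classic 8-bit flags field, so it is
	 * shifted into th_x2 (the reserved bits adjacent to the flags), as
	 * used by Accurate ECN.
	 */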
2600 	th->th_win = htons((u_short) (recwin >> tp->rcv_scale));
2601 	tp->t_last_recwin = recwin;
2602 	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
2603 		if (recwin > 0 && SEQ_LT(tp->rcv_adv, tp->rcv_nxt + recwin)) {
2604 			tp->rcv_adv = tp->rcv_nxt + recwin;
2605 		}
2606 	} else {
2607 		struct mptcb *mp_tp = tptomptp(tp);
2608 		if (recwin > 0) {
2609 			tp->rcv_adv = tp->rcv_nxt + recwin;
2610 		}
2611 
2612 		if (recwin > 0 && MPTCP_SEQ_LT(mp_tp->mpt_rcvadv, mp_tp->mpt_rcvnxt + recwin)) {
2613 			mp_tp->mpt_rcvadv = mp_tp->mpt_rcvnxt + recwin;
2614 		}
2615 	}
2616 
2617 	/*
2618 	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
2619 	 * a 0 window.  This may cause the remote transmitter to stall.  This
2620 	 * flag tells soreceive() to disable delayed acknowledgements when
2621 	 * draining the buffer.  This can occur if the receiver is attempting
2622 	 * to read more data than can be buffered prior to transmitting on
2623 	 * the connection.
2624 	 */
2625 	if (th->th_win == 0) {
2626 		tp->t_flags |= TF_RXWIN0SENT;
2627 	} else {
2628 		tp->t_flags &= ~TF_RXWIN0SENT;
2629 	}
2630 
2631 	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
2632 		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
2633 		th->th_flags |= TH_URG;
2634 	} else {
2635 		/*
2636 		 * If no urgent pointer to send, then we pull
2637 		 * the urgent pointer to the left edge of the send window
2638 		 * so that it doesn't drift into the send window on sequence
2639 		 * number wraparound.
2640 		 */
2641 		tp->snd_up = tp->snd_una;               /* drag it along */
2642 	}
2643 
2644 	/*
2645 	 * Put TCP length in extended header, and then
2646 	 * checksum extended header and data.
2647 	 */
2648 	m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
2649 
2650 	/*
2651 	 * If this is potentially the last packet on the stream, then mark
2652 	 * it in order to enable some optimizations in the underlying
2653 	 * layers.
2654 	 */
2655 	if (tp->t_state != TCPS_ESTABLISHED &&
2656 	    (tp->t_state == TCPS_CLOSING || tp->t_state == TCPS_TIME_WAIT
2657 	    || tp->t_state == TCPS_LAST_ACK || (th->th_flags & TH_RST))) {
2658 		m->m_pkthdr.pkt_flags |= PKTF_LAST_PKT;
2659 	}
2660 
2661 	if (isipv6) {
2662 		/*
2663 		 * ip6_plen does not need to be filled now, and will be filled
2664 		 * in ip6_output.
2665 		 */
2666 		m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
2667 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2668 		if (len + optlen) {
2669 			th->th_sum = in_addword(th->th_sum,
2670 			    htons((u_short)(optlen + len)));
2671 		}
2672 	} else {
2673 		m->m_pkthdr.csum_flags = CSUM_TCP;
2674 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2675 		if (len + optlen) {
2676 			th->th_sum = in_addword(th->th_sum,
2677 			    htons((u_short)(optlen + len)));
2678 		}
2679 	}
2680 
2681 	/*
2682 	 * Enable TSO and specify the size of the segments.
2683 	 * The TCP pseudo header checksum is always provided.
2684 	 */
2685 	if (tso) {
2686 		if (isipv6) {
2687 			m->m_pkthdr.csum_flags |= CSUM_TSO_IPV6;
2688 		} else {
2689 			m->m_pkthdr.csum_flags |= CSUM_TSO_IPV4;
2690 		}
2691 
2692 		m->m_pkthdr.tso_segsz = tp->t_maxopd - optlen;
2693 	} else {
2694 		m->m_pkthdr.tso_segsz = 0;
2695 	}
2696 
2697 	/*
2698 	 * In transmit state, time the transmission and arrange for
2699 	 * the retransmit.  In persist state, just set snd_max.
2700 	 */
2701 	if (!(tp->t_flagsext & TF_FORCE)
2702 	    || tp->t_timer[TCPT_PERSIST] == 0) {
2703 		tcp_seq startseq = tp->snd_nxt;
2704 
2705 		/*
2706 		 * Advance snd_nxt over sequence space of this segment.
2707 		 */
2708 		if (flags & (TH_SYN | TH_FIN)) {
2709 			if (flags & TH_SYN) {
2710 				tp->snd_nxt++;
2711 			}
2712 			if ((flags & TH_FIN) &&
2713 			    !(tp->t_flags & TF_SENTFIN)) {
2714 				tp->snd_nxt++;
2715 				tp->t_flags |= TF_SENTFIN;
2716 			}
2717 		}
2718 		if (sack_rxmit) {
2719 			goto timer;
2720 		}
2721 		if (sack_rescue_rxt == TRUE) {
2722 			tp->snd_nxt = old_snd_nxt;
2723 			sack_rescue_rxt = FALSE;
2724 			tcpstat.tcps_pto_in_recovery++;
2725 		} else {
2726 			tp->snd_nxt += len;
2727 		}
2728 		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
2729 			tp->snd_max = tp->snd_nxt;
2730 			tp->t_sndtime = tcp_now;
2731 			/*
2732 			 * Time this transmission if not a retransmission and
2733 			 * not currently timing anything.
2734 			 */
2735 			if (tp->t_rtttime == 0) {
2736 				tp->t_rtttime = tcp_now;
2737 				tp->t_rtseq = startseq;
2738 				tcpstat.tcps_segstimed++;
2739 
2740 				/* update variables related to pipe ack */
2741 				tp->t_pipeack_lastuna = tp->snd_una;
2742 			}
2743 		}
2744 
2745 		/*
2746 		 * Set retransmit timer if not currently set,
2747 		 * and not doing an ack or a keep-alive probe.
2748 		 */
2749 timer:
2750 		if (tp->t_timer[TCPT_REXMT] == 0 &&
2751 		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
2752 		    tp->snd_nxt != tp->snd_una || (flags & TH_FIN))) {
2753 			if (tp->t_timer[TCPT_PERSIST]) {
2754 				tp->t_timer[TCPT_PERSIST] = 0;
2755 				tp->t_persist_stop = 0;
2756 				TCP_RESET_REXMT_STATE(tp);
2757 			}
2758 			tp->t_timer[TCPT_REXMT] =
2759 			    OFFSET_FROM_START(tp, tp->t_rxtcur);
2760 		}
2761 
2762 		/*
2763 		 * Set tail loss probe timeout if new data is being
2764 		 * transmitted. This is supported only when the
2765 		 * SACK option is enabled on a connection.
2766 		 *
2767 		 * Every time new data is sent, the PTO gets reset.
2768 		 */
2769 		if (tcp_enable_tlp && len != 0 && tp->t_state == TCPS_ESTABLISHED &&
2770 		    SACK_ENABLED(tp) && !IN_FASTRECOVERY(tp) &&
2771 		    tp->snd_nxt == tp->snd_max &&
2772 		    SEQ_GT(tp->snd_nxt, tp->snd_una) &&
2773 		    tp->t_rxtshift == 0 &&
2774 		    (tp->t_flagsext & (TF_SENT_TLPROBE | TF_PKTS_REORDERED)) == 0) {
2775 			uint32_t pto, srtt;
2776 
2777 			if (tcp_do_better_lr) {
2778 				srtt = tp->t_srtt >> TCP_RTT_SHIFT;
2779 				pto = 2 * srtt;
2780 				if ((tp->snd_max - tp->snd_una) <= tp->t_maxseg) {
2781 					pto += tcp_delack;
2782 				} else {
2783 					pto += 2;
2784 				}
2785 			} else {
2786 				/*
2787 				 * Using SRTT alone to set PTO can cause spurious
2788 				 * retransmissions on wireless networks where there
2789 				 * is a lot of variance in RTT. Taking variance
2790 				 * into account will avoid this.
2791 				 */
2792 				srtt = tp->t_srtt >> TCP_RTT_SHIFT;
2793 				pto = ((TCP_REXMTVAL(tp)) * 3) >> 1;
2794 				pto = max(2 * srtt, pto);
2795 				if ((tp->snd_max - tp->snd_una) == tp->t_maxseg) {
2796 					pto = max(pto,
2797 					    (((3 * pto) >> 2) + tcp_delack * 2));
2798 				} else {
2799 					pto = max(10, pto);
2800 				}
2801 			}
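			/*
			 * In both cases the probe timeout is based on roughly
			 * two smoothed RTTs (the legacy path also factors in
			 * the retransmit value), inflated by a delayed-ACK
			 * allowance when at most one segment's worth of data
			 * is outstanding, and finally clamped so it never
			 * exceeds the current RTO (checked just below).
			 */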
2802 
2803 			/* if RTO is less than PTO, choose RTO instead */
2804 			if (tp->t_rxtcur < pto) {
2805 				pto = tp->t_rxtcur;
2806 			}
2807 
2808 			tp->t_timer[TCPT_PTO] = OFFSET_FROM_START(tp, pto);
2809 		}
2810 	} else {
2811 		/*
2812 		 * Persist case: update snd_max, but since we are in
2813 		 * persist mode (no window) we do not update snd_nxt.
2814 		 */
2815 		int xlen = len;
2816 		if (flags & TH_SYN) {
2817 			++xlen;
2818 		}
2819 		if ((flags & TH_FIN) &&
2820 		    !(tp->t_flags & TF_SENTFIN)) {
2821 			++xlen;
2822 			tp->t_flags |= TF_SENTFIN;
2823 		}
2824 		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
2825 			tp->snd_max = tp->snd_nxt + len;
2826 			tp->t_sndtime = tcp_now;
2827 		}
2828 	}
2829 
2830 #if TCPDEBUG
2831 	/*
2832 	 * Trace.
2833 	 */
2834 	if (so_options & SO_DEBUG) {
2835 		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
2836 	}
2837 #endif
2838 
2839 	/*
2840 	 * Fill in IP length and desired time to live and
2841 	 * send to IP level.  There should be a better way
2842 	 * to handle ttl and tos; we could keep them in
2843 	 * the template, but need a way to checksum without them.
2844 	 */
2845 	/*
2846 	 * m->m_pkthdr.len should have been set before the checksum calculation,
2847 	 * because in6_cksum() needs it.
2848 	 */
2849 	if (isipv6) {
2850 		/*
2851 		 * we separately set hoplimit for every segment, since the
2852 		 * user might want to change the value via setsockopt.
2853 		 * Also, desired default hop limit might be changed via
2854 		 * Neighbor Discovery.
2855 		 */
2856 		ip6->ip6_hlim = in6_selecthlim(inp, inp->in6p_route.ro_rt ?
2857 		    inp->in6p_route.ro_rt->rt_ifp : NULL);
2858 
2859 		/* Don't set ECT bit if requested by an app */
2860 
2861 		/* Set ECN bits for testing purposes */
2862 		if (tp->ecn_flags & TE_FORCE_ECT1) {
2863 			ip6->ip6_flow |= htonl(IPTOS_ECN_ECT1 << 20);
2864 		} else if (tp->ecn_flags & TE_FORCE_ECT0) {
2865 			ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
2866 		}
2867 
2868 		KERNEL_DEBUG(DBG_LAYER_BEG,
2869 		    ((inp->inp_fport << 16) | inp->inp_lport),
2870 		    (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
2871 		    (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
2872 		    sendalot, 0, 0);
2873 	} else {
2874 		ASSERT(m->m_pkthdr.len <= IP_MAXPACKET);
2875 		ip->ip_len = (u_short)m->m_pkthdr.len;
2876 		ip->ip_ttl = inp->inp_ip_ttl;   /* XXX */
2877 
2878 		/* Don't set ECN bit if requested by an app */
2879 		ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
2880 
2881 		/* Set ECN bits for testing purposes */
2882 		if (tp->ecn_flags & TE_FORCE_ECT1) {
2883 			ip->ip_tos |= IPTOS_ECN_ECT1;
2884 		} else if (tp->ecn_flags & TE_FORCE_ECT0) {
2885 			ip->ip_tos |= IPTOS_ECN_ECT0;
2886 		}
2887 
2888 		KERNEL_DEBUG(DBG_LAYER_BEG,
2889 		    ((inp->inp_fport << 16) | inp->inp_lport),
2890 		    (((inp->inp_laddr.s_addr & 0xffff) << 16) |
2891 		    (inp->inp_faddr.s_addr & 0xffff)), 0, 0, 0);
2892 	}
2893 
2894 	/*
2895 	 * See if we should do MTU discovery.
2896 	 * The flag is updated based on the following criteria:
2897 	 *	1) Path MTU discovery is authorized by the sysctl
2898 	 *	2) The route isn't set yet (unlikely but could happen)
2899 	 *	3) The route is up
2900 	 *	4) The MTU is not locked (if it is, then discovery has been
2901 	 *	   disabled for that route)
2902 	 */
2903 	if (!isipv6) {
2904 		if (path_mtu_discovery && (tp->t_flags & TF_PMTUD)) {
2905 			ip->ip_off |= IP_DF;
2906 		}
2907 	}
2908 
2909 #if NECP
2910 	{
2911 		necp_kernel_policy_id policy_id;
2912 		necp_kernel_policy_id skip_policy_id;
2913 		u_int32_t route_rule_id;
2914 		u_int32_t pass_flags;
2915 		if (!necp_socket_is_allowed_to_send_recv(inp, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) {
2916 			TCP_LOG_DROP_NECP(isipv6 ? (void *)ip6 : (void *)ip, th, tp, true);
2917 			m_freem(m);
2918 			error = EHOSTUNREACH;
2919 			goto out;
2920 		}
2921 		necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id, pass_flags);
2922 
2923 		if (net_qos_policy_restricted != 0) {
2924 			necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, route_rule_id);
2925 		}
2926 	}
2927 #endif /* NECP */
2928 
2929 #if IPSEC
2930 	if (inp->inp_sp != NULL) {
2931 		ipsec_setsocket(m, so);
2932 	}
2933 #endif /*IPSEC*/
2934 
2935 	/*
2936 	 * The socket is kept locked while sending out packets in ip_output, even if packet chaining is not active.
2937 	 */
2938 	lost = 0;
2939 
2940 	/*
2941 	 * Embed the flow hash in pkt hdr and mark the packet as
2942 	 * capable of flow controlling
2943 	 */
2944 	m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
2945 	m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
2946 	m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
2947 	m->m_pkthdr.pkt_proto = IPPROTO_TCP;
2948 	m->m_pkthdr.tx_tcp_pid = so->last_pid;
2949 	if (so->so_flags & SOF_DELEGATED) {
2950 		m->m_pkthdr.tx_tcp_e_pid = so->e_pid;
2951 	} else {
2952 		m->m_pkthdr.tx_tcp_e_pid = 0;
2953 	}
2954 
2955 	m->m_nextpkt = NULL;
2956 
2957 	if (inp->inp_last_outifp != NULL &&
2958 	    !(inp->inp_last_outifp->if_flags & IFF_LOOPBACK)) {
2959 		/* Hint to prioritize this packet if
2960 		 * 1. the packet has no data,
2961 		 * 2. the interface supports the transmit-start model and did
2962 		 *    not disable ACK prioritization,
2963 		 * 3. only the ACK flag is set, and
2964 		 * 4. there is no outstanding data on this connection.
2965 		 */
2966 		if (len == 0 && (inp->inp_last_outifp->if_eflags & (IFEF_TXSTART | IFEF_NOACKPRI)) == IFEF_TXSTART) {
2967 			if (th->th_flags == TH_ACK &&
2968 			    tp->snd_una == tp->snd_max &&
2969 			    tp->t_timer[TCPT_REXMT] == 0) {
2970 				svc_flags |= PKT_SCF_TCP_ACK;
2971 			}
2972 			if (th->th_flags & TH_SYN) {
2973 				svc_flags |= PKT_SCF_TCP_SYN;
2974 			}
2975 		}
2976 		set_packet_service_class(m, so, sotc, svc_flags);
2977 	} else {
2978 		/*
2979 		 * Optimization for loopback just set the mbuf
2980 		 * service class
2981 		 */
2982 		(void) m_set_service_class(m, so_tc2msc(sotc));
2983 	}
2984 
2985 	if ((th->th_flags & TH_SYN) && tp->t_syn_sent < UINT8_MAX) {
2986 		tp->t_syn_sent++;
2987 	}
2988 	if ((th->th_flags & TH_FIN) && tp->t_fin_sent < UINT8_MAX) {
2989 		tp->t_fin_sent++;
2990 	}
2991 	if ((th->th_flags & TH_RST) && tp->t_rst_sent < UINT8_MAX) {
2992 		tp->t_rst_sent++;
2993 	}
2994 	TCP_LOG_TH_FLAGS(isipv6 ? (void *)ip6 : (void *)ip, th, tp, true,
2995 	    inp->inp_last_outifp != NULL ? inp->inp_last_outifp :
2996 	    inp->inp_boundifp);
2997 
2998 	tp->t_pktlist_sentlen += len;
2999 	tp->t_lastchain++;
3000 
3001 	if (isipv6) {
3002 		DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp,
3003 		    struct ip6 *, ip6, struct tcpcb *, tp, struct tcphdr *,
3004 		    th);
3005 	} else {
3006 		DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp,
3007 		    struct ip *, ip, struct tcpcb *, tp, struct tcphdr *, th);
3008 	}
3009 
3010 	if (tp->t_pktlist_head != NULL) {
3011 		tp->t_pktlist_tail->m_nextpkt = m;
3012 		tp->t_pktlist_tail = m;
3013 	} else {
3014 		packchain_newlist++;
3015 		tp->t_pktlist_head = tp->t_pktlist_tail = m;
3016 	}
3017 
3018 	if (sendalot == 0 || (tp->t_state != TCPS_ESTABLISHED) ||
3019 	    (tp->snd_cwnd <= (tp->snd_wnd / 8)) ||
3020 	    (tp->t_flags & TF_ACKNOW) ||
3021 	    (tp->t_flagsext & TF_FORCE) ||
3022 	    tp->t_lastchain >= tcp_packet_chaining) {
3023 		error = 0;
3024 		while (inp->inp_sndinprog_cnt == 0 &&
3025 		    tp->t_pktlist_head != NULL) {
3026 			packetlist = tp->t_pktlist_head;
3027 			packchain_listadd = tp->t_lastchain;
3028 			packchain_sent++;
3029 			lost = tp->t_pktlist_sentlen;
3030 			TCP_PKTLIST_CLEAR(tp);
3031 
3032 			error = tcp_ip_output(so, tp, packetlist,
3033 			    packchain_listadd, tp_inp_options,
3034 			    (so_options & SO_DONTROUTE),
3035 			    (sack_rxmit || (sack_bytes_rxmt != 0)), isipv6);
3036 			if (error) {
3037 				/*
3038 				 * Account for the rest of the unsent
3039 				 * packets in the packet list for this tcp
3040 				 * in "lost", since we're about to free
3041 				 * the whole list below.
3042 				 */
3043 				lost += tp->t_pktlist_sentlen;
3044 				break;
3045 			} else {
3046 				lost = 0;
3047 			}
3048 		}
3049 		/* tcp was closed while we were in ip; resume close */
3050 		if (inp->inp_sndinprog_cnt == 0 &&
3051 		    (tp->t_flags & TF_CLOSING)) {
3052 			tp->t_flags &= ~TF_CLOSING;
3053 			(void) tcp_close(tp);
3054 			return 0;
3055 		}
3056 	} else {
3057 		error = 0;
3058 		packchain_looped++;
3059 		tcpstat.tcps_sndtotal++;
3060 
3061 		goto again;
3062 	}
3063 	if (error) {
3064 		/*
3065 		 * Assume that the packets were lost, so back out the
3066 		 * sequence number advance, if any.  Note that the "lost"
3067 		 * variable represents the amount of user data sent during
3068 		 * the recent call to ip_output_list() plus the amount of
3069 		 * user data in the packet list for this tcp at the moment.
3070 		 */
3071 		if (!(tp->t_flagsext & TF_FORCE)
3072 		    || tp->t_timer[TCPT_PERSIST] == 0) {
3073 			/*
3074 			 * No need to check for TH_FIN here because
3075 			 * the TF_SENTFIN flag handles that case.
3076 			 */
3077 			if ((flags & TH_SYN) == 0) {
3078 				if (sack_rxmit) {
3079 					if (SEQ_GT((p->rxmit - lost),
3080 					    tp->snd_una)) {
3081 						p->rxmit -= lost;
3082 
3083 						if (SEQ_LT(p->rxmit, p->start)) {
3084 							p->rxmit = p->start;
3085 						}
3086 					} else {
3087 						lost = p->rxmit - tp->snd_una;
3088 						p->rxmit = tp->snd_una;
3089 
3090 						if (SEQ_LT(p->rxmit, p->start)) {
3091 							p->rxmit = p->start;
3092 						}
3093 					}
3094 					tp->sackhint.sack_bytes_rexmit -= lost;
3095 					if (tp->sackhint.sack_bytes_rexmit < 0) {
3096 						tp->sackhint.sack_bytes_rexmit = 0;
3097 					}
3098 				} else {
3099 					if (SEQ_GT((tp->snd_nxt - lost),
3100 					    tp->snd_una)) {
3101 						tp->snd_nxt -= lost;
3102 					} else {
3103 						tp->snd_nxt = tp->snd_una;
3104 					}
3105 				}
3106 			}
3107 		}
3108 out:
3109 		if (tp->t_pktlist_head != NULL) {
3110 			m_freem_list(tp->t_pktlist_head);
3111 		}
3112 		TCP_PKTLIST_CLEAR(tp);
3113 
3114 		if (error == ENOBUFS) {
3115 			/*
3116 			 * Set retransmit timer if not currently set
3117 			 * when we failed to send a segment that can be
3118 			 * retransmitted (i.e. not pure ack or rst)
3119 			 */
3120 			if (tp->t_timer[TCPT_REXMT] == 0 &&
3121 			    tp->t_timer[TCPT_PERSIST] == 0 &&
3122 			    (len != 0 || (flags & (TH_SYN | TH_FIN)) != 0 ||
3123 			    so->so_snd.sb_cc > 0)) {
3124 				tp->t_timer[TCPT_REXMT] =
3125 				    OFFSET_FROM_START(tp, tp->t_rxtcur);
3126 			}
3127 			tp->snd_cwnd = tp->t_maxseg;
3128 			tp->t_bytes_acked = 0;
3129 			tcp_check_timer_state(tp);
3130 			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3131 
3132 			TCP_LOG_OUTPUT(tp, "error ENOBUFS silently handled");
3133 
3134 			tcp_ccdbg_trace(tp, NULL, TCP_CC_OUTPUT_ERROR);
3135 			return 0;
3136 		}
3137 		if (error == EMSGSIZE) {
3138 			/*
3139 			 * ip_output() will have already fixed the route
3140 			 * for us.  tcp_mtudisc() will, as its last action,
3141 			 * initiate retransmission, so it is important to
3142 			 * not do so here.
3143 			 *
3144 			 * If TSO was active, we either got an interface
3145 			 * without TSO capabilities or TSO was turned off.
3146 			 * Disable it for this connection too, and
3147 			 * immediately retry with MSS-sized segments generated
3148 			 * by this function.
3149 			 */
3150 			if (tso) {
3151 				tp->t_flags &= ~TF_TSO;
3152 			}
3153 
3154 			tcp_mtudisc(inp, 0);
3155 			tcp_check_timer_state(tp);
3156 
3157 			TCP_LOG_OUTPUT(tp, "error EMSGSIZE silently handled");
3158 
3159 			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3160 			return 0;
3161 		}
3162 		/*
3163 		 * Unless this is due to interface restriction policy,
3164 		 * treat EHOSTUNREACH/ENETDOWN/EADDRNOTAVAIL as a soft error.
3165 		 */
3166 		if ((error == EHOSTUNREACH || error == ENETDOWN || error == EADDRNOTAVAIL) &&
3167 		    TCPS_HAVERCVDSYN(tp->t_state) &&
3168 		    !inp_restricted_send(inp, inp->inp_last_outifp)) {
3169 			tp->t_softerror = error;
3170 			TCP_LOG_OUTPUT(tp, "soft error %d silently handled", error);
3171 			error = 0;
3172 		} else {
3173 			TCP_LOG_OUTPUT(tp, "error %d", error);
3174 		}
3175 		tcp_check_timer_state(tp);
3176 		KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3177 		return error;
3178 	}
3179 
3180 	tcpstat.tcps_sndtotal++;
3181 
3182 	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3183 	if (sendalot) {
3184 		goto again;
3185 	}
3186 
3187 	tcp_check_timer_state(tp);
3188 
3189 	return 0;
3190 }
3191 
3192 static int
3193 tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt,
3194     int cnt, struct mbuf *opt, int flags, int sack_in_progress, boolean_t isipv6)
3195 {
3196 	int error = 0;
3197 	boolean_t chain;
3198 	boolean_t unlocked = FALSE;
3199 	boolean_t ifdenied = FALSE;
3200 	struct inpcb *inp = tp->t_inpcb;
3201 	struct ifnet *outif = NULL;
3202 	bool check_qos_marking_again = (so->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE) ? FALSE : TRUE;
3203 
3204 	union {
3205 		struct route _ro;
3206 		struct route_in6 _ro6;
3207 	} route_u_ = {};
3208 #define ro route_u_._ro
3209 #define ro6 route_u_._ro6
3210 
3211 	union {
3212 		struct ip_out_args _ipoa;
3213 		struct ip6_out_args _ip6oa;
3214 	} out_args_u_ = {};
3215 #define ipoa out_args_u_._ipoa
3216 #define ip6oa out_args_u_._ip6oa
3217 
3218 	if (isipv6) {
3219 		ip6oa.ip6oa_boundif = IFSCOPE_NONE;
3220 		ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
3221 		ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
3222 		ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3223 	} else {
3224 		ipoa.ipoa_boundif = IFSCOPE_NONE;
3225 		ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
3226 		ipoa.ipoa_sotc = SO_TC_UNSPEC;
3227 		ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3228 	}
3229 
3230 	struct flowadv *adv =
3231 	    (isipv6 ? &ip6oa.ip6oa_flowadv : &ipoa.ipoa_flowadv);
3232 
3233 	/* If socket was bound to an ifindex, tell ip_output about it */
3234 	if (inp->inp_flags & INP_BOUND_IF) {
3235 		if (isipv6) {
3236 			ip6oa.ip6oa_boundif = inp->inp_boundifp->if_index;
3237 			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
3238 		} else {
3239 			ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
3240 			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
3241 		}
3242 	} else if (!in6_embedded_scope && isipv6 && (IN6_IS_SCOPE_EMBED(&inp->in6p_faddr))) {
3243 		ip6oa.ip6oa_boundif = inp->inp_fifscope;
3244 		ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
3245 	}
3246 
3247 	if (INP_NO_CELLULAR(inp)) {
3248 		if (isipv6) {
3249 			ip6oa.ip6oa_flags |=  IP6OAF_NO_CELLULAR;
3250 		} else {
3251 			ipoa.ipoa_flags |=  IPOAF_NO_CELLULAR;
3252 		}
3253 	}
3254 	if (INP_NO_EXPENSIVE(inp)) {
3255 		if (isipv6) {
3256 			ip6oa.ip6oa_flags |=  IP6OAF_NO_EXPENSIVE;
3257 		} else {
3258 			ipoa.ipoa_flags |=  IPOAF_NO_EXPENSIVE;
3259 		}
3260 	}
3261 	if (INP_NO_CONSTRAINED(inp)) {
3262 		if (isipv6) {
3263 			ip6oa.ip6oa_flags |=  IP6OAF_NO_CONSTRAINED;
3264 		} else {
3265 			ipoa.ipoa_flags |=  IPOAF_NO_CONSTRAINED;
3266 		}
3267 	}
3268 	if (INP_AWDL_UNRESTRICTED(inp)) {
3269 		if (isipv6) {
3270 			ip6oa.ip6oa_flags |=  IP6OAF_AWDL_UNRESTRICTED;
3271 		} else {
3272 			ipoa.ipoa_flags |=  IPOAF_AWDL_UNRESTRICTED;
3273 		}
3274 	}
3275 	if (INP_INTCOPROC_ALLOWED(inp) && isipv6) {
3276 		ip6oa.ip6oa_flags |=  IP6OAF_INTCOPROC_ALLOWED;
3277 	}
3278 	if (INP_MANAGEMENT_ALLOWED(inp)) {
3279 		if (isipv6) {
3280 			ip6oa.ip6oa_flags |=  IP6OAF_MANAGEMENT_ALLOWED;
3281 		} else {
3282 			ipoa.ipoa_flags |=  IPOAF_MANAGEMENT_ALLOWED;
3283 		}
3284 	}
3285 	if (isipv6) {
3286 		ip6oa.ip6oa_sotc = so->so_traffic_class;
3287 		ip6oa.ip6oa_netsvctype = so->so_netsvctype;
3288 		ip6oa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount;
3289 	} else {
3290 		ipoa.ipoa_sotc = so->so_traffic_class;
3291 		ipoa.ipoa_netsvctype = so->so_netsvctype;
3292 		ipoa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount;
3293 	}
3294 	if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
3295 		if (isipv6) {
3296 			ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
3297 		} else {
3298 			ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
3299 		}
3300 	}
3301 	if (check_qos_marking_again) {
3302 		if (isipv6) {
3303 			ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY;
3304 		} else {
3305 			ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
3306 		}
3307 	}
3308 	if (isipv6) {
3309 		flags |= IPV6_OUTARGS;
3310 	} else {
3311 		flags |= IP_OUTARGS;
3312 	}
3313 
3314 	/* Copy the cached route and take an extra reference */
3315 	if (isipv6) {
3316 		in6p_route_copyout(inp, &ro6);
3317 	} else {
3318 		inp_route_copyout(inp, &ro);
3319 	}
3320 #if (DEBUG || DEVELOPMENT)
3321 	if ((so->so_flags & SOF_MARK_WAKE_PKT) && pkt != NULL) {
3322 		so->so_flags &= ~SOF_MARK_WAKE_PKT;
3323 		pkt->m_pkthdr.pkt_flags |= PKTF_WAKE_PKT;
3324 	}
3325 #endif /* (DEBUG || DEVELOPMENT) */
3326 
3327 	/*
3328 	 * Make sure ACK/DELACK conditions are cleared before
3329 	 * we unlock the socket.
3330 	 */
3331 	tp->last_ack_sent = tp->rcv_nxt;
3332 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
3333 	tp->t_timer[TCPT_DELACK] = 0;
3334 	tp->t_unacksegs = 0;
3335 	tp->t_unacksegs_ce = 0;
3336 
3337 	/* Increment the count of outstanding send operations */
3338 	inp->inp_sndinprog_cnt++;
3339 
3340 	/*
3341 	 * If allowed, unlock TCP socket while in IP
3342 	 * but only if the connection is established and
3343 	 * in a normal mode where reentrancy on the tcpcb won't be
3344 	 * an issue:
3345 	 * - there is no SACK episode
3346 	 * - we're not in Fast Recovery mode
3347 	 * - we're not sending from an upcall.
3348 	 */
3349 	if (tcp_output_unlocked && !so->so_upcallusecount &&
3350 	    (tp->t_state == TCPS_ESTABLISHED) && (sack_in_progress == 0) &&
3351 	    !IN_FASTRECOVERY(tp) && !(so->so_flags & SOF_MP_SUBFLOW)) {
3352 		unlocked = TRUE;
3353 		socket_unlock(so, 0);
3354 	}
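	/*
	 * Dropping the socket lock is possible because the send-in-progress
	 * count taken just above is intended to keep the PCB from being
	 * reclaimed while output is in flight; the lock is re-acquired right
	 * after the output loop below.
	 */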
3355 
3356 	/*
3357 	 * Don't send down a chain of packets when:
3358 	 * - TCP chaining is disabled
3359 	 * - there is an IPsec rule set
3360 	 * - there is a non-default rule set for the firewall
3361 	 */
3362 
3363 	chain = tcp_packet_chaining > 1
3364 #if IPSEC
3365 	    && ipsec_bypass
3366 #endif
3367 	;         // I'm important, not extraneous
3368 
3369 	while (pkt != NULL) {
3370 		struct mbuf *npkt = pkt->m_nextpkt;
3371 
3372 		if (!chain) {
3373 			pkt->m_nextpkt = NULL;
3374 			/*
3375 			 * If we are not chaining, make sure to set the packet
3376 			 * list count to 0 so that IP takes the right path;
3377 			 * this is important for cases such as IPsec where a
3378 			 * single mbuf might result in multiple mbufs as part
3379 			 * of the encapsulation.  If a non-zero count is passed
3380 			 * down to IP, the head of the chain might change and
3381 			 * we could end up skipping it (thus generating bogus
3382 			 * packets).  Fixing it in IP would be desirable, but
3383 			 * for now this will do.
3384 			 */
3385 			cnt = 0;
3386 		}
3387 		if (isipv6) {
3388 			error = ip6_output_list(pkt, cnt,
3389 			    inp->in6p_outputopts, &ro6, flags, NULL, NULL,
3390 			    &ip6oa);
3391 			ifdenied = (ip6oa.ip6oa_flags & IP6OAF_R_IFDENIED);
3392 		} else {
3393 			error = ip_output_list(pkt, cnt, opt, &ro, flags, NULL,
3394 			    &ipoa);
3395 			ifdenied = (ipoa.ipoa_flags & IPOAF_R_IFDENIED);
3396 		}
3397 
3398 		if (chain || error) {
3399 			/*
3400 			 * If we sent down a chain then we are done since
3401 			 * the callee had taken care of everything; else
3402 			 * we need to free the rest of the chain ourselves.
3403 			 */
3404 			if (!chain) {
3405 				m_freem_list(npkt);
3406 			}
3407 			break;
3408 		}
3409 		pkt = npkt;
3410 	}
3411 
3412 	if (unlocked) {
3413 		socket_lock(so, 0);
3414 	}
3415 
3416 	/*
3417 	 * Enter flow controlled state if the connection is established
3418 	 * and is not in recovery. Flow control is allowed only if there
3419 	 * is outstanding data.
3420 	 *
3421 	 * A connection will enter suspended state even if it is in
3422 	 * recovery.
3423 	 */
3424 	if (((adv->code == FADV_FLOW_CONTROLLED && !IN_FASTRECOVERY(tp)) ||
3425 	    adv->code == FADV_SUSPENDED) &&
3426 	    !(tp->t_flags & TF_CLOSING) &&
3427 	    tp->t_state == TCPS_ESTABLISHED &&
3428 	    SEQ_GT(tp->snd_max, tp->snd_una)) {
3429 		int rc;
3430 		rc = inp_set_fc_state(inp, adv->code);
3431 
3432 		if (rc == 1) {
3433 			tcp_ccdbg_trace(tp, NULL,
3434 			    ((adv->code == FADV_FLOW_CONTROLLED) ?
3435 			    TCP_CC_FLOW_CONTROL : TCP_CC_SUSPEND));
3436 			if (adv->code == FADV_FLOW_CONTROLLED) {
3437 				TCP_LOG_OUTPUT(tp, "flow controlled");
3438 			} else {
3439 				TCP_LOG_OUTPUT(tp, "flow suspended");
3440 			}
3441 		}
3442 	}
3443 
3444 	/*
3445 	 * When an interface queue gets suspended, some of the
3446 	 * packets are dropped. Return ENOBUFS to update the
3447 	 * PCB state.
3448 	 */
3449 	if (adv->code == FADV_SUSPENDED) {
3450 		error = ENOBUFS;
3451 	}
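	/*
	 * The data itself stays in the send buffer; returning ENOBUFS lets
	 * the caller treat the suspended packets as a local drop and arrange
	 * for retransmission rather than assume they went out.
	 */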
3452 
3453 	VERIFY(inp->inp_sndinprog_cnt > 0);
3454 	if (--inp->inp_sndinprog_cnt == 0) {
3455 		inp->inp_flags &= ~(INP_FC_FEEDBACK);
3456 		if (inp->inp_sndingprog_waiters > 0) {
3457 			wakeup(&inp->inp_sndinprog_cnt);
3458 		}
3459 	}
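	/*
	 * Pairs with the increment before the output loop: the last completing
	 * send clears the flow-control feedback hint and wakes any thread
	 * waiting for in-flight output on this PCB to drain.
	 */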
3460 
3461 	if (isipv6) {
3462 		/*
3463 		 * When an NECP IP tunnel policy forces the outbound interface,
3464 		 * ip6_output_list() informs the transport layer of the actual
3465 		 * outgoing interface.
3466 		 */
3467 		if (ip6oa.ip6oa_flags & IP6OAF_BOUND_IF) {
3468 			outif = ifindex2ifnet[ip6oa.ip6oa_boundif];
3469 		} else if (ro6.ro_rt != NULL) {
3470 			outif = ro6.ro_rt->rt_ifp;
3471 		}
3472 	} else {
3473 		if (ro.ro_rt != NULL) {
3474 			outif = ro.ro_rt->rt_ifp;
3475 		}
3476 	}
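	/*
	 * When the QoS marking policy could not be finalized earlier (no
	 * SOF1_QOSMARKING_POLICY_OVERRIDE on the socket), the IP output path
	 * re-evaluated it; propagate the resulting generation count and
	 * allow/deny decision back into the PCB and socket flags.
	 */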
3477 	if (check_qos_marking_again) {
3478 		uint32_t qos_marking_gencount;
3479 		bool allow_qos_marking;
3480 		if (isipv6) {
3481 			qos_marking_gencount = ip6oa.qos_marking_gencount;
3482 			allow_qos_marking = ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED ? TRUE : FALSE;
3483 		} else {
3484 			qos_marking_gencount = ipoa.qos_marking_gencount;
3485 			allow_qos_marking = ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED ? TRUE : FALSE;
3486 		}
3487 		inp->inp_policyresult.results.qos_marking_gencount = qos_marking_gencount;
3488 		if (allow_qos_marking == TRUE) {
3489 			inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
3490 		} else {
3491 			inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
3492 		}
3493 	}
3494 
3495 	if (outif != NULL && outif != inp->inp_last_outifp) {
3496 		/* Update the send byte count */
3497 		if (so->so_snd.sb_cc > 0 && so->so_snd.sb_flags & SB_SNDBYTE_CNT) {
3498 			inp_decr_sndbytes_total(so, so->so_snd.sb_cc);
3499 			inp_decr_sndbytes_allunsent(so, tp->snd_una);
3500 			so->so_snd.sb_flags &= ~SB_SNDBYTE_CNT;
3501 		}
3502 		inp->inp_last_outifp = outif;
3503 #if SKYWALK
3504 		if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3505 			netns_set_ifnet(&inp->inp_netns_token, inp->inp_last_outifp);
3506 		}
3507 #endif /* SKYWALK */
3508 	}
3509 
3510 	if (error != 0 && ifdenied &&
3511 	    (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) {
3512 		soevent(so,
3513 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
3514 	}
3515 
3516 	/* Synchronize cached PCB route & options */
3517 	if (isipv6) {
3518 		in6p_route_copyin(inp, &ro6);
3519 	} else {
3520 		inp_route_copyin(inp, &ro);
3521 	}
3522 
3523 	if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift == 0 &&
3524 	    tp->t_inpcb->inp_route.ro_rt != NULL) {
3525 		/* If we found the route and it has an RTT estimate,
3526 		 * reset the retransmit timer.
3527 		 */
3528 		tcp_getrt_rtt(tp, tp->t_inpcb->in6p_route.ro_rt);
3529 		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
3530 	}
3531 	return error;
3532 #undef ro
3533 #undef ro6
3534 #undef ipoa
3535 #undef ip6oa
3536 }
3537 
3538 int tcptv_persmin_val = TCPTV_PERSMIN;
3539 
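/*
 * Arm (or re-arm) the zero-window persist timer, backing the probe
 * interval off with the current retransmit shift.
 */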
3540 void
3541 tcp_setpersist(struct tcpcb *tp)
3542 {
3543 	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
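	/*
	 * Base probe interval, derived from the smoothed RTT and RTT variance
	 * (both kept in fixed-point); it is scaled by tcp_backoff[] and
	 * clamped below before the timer is armed.
	 */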
3544 
3545 	/* If a PERSIST_TIMER option was set, we limit the time the
3546 	 * persist timer stays active for this connection, in order to
3547 	 * avoid a DoS via zero-window probes.
3548 	 * See rdar://5805356
3549 	 */
3550 
3551 	if (tp->t_persist_timeout != 0 &&
3552 	    tp->t_timer[TCPT_PERSIST] == 0 &&
3553 	    tp->t_persist_stop == 0) {
3554 		tp->t_persist_stop = tcp_now + tp->t_persist_timeout;
3555 	}
3556 
3557 	/*
3558 	 * Start/restart the persistence timer.
3559 	 */
3560 	TCPT_RANGESET(tp->t_timer[TCPT_PERSIST],
3561 	    t * tcp_backoff[tp->t_rxtshift],
3562 	    tcptv_persmin_val, TCPTV_PERSMAX, 0);
3563 	tp->t_timer[TCPT_PERSIST] = OFFSET_FROM_START(tp, tp->t_timer[TCPT_PERSIST]);
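	/*
	 * OFFSET_FROM_START() turns the relative interval into an absolute
	 * deadline on the connection's timer; bumping t_rxtshift below backs
	 * off subsequent probes (capped at TCP_MAXRXTSHIFT).
	 */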
3564 
3565 	if (tp->t_rxtshift < TCP_MAXRXTSHIFT) {
3566 		tp->t_rxtshift++;
3567 	}
3568 }
3569 
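/*
 * Decide whether the receive side of this connection should be throttled:
 * returns 1 when latency measurements point to queueing delay (shrinking
 * the receive buffer's ideal size as a side effect), 0 otherwise.
 */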
3570 static int
3571 tcp_recv_throttle(struct tcpcb *tp)
3572 {
3573 	uint32_t base_rtt, newsize;
3574 	struct sockbuf *sbrcv = &tp->t_inpcb->inp_socket->so_rcv;
3575 
3576 	if (tcp_use_rtt_recvbg == 1 &&
3577 	    TSTMP_SUPPORTED(tp)) {
3578 		/*
3579 		 * Timestamps are supported on this connection. Use
3580 		 * RTT to look for an increase in latency.
3581 		 */
3582 
3583 		/*
3584 		 * If the connection is already being throttled, leave it
3585 		 * in that state until the RTT comes closer to the base RTT.
3586 		 */
3587 		if (tp->t_flagsext & TF_RECV_THROTTLE) {
3588 			return 1;
3589 		}
3590 
3591 		base_rtt = get_base_rtt(tp);
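		/*
		 * Throttle once the measured RTT exceeds the base RTT by more
		 * than target_qdelay.  With hypothetical values base_rtt = 50 ms
		 * and target_qdelay = 100 ms, throttling would start when
		 * t_rttcur rises above 150 ms.
		 */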
3592 
3593 		if (base_rtt != 0 && tp->t_rttcur != 0) {
3594 			/*
3595 			 * If latency has increased on a background flow,
3596 			 * return 1 to start throttling.
3597 			 */
3598 			if (tp->t_rttcur > (base_rtt + target_qdelay)) {
3599 				tp->t_flagsext |= TF_RECV_THROTTLE;
3600 				if (tp->t_recv_throttle_ts == 0) {
3601 					tp->t_recv_throttle_ts = tcp_now;
3602 				}
3603 				/*
3604 				 * Reduce the recv socket buffer size to
3605 				 * minimize latency.
3606 				 */
3607 				if (sbrcv->sb_idealsize >
3608 				    tcp_recv_throttle_minwin) {
3609 					newsize = sbrcv->sb_idealsize >> 1;
3610 					/* Set a minimum of 16 K */
3611 					newsize =
3612 					    max(newsize,
3613 					    tcp_recv_throttle_minwin);
3614 					sbrcv->sb_idealsize = newsize;
3615 				}
3616 				return 1;
3617 			} else {
3618 				return 0;
3619 			}
3620 		}
3621 	}
3622 
3623 	/*
3624 	 * Timestamps are not supported or there is no good RTT
3625 	 * measurement. Use IPDV in this case.
3626 	 */
3627 	if (tp->acc_iaj > tcp_acc_iaj_react_limit) {
3628 		return 1;
3629 	}
3630 
3631 	return 0;
3632 }
3633