xref: /xnu-8796.101.5/bsd/netinet/tcp_output.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
61  * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.10 2001/07/07 04:30:38 silby Exp $
62  */
63 /*
64  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65  * support for mandatory and extensible security protections.  This notice
66  * is included in support of clause 2.2 (b) of the Apple Public License,
67  * Version 2.0.
68  */
69 
70 #define _IP_VHL
71 
72 #include "tcp_includes.h"
73 
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/kernel.h>
77 #include <sys/sysctl.h>
78 #include <sys/mbuf.h>
79 #include <sys/domain.h>
80 #include <sys/protosw.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 
84 #include <net/route.h>
85 #include <net/ntstat.h>
86 #include <net/if_var.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/dlil.h>
90 
91 #include <netinet/in.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/in_var.h>
94 #include <netinet/in_tclass.h>
95 #include <netinet/ip.h>
96 #include <netinet/in_pcb.h>
97 #include <netinet/ip_var.h>
98 #include <mach/sdt.h>
99 #include <netinet6/in6_pcb.h>
100 #include <netinet/ip6.h>
101 #include <netinet6/ip6_var.h>
102 #include <netinet/tcp.h>
103 #include <netinet/tcp_cache.h>
104 #include <netinet/tcp_fsm.h>
105 #include <netinet/tcp_seq.h>
106 #include <netinet/tcp_timer.h>
107 #include <netinet/tcp_var.h>
108 #include <netinet/tcpip.h>
109 #include <netinet/tcp_cc.h>
110 #if TCPDEBUG
111 #include <netinet/tcp_debug.h>
112 #endif
113 #include <netinet/tcp_log.h>
114 #include <sys/kdebug.h>
115 #include <mach/sdt.h>
116 
117 #if IPSEC
118 #include <netinet6/ipsec.h>
119 #endif /*IPSEC*/
120 
121 #if MPTCP
122 #include <netinet/mptcp_var.h>
123 #include <netinet/mptcp.h>
124 #include <netinet/mptcp_opt.h>
125 #include <netinet/mptcp_seq.h>
126 #endif
127 
128 #include <corecrypto/ccaes.h>
129 
130 #define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETTCP, 1)
131 #define DBG_LAYER_END           NETDBG_CODE(DBG_NETTCP, 3)
132 #define DBG_FNC_TCP_OUTPUT      NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1)
133 
134 SYSCTL_SKMEM_TCP_INT(OID_AUTO, path_mtu_discovery,
135     CTLFLAG_RW | CTLFLAG_LOCKED, int, path_mtu_discovery, 1,
136     "Enable Path MTU Discovery");
137 
138 SYSCTL_SKMEM_TCP_INT(OID_AUTO, local_slowstart_flightsize,
139     CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz_local, 8,
140     "Slow start flight size for local networks");
141 
142 SYSCTL_SKMEM_TCP_INT(OID_AUTO, tso, CTLFLAG_RW | CTLFLAG_LOCKED,
143     int, tcp_do_tso, 1, "Enable TCP Segmentation Offload");
144 
145 SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_setup_percentage,
146     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ecn_setup_percentage, 100,
147     "Max ECN setup percentage");
148 
149 SYSCTL_SKMEM_TCP_INT(OID_AUTO, accurate_ecn,
150     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_ecn, 0,
151     "Accurate ECN mode (0: disable, 1: enable ACE feedback");
152 
153 // TO BE REMOVED
154 SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_ack_compression,
155     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_ack_compression, 1,
156     "Enable TCP ACK compression (on (cell only): 1, off: 0, on (all interfaces): 2)");
157 
158 SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_compression_rate,
159     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ack_compression_rate, TCP_COMP_CHANGE_RATE,
160     "Rate at which we force sending new ACKs (in ms)");
161 
162 SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_timestamps,
163     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_randomize_timestamps, 1,
164     "Randomize TCP timestamps to prevent tracking (on: 1, off: 0)");
165 
166 static int
167 sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS
168 {
169 #pragma unused(oidp, arg1, arg2)
170 	int i, err = 0, changed = 0;
171 	struct ifnet *ifp;
172 
173 	err = sysctl_io_number(req, tcp_ecn_outbound, sizeof(int32_t),
174 	    &i, &changed);
175 	if (err != 0 || req->newptr == USER_ADDR_NULL) {
176 		return err;
177 	}
178 
179 	if (changed) {
180 		if ((tcp_ecn_outbound == 0 || tcp_ecn_outbound == 1) &&
181 		    (i == 0 || i == 1)) {
182 			tcp_ecn_outbound = i;
183 			SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_initiate_out, tcp_ecn_outbound);
184 			return err;
185 		}
186 		if (tcp_ecn_outbound == 2 && (i == 0 || i == 1)) {
187 			/*
188 			 * Reset ECN enable flags on non-cellular
189 			 * interfaces so that the system default will take
190 			 * over
191 			 */
192 			ifnet_head_lock_shared();
193 			TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
194 				if (!IFNET_IS_CELLULAR(ifp)) {
195 					if_clear_eflags(ifp,
196 					    IFEF_ECN_ENABLE |
197 					    IFEF_ECN_DISABLE);
198 				}
199 			}
200 			ifnet_head_done();
201 		} else {
202 			/*
203 			 * Set ECN enable flags on non-cellular
204 			 * interfaces
205 			 */
206 			ifnet_head_lock_shared();
207 			TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
208 				if (!IFNET_IS_CELLULAR(ifp)) {
209 					if_set_eflags(ifp, IFEF_ECN_ENABLE);
210 					if_clear_eflags(ifp, IFEF_ECN_DISABLE);
211 				}
212 			}
213 			ifnet_head_done();
214 		}
215 		tcp_ecn_outbound = i;
216 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_initiate_out, tcp_ecn_outbound);
217 	}
218 	/* Change the other one too as the work is done */
219 	if (i == 2 || tcp_ecn_inbound == 2) {
220 		tcp_ecn_inbound = i;
221 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_negotiate_in, tcp_ecn_inbound);
222 	}
223 	return err;
224 }
225 
226 int     tcp_ecn_outbound = 2;
227 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, ecn_initiate_out,
228     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_ecn_outbound, 0,
229     sysctl_change_ecn_setting, "IU",
230     "Initiate ECN for outbound connections");
231 
232 int     tcp_ecn_inbound = 2;
233 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, ecn_negotiate_in,
234     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_ecn_inbound, 0,
235     sysctl_change_ecn_setting, "IU",
236     "Initiate ECN for inbound connections");
237 
238 SYSCTL_SKMEM_TCP_INT(OID_AUTO, packetchain,
239     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_packet_chaining, 50,
240     "Enable TCP output packet chaining");
241 
242 SYSCTL_SKMEM_TCP_INT(OID_AUTO, socket_unlocked_on_output,
243     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_output_unlocked, 1,
244     "Unlock TCP when sending packets down to IP");
245 
246 SYSCTL_SKMEM_TCP_INT(OID_AUTO, min_iaj_win,
247     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_min_iaj_win, MIN_IAJ_WIN,
248     "Minimum recv win based on inter-packet arrival jitter");
249 
250 SYSCTL_SKMEM_TCP_INT(OID_AUTO, acc_iaj_react_limit,
251     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_iaj_react_limit,
252     ACC_IAJ_REACT_LIMIT, "Accumulated IAJ when receiver starts to react");
253 
254 SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufinc,
255     CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_inc,
256     8 * 1024, "Increment in send socket bufffer size");
257 
258 SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufmax,
259     CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_max, 2 * 1024 * 1024,
260     "Maximum send socket buffer size");
261 
262 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_recvbg,
263     CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_use_rtt_recvbg, 1,
264     "Use RTT for bg recv algorithm");
265 
266 SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_throttle_minwin,
267     CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_recv_throttle_minwin, 16 * 1024,
268     "Minimum recv win for throttling");
269 
270 SYSCTL_SKMEM_TCP_INT(OID_AUTO, enable_tlp,
271     CTLFLAG_RW | CTLFLAG_LOCKED,
272     int32_t, tcp_enable_tlp, 1, "Enable Tail loss probe");
273 
274 static int32_t packchain_newlist = 0;
275 static int32_t packchain_looped = 0;
276 static int32_t packchain_sent = 0;
277 
278 /* temporary: for testing */
279 #if IPSEC
280 extern int ipsec_bypass;
281 #endif
282 
283 extern int slowlink_wsize;      /* window correction for slow links */
284 
285 extern u_int32_t kipf_count;
286 
287 static int tcp_ip_output(struct socket *, struct tcpcb *, struct mbuf *,
288     int, struct mbuf *, int, int, boolean_t);
289 static int tcp_recv_throttle(struct tcpcb *tp);
290 
291 __attribute__((noinline))
292 static int32_t
tcp_tfo_check(struct tcpcb * tp,int32_t len)293 tcp_tfo_check(struct tcpcb *tp, int32_t len)
294 {
295 	struct socket *so = tp->t_inpcb->inp_socket;
296 	unsigned int optlen = 0;
297 	unsigned int cookie_len;
298 
299 	if (tp->t_flags & TF_NOOPT) {
300 		goto fallback;
301 	}
302 
303 	if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
304 	    !tcp_heuristic_do_tfo(tp)) {
305 		tp->t_tfo_stats |= TFO_S_HEURISTICS_DISABLE;
306 		tcpstat.tcps_tfo_heuristics_disable++;
307 		goto fallback;
308 	}
309 
310 	if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) {
311 		return len;
312 	}
313 
314 	optlen += TCPOLEN_MAXSEG;
315 
316 	if (tp->t_flags & TF_REQ_SCALE) {
317 		optlen += 4;
318 	}
319 
320 #if MPTCP
321 	if ((so->so_flags & SOF_MP_SUBFLOW) && mptcp_enable &&
322 	    (tp->t_rxtshift <= mptcp_mpcap_retries ||
323 	    (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE))) {
324 		optlen += sizeof(struct mptcp_mpcapable_opt_common) + sizeof(mptcp_key_t);
325 	}
326 #endif /* MPTCP */
327 
328 	if (tp->t_flags & TF_REQ_TSTMP) {
329 		optlen += TCPOLEN_TSTAMP_APPA;
330 	}
331 
332 	if (SACK_ENABLED(tp)) {
333 		optlen += TCPOLEN_SACK_PERMITTED;
334 	}
335 
336 	/* Now, decide whether to use TFO or not */
337 
338 	/* Don't even bother trying if there is no space at all... */
339 	if (MAX_TCPOPTLEN - optlen < TCPOLEN_FASTOPEN_REQ) {
340 		goto fallback;
341 	}
342 
343 	cookie_len = tcp_cache_get_cookie_len(tp);
344 	if (cookie_len == 0) {
345 		/* No cookie, so we request one */
346 		return 0;
347 	}
348 
349 	/* There is not enough space for the cookie, so we cannot do TFO */
350 	if (MAX_TCPOPTLEN - optlen < cookie_len) {
351 		goto fallback;
352 	}
353 
354 	/* Do not send SYN+data if there is more in the queue than MSS */
355 	if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN)) {
356 		goto fallback;
357 	}
358 
359 	/* Ok, everything looks good. We can go on and do TFO */
360 	return len;
361 
362 fallback:
363 	tcp_disable_tfo(tp);
364 	return 0;
365 }
366 
367 /* Returns the number of bytes written to the TCP option-space */
368 __attribute__((noinline))
369 static unsigned int
tcp_tfo_write_cookie_rep(struct tcpcb * tp,unsigned int optlen,u_char * opt)370 tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned int optlen, u_char *opt)
371 {
372 	u_char out[CCAES_BLOCK_SIZE];
373 	unsigned ret = 0;
374 	u_char *bp;
375 
376 	if (MAX_TCPOPTLEN - optlen <
377 	    TCPOLEN_FASTOPEN_REQ + TFO_COOKIE_LEN_DEFAULT) {
378 		return ret;
379 	}
380 
381 	tcp_tfo_gen_cookie(tp->t_inpcb, out, sizeof(out));
382 
383 	bp = opt + optlen;
384 
385 	*bp++ = TCPOPT_FASTOPEN;
386 	*bp++ = 2 + TFO_COOKIE_LEN_DEFAULT;
387 	memcpy(bp, out, TFO_COOKIE_LEN_DEFAULT);
388 	ret += 2 + TFO_COOKIE_LEN_DEFAULT;
389 
390 	tp->t_tfo_stats |= TFO_S_COOKIE_SENT;
391 	tcpstat.tcps_tfo_cookie_sent++;
392 
393 	return ret;
394 }
395 
396 __attribute__((noinline))
397 static unsigned int
tcp_tfo_write_cookie(struct tcpcb * tp,unsigned int optlen,int32_t len,u_char * opt)398 tcp_tfo_write_cookie(struct tcpcb *tp, unsigned int optlen, int32_t len,
399     u_char *opt)
400 {
401 	uint8_t tfo_len;
402 	struct socket *so = tp->t_inpcb->inp_socket;
403 	unsigned ret = 0;
404 	int res;
405 	u_char *bp;
406 
407 	if (TCPOLEN_FASTOPEN_REQ > MAX_TCPOPTLEN - optlen) {
408 		return 0;
409 	}
410 	tfo_len = (uint8_t)(MAX_TCPOPTLEN - optlen - TCPOLEN_FASTOPEN_REQ);
411 
412 	if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) {
413 		/* If there is some data, let's track it */
414 		if (len > 0) {
415 			tp->t_tfo_stats |= TFO_S_SYN_DATA_SENT;
416 			tcpstat.tcps_tfo_syn_data_sent++;
417 		}
418 
419 		return 0;
420 	}
421 
422 	bp = opt + optlen;
423 
424 	/*
425 	 * The cookie will be copied in the appropriate place within the
426 	 * TCP-option space. That way we avoid the need for an intermediate
427 	 * variable.
428 	 */
429 	res = tcp_cache_get_cookie(tp, bp + TCPOLEN_FASTOPEN_REQ, &tfo_len);
430 	if (res == 0) {
431 		*bp++ = TCPOPT_FASTOPEN;
432 		*bp++ = TCPOLEN_FASTOPEN_REQ;
433 		ret += TCPOLEN_FASTOPEN_REQ;
434 
435 		tp->t_tfo_flags |= TFO_F_COOKIE_REQ;
436 
437 		tp->t_tfo_stats |= TFO_S_COOKIE_REQ;
438 		tcpstat.tcps_tfo_cookie_req++;
439 	} else {
440 		*bp++ = TCPOPT_FASTOPEN;
441 		*bp++ = TCPOLEN_FASTOPEN_REQ + tfo_len;
442 
443 		ret += TCPOLEN_FASTOPEN_REQ + tfo_len;
444 
445 		tp->t_tfo_flags |= TFO_F_COOKIE_SENT;
446 
447 		/* If there is some data, let's track it */
448 		if (len > 0) {
449 			tp->t_tfo_stats |= TFO_S_SYN_DATA_SENT;
450 			tcpstat.tcps_tfo_syn_data_sent++;
451 		}
452 	}
453 
454 	return ret;
455 }
456 
457 static inline bool
tcp_send_ecn_flags_on_syn(struct tcpcb * tp)458 tcp_send_ecn_flags_on_syn(struct tcpcb *tp)
459 {
460 	return !(tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT));
461 }
462 
463 void
tcp_set_ecn(struct tcpcb * tp,struct ifnet * ifp)464 tcp_set_ecn(struct tcpcb *tp, struct ifnet *ifp)
465 {
466 	boolean_t inbound;
467 
468 	/*
469 	 * Socket option has precedence
470 	 */
471 	if (tp->ecn_flags & TE_ECN_MODE_ENABLE) {
472 		tp->ecn_flags |= TE_ENABLE_ECN;
473 		goto check_heuristic;
474 	}
475 
476 	if (tp->ecn_flags & TE_ECN_MODE_DISABLE) {
477 		tp->ecn_flags &= ~TE_ENABLE_ECN;
478 		return;
479 	}
480 	/*
481 	 * Per interface setting comes next
482 	 */
483 	if (ifp != NULL) {
484 		if (ifp->if_eflags & IFEF_ECN_ENABLE) {
485 			tp->ecn_flags |= TE_ENABLE_ECN;
486 			goto check_heuristic;
487 		}
488 
489 		if (ifp->if_eflags & IFEF_ECN_DISABLE) {
490 			tp->ecn_flags &= ~TE_ENABLE_ECN;
491 			return;
492 		}
493 	}
494 	/*
495 	 * System wide settings come last
496 	 */
497 	inbound = (tp->t_inpcb->inp_socket->so_head != NULL);
498 	if ((inbound && tcp_ecn_inbound == 1) ||
499 	    (!inbound && tcp_ecn_outbound == 1)) {
500 		tp->ecn_flags |= TE_ENABLE_ECN;
501 		goto check_heuristic;
502 	} else {
503 		tp->ecn_flags &= ~TE_ENABLE_ECN;
504 	}
505 
506 	return;
507 
508 check_heuristic:
509 	if (!tcp_heuristic_do_ecn(tp) && !TCP_ACC_ECN_ENABLED()) {
510 		/* Allow ECN when Accurate ECN is enabled until heuristics are fixed */
511 		tp->ecn_flags &= ~TE_ENABLE_ECN;
512 	}
513 
514 	/*
515 	 * If the interface setting, system-level setting and heuristics
516 	 * allow to enable ECN, randomly select 5% of connections to
517 	 * enable it
518 	 */
519 	if ((tp->ecn_flags & (TE_ECN_MODE_ENABLE | TE_ECN_MODE_DISABLE
520 	    | TE_ENABLE_ECN)) == TE_ENABLE_ECN) {
521 		/*
522 		 * Use the random value in iss for randomizing
523 		 * this selection
524 		 */
525 		if ((tp->iss % 100) >= tcp_ecn_setup_percentage && !TCP_ACC_ECN_ENABLED()) {
526 			/* Don't disable Accurate ECN randomly */
527 			tp->ecn_flags &= ~TE_ENABLE_ECN;
528 		}
529 	}
530 }
531 
532 int
tcp_flight_size(struct tcpcb * tp)533 tcp_flight_size(struct tcpcb *tp)
534 {
535 	int ret;
536 
537 	VERIFY(tp->sackhint.sack_bytes_acked >= 0);
538 	VERIFY(tp->sackhint.sack_bytes_rexmit >= 0);
539 
540 	/*
541 	 * RFC6675, SetPipe (), SACK'd bytes are discounted. All the rest is still in-flight.
542 	 */
543 	ret = tp->snd_nxt - tp->snd_una - tp->sackhint.sack_bytes_acked;
544 
545 	if (ret < 0) {
546 		/*
547 		 * This happens when the RTO-timer fires because snd_nxt gets artificially
548 		 * decreased. If we then receive some SACK-blogs, sack_bytes_acked is
549 		 * going to be high.
550 		 */
551 		ret = 0;
552 	}
553 
554 	return ret;
555 }
556 
557 /*
558  * Either of ECT0 or ECT1 flag should be set
559  * when this function is called
560  */
561 static void
tcp_add_accecn_option(struct tcpcb * tp,uint16_t flags,uint32_t * lp,uint8_t * optlen)562 tcp_add_accecn_option(struct tcpcb *tp, uint16_t flags, uint32_t *lp, uint8_t *optlen)
563 {
564 	uint8_t max_len = TCP_MAXOLEN - *optlen;
565 	uint8_t len = TCPOLEN_ACCECN_EMPTY;
566 
567 	uint32_t e1b = (uint32_t)(tp->t_rcv_ect1_bytes & TCP_ACO_MASK);
568 	uint32_t e0b = (uint32_t)(tp->t_rcv_ect0_bytes & TCP_ACO_MASK);
569 	uint32_t ceb =  (uint32_t)(tp->t_rcv_ce_bytes & TCP_ACO_MASK);
570 
571 	if (max_len < TCPOLEN_ACCECN_EMPTY) {
572 		TCP_LOG(tp, "not enough space to add any AccECN option");
573 		return;
574 	}
575 
576 	if (!(flags & TH_SYN || tp->ecn_flags & (TE_ACO_ECT1 | TE_ACO_ECT0))) {
577 		/*
578 		 * Since this is neither a SYN-ACK packet nor any of the ECT byte
579 		 * counter flags are set, no need to send the option.
580 		 */
581 		return;
582 	}
583 
584 	if (max_len < (TCPOLEN_ACCECN_EMPTY + 1 * TCPOLEN_ACCECN_COUNTER)) {
585 		/* Can carry EMPTY option which can be used to test path in SYN-ACK packet */
586 		if (flags & TH_SYN) {
587 			*lp++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) |
588 			    (TCPOPT_NOP << 8) | TCPOPT_NOP);
589 			*optlen += len + 2; /* 2 NOPs */
590 			TCP_LOG(tp, "add empty AccECN option, optlen=%u", *optlen);
591 		}
592 	} else if (max_len < (TCPOLEN_ACCECN_EMPTY + 2 * TCPOLEN_ACCECN_COUNTER)) {
593 		/* Can carry one option */
594 		len += 1 * TCPOLEN_ACCECN_COUNTER;
595 		if (tp->ecn_flags & TE_ACO_ECT1) {
596 			*lp++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | ((e1b >> 8) & 0xffff));
597 			*lp++ = htonl(((e1b & 0xff) << 24) | (TCPOPT_NOP << 16) | (TCPOPT_NOP << 8) | TCPOPT_NOP);
598 		} else {
599 			*lp++ = htonl((TCPOPT_ACCECN0 << 24) | (len << 16) | ((e0b >> 8) & 0xffff));
600 			*lp++ = htonl(((e0b & 0xff) << 24) | (TCPOPT_NOP << 16) | (TCPOPT_NOP << 8) | TCPOPT_NOP);
601 		}
602 		*optlen += len + 3; /* 3 NOPs */
603 		TCP_LOG(tp, "add single counter for AccECN option, optlen=%u", *optlen);
604 	} else if (max_len < (TCPOLEN_ACCECN_EMPTY + 3 * TCPOLEN_ACCECN_COUNTER)) {
605 		/* Can carry two options */
606 		len += 2 * TCPOLEN_ACCECN_COUNTER;
607 		if (tp->ecn_flags & TE_ACO_ECT1) {
608 			*lp++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | ((e1b >> 8) & 0xffff));
609 			*lp++ = htonl(((e1b & 0xff) << 24) | (ceb & 0xffffff));
610 		} else {
611 			*lp++ = htonl((TCPOPT_ACCECN0 << 24) | (len << 16) | ((e0b >> 8) & 0xffff));
612 			*lp++ = htonl(((e0b & 0xff) << 24) | (ceb & 0xffffff));
613 		}
614 		*optlen += len; /* 0 NOPs */
615 		TCP_LOG(tp, "add 2 counters for AccECN option, optlen=%u", *optlen);
616 	} else {
617 		/*
618 		 * TCP option sufficient to hold full AccECN option
619 		 * but send counter that changed during the entire connection.
620 		 */
621 		len += 3 * TCPOLEN_ACCECN_COUNTER;
622 		/* Can carry all three options */
623 		if (tp->ecn_flags & TE_ACO_ECT1) {
624 			*lp++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | ((e1b >> 8) & 0xffff));
625 			*lp++ = htonl(((e1b & 0xff) << 24) | (ceb & 0xffffff));
626 			*lp++ = htonl(((e0b & 0xffffff) << 8) | TCPOPT_NOP);
627 		} else {
628 			*lp++ = htonl((TCPOPT_ACCECN0 << 24) | (len << 16) | ((e0b >> 8) & 0xffff));
629 			*lp++ = htonl(((e0b & 0xff) << 24) | (ceb & 0xffffff));
630 			*lp++ = htonl(((e1b & 0xffffff) << 8) | TCPOPT_NOP);
631 		}
632 		*optlen += len + 1; /* 1 NOP */
633 		TCP_LOG(tp, "add all 3 counters for AccECN option, optlen=%u", *optlen);
634 	}
635 }
636 
637 /*
638  * Tcp output routine: figure out what should be sent and send it.
639  *
640  * Returns:	0			Success
641  *		EADDRNOTAVAIL
642  *		ENOBUFS
643  *		EMSGSIZE
644  *		EHOSTUNREACH
645  *		ENETDOWN
646  *	ip_output_list:ENOMEM
647  *	ip_output_list:EADDRNOTAVAIL
648  *	ip_output_list:ENETUNREACH
649  *	ip_output_list:EHOSTUNREACH
650  *	ip_output_list:EACCES
651  *	ip_output_list:EMSGSIZE
652  *	ip_output_list:ENOBUFS
653  *	ip_output_list:???		[ignorable: mostly IPSEC/firewall/DLIL]
654  *	ip6_output_list:EINVAL
655  *	ip6_output_list:EOPNOTSUPP
656  *	ip6_output_list:EHOSTUNREACH
657  *	ip6_output_list:EADDRNOTAVAIL
658  *	ip6_output_list:ENETUNREACH
659  *	ip6_output_list:EMSGSIZE
660  *	ip6_output_list:ENOBUFS
661  *	ip6_output_list:???		[ignorable: mostly IPSEC/firewall/DLIL]
662  */
663 int
tcp_output(struct tcpcb * tp)664 tcp_output(struct tcpcb *tp)
665 {
666 	struct inpcb *inp = tp->t_inpcb;
667 	struct socket *so = inp->inp_socket;
668 	int32_t len, recwin, sendwin, off;
669 	uint16_t flags;
670 	int error;
671 	struct mbuf *m;
672 	struct ip *ip = NULL;
673 	struct ip6_hdr *ip6 = NULL;
674 	struct tcphdr *th;
675 	u_char opt[TCP_MAXOLEN];
676 	unsigned int ipoptlen, optlen, hdrlen;
677 	int idle, sendalot, lost = 0;
678 	int sendalot_cnt = 0;
679 	int i, sack_rxmit;
680 	int tso = 0;
681 	int sack_bytes_rxmt;
682 	tcp_seq old_snd_nxt = 0;
683 	struct sackhole *p;
684 #if IPSEC
685 	size_t ipsec_optlen = 0;
686 #endif /* IPSEC */
687 	int    idle_time = 0;
688 	struct mbuf *packetlist = NULL;
689 	struct mbuf *tp_inp_options = inp->inp_depend4.inp4_options;
690 	int isipv6 = inp->inp_vflag & INP_IPV6;
691 	int packchain_listadd = 0;
692 	int so_options = so->so_options;
693 	struct rtentry *rt;
694 	u_int32_t svc_flags = 0, allocated_len;
695 #if MPTCP
696 	boolean_t mptcp_acknow;
697 #endif /* MPTCP */
698 	boolean_t cell = FALSE;
699 	boolean_t wifi = FALSE;
700 	boolean_t wired = FALSE;
701 	boolean_t sack_rescue_rxt = FALSE;
702 	int sotc = so->so_traffic_class;
703 	boolean_t do_not_compress = FALSE;
704 	boolean_t sack_rxmted = FALSE;
705 
706 	/*
707 	 * Determine length of data that should be transmitted,
708 	 * and flags that will be used.
709 	 * If there is some data or critical controls (SYN, RST)
710 	 * to send, then transmit; otherwise, investigate further.
711 	 */
712 	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
713 
714 	/* Since idle_time is signed integer, the following integer subtraction
715 	 * will take care of wrap around of tcp_now
716 	 */
717 	idle_time = tcp_now - tp->t_rcvtime;
718 	if (idle && idle_time >= TCP_IDLETIMEOUT(tp)) {
719 		if (CC_ALGO(tp)->after_idle != NULL &&
720 		    (tp->tcp_cc_index != TCP_CC_ALGO_CUBIC_INDEX ||
721 		    idle_time >= TCP_CC_CWND_NONVALIDATED_PERIOD)) {
722 			CC_ALGO(tp)->after_idle(tp);
723 			tcp_ccdbg_trace(tp, NULL, TCP_CC_IDLE_TIMEOUT);
724 		}
725 
726 		/*
727 		 * Do some other tasks that need to be done after
728 		 * idle time
729 		 */
730 		if (!SLIST_EMPTY(&tp->t_rxt_segments)) {
731 			tcp_rxtseg_clean(tp);
732 		}
733 
734 		/* If stretch ack was auto-disabled, re-evaluate it */
735 		tcp_cc_after_idle_stretchack(tp);
736 		tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
737 	}
738 	tp->t_flags &= ~TF_LASTIDLE;
739 	if (idle) {
740 		if (tp->t_flags & TF_MORETOCOME) {
741 			tp->t_flags |= TF_LASTIDLE;
742 			idle = 0;
743 		}
744 	}
745 #if MPTCP
746 	if (tp->t_mpflags & TMPF_RESET) {
747 		tcp_check_timer_state(tp);
748 		/*
749 		 * Once a RST has been sent for an MPTCP subflow,
750 		 * the subflow socket stays around until deleted.
751 		 * No packets such as FINs must be sent after RST.
752 		 */
753 		return 0;
754 	}
755 #endif /* MPTCP */
756 
757 again:
758 #if MPTCP
759 	mptcp_acknow = FALSE;
760 
761 	if (so->so_flags & SOF_MP_SUBFLOW && SEQ_LT(tp->snd_nxt, tp->snd_una)) {
762 		os_log_error(mptcp_log_handle, "%s - %lx: snd_nxt is %u and snd_una is %u, cnt %d\n",
763 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(tp->t_mpsub->mpts_mpte),
764 		    tp->snd_nxt, tp->snd_una, sendalot_cnt);
765 	}
766 #endif
767 	do_not_compress = FALSE;
768 	sendalot_cnt++;
769 
770 	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
771 
772 	if (isipv6) {
773 		KERNEL_DEBUG(DBG_LAYER_BEG,
774 		    ((inp->inp_fport << 16) | inp->inp_lport),
775 		    (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
776 		    (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
777 		    sendalot, 0, 0);
778 	} else {
779 		KERNEL_DEBUG(DBG_LAYER_BEG,
780 		    ((inp->inp_fport << 16) | inp->inp_lport),
781 		    (((inp->inp_laddr.s_addr & 0xffff) << 16) |
782 		    (inp->inp_faddr.s_addr & 0xffff)),
783 		    sendalot, 0, 0);
784 	}
785 	/*
786 	 * If the route generation id changed, we need to check that our
787 	 * local (source) IP address is still valid. If it isn't either
788 	 * return error or silently do nothing (assuming the address will
789 	 * come back before the TCP connection times out).
790 	 */
791 	rt = inp->inp_route.ro_rt;
792 	if (rt != NULL && ROUTE_UNUSABLE(&tp->t_inpcb->inp_route)) {
793 		struct ifnet *ifp;
794 		struct in_ifaddr *ia = NULL;
795 		struct in6_ifaddr *ia6 = NULL;
796 		int found_srcaddr = 0;
797 
798 		/* disable multipages at the socket */
799 		somultipages(so, FALSE);
800 
801 		/* Disable TSO for the socket until we know more */
802 		tp->t_flags &= ~TF_TSO;
803 
804 		soif2kcl(so, FALSE);
805 
806 		if (isipv6) {
807 			ia6 = ifa_foraddr6(&inp->in6p_laddr);
808 			if (ia6 != NULL) {
809 				found_srcaddr = 1;
810 			}
811 		} else {
812 			ia = ifa_foraddr(inp->inp_laddr.s_addr);
813 			if (ia != NULL) {
814 				found_srcaddr = 1;
815 			}
816 		}
817 
818 		/* check that the source address is still valid */
819 		if (found_srcaddr == 0) {
820 			soevent(so,
821 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOSRCADDR));
822 
823 			if (tp->t_state >= TCPS_CLOSE_WAIT) {
824 				tcp_drop(tp, EADDRNOTAVAIL);
825 				return EADDRNOTAVAIL;
826 			}
827 
828 			/*
829 			 * Set retransmit  timer if it wasn't set,
830 			 * reset Persist timer and shift register as the
831 			 * advertised peer window may not be valid anymore
832 			 */
833 			if (tp->t_timer[TCPT_REXMT] == 0) {
834 				tp->t_timer[TCPT_REXMT] =
835 				    OFFSET_FROM_START(tp, tp->t_rxtcur);
836 				if (tp->t_timer[TCPT_PERSIST] != 0) {
837 					tp->t_timer[TCPT_PERSIST] = 0;
838 					tp->t_persist_stop = 0;
839 					TCP_RESET_REXMT_STATE(tp);
840 				}
841 			}
842 
843 			if (tp->t_pktlist_head != NULL) {
844 				m_freem_list(tp->t_pktlist_head);
845 			}
846 			TCP_PKTLIST_CLEAR(tp);
847 
848 			/* drop connection if source address isn't available */
849 			if (so->so_flags & SOF_NOADDRAVAIL) {
850 				tcp_drop(tp, EADDRNOTAVAIL);
851 				return EADDRNOTAVAIL;
852 			} else {
853 				TCP_LOG_OUTPUT(tp, "no source address silently ignored");
854 				tcp_check_timer_state(tp);
855 				return 0; /* silently ignore, keep data in socket: address may be back */
856 			}
857 		}
858 		if (ia != NULL) {
859 			IFA_REMREF(&ia->ia_ifa);
860 		}
861 
862 		if (ia6 != NULL) {
863 			IFA_REMREF(&ia6->ia_ifa);
864 		}
865 
866 		/*
867 		 * Address is still valid; check for multipages capability
868 		 * again in case the outgoing interface has changed.
869 		 */
870 		RT_LOCK(rt);
871 		if ((ifp = rt->rt_ifp) != NULL) {
872 			somultipages(so, (ifp->if_hwassist & IFNET_MULTIPAGES));
873 			tcp_set_tso(tp, ifp);
874 			soif2kcl(so, (ifp->if_eflags & IFEF_2KCL));
875 			tcp_set_ecn(tp, ifp);
876 		}
877 		if (rt->rt_flags & RTF_UP) {
878 			RT_GENID_SYNC(rt);
879 		}
880 		/*
881 		 * See if we should do MTU discovery. Don't do it if:
882 		 *	1) it is disabled via the sysctl
883 		 *	2) the route isn't up
884 		 *	3) the MTU is locked (if it is, then discovery
885 		 *         has been disabled)
886 		 */
887 
888 		if (!path_mtu_discovery || ((rt != NULL) &&
889 		    (!(rt->rt_flags & RTF_UP) ||
890 		    (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
891 			tp->t_flags &= ~TF_PMTUD;
892 		} else {
893 			tp->t_flags |= TF_PMTUD;
894 		}
895 
896 		RT_UNLOCK(rt);
897 	}
898 
899 	if (rt != NULL) {
900 		cell = IFNET_IS_CELLULAR(rt->rt_ifp);
901 		wifi = (!cell && IFNET_IS_WIFI(rt->rt_ifp));
902 		wired = (!wifi && IFNET_IS_WIRED(rt->rt_ifp));
903 	}
904 
905 	/*
906 	 * If we've recently taken a timeout, snd_max will be greater than
907 	 * snd_nxt.  There may be SACK information that allows us to avoid
908 	 * resending already delivered data.  Adjust snd_nxt accordingly.
909 	 */
910 	if (SACK_ENABLED(tp) && SEQ_LT(tp->snd_nxt, tp->snd_max)) {
911 		tcp_sack_adjust(tp);
912 	}
913 	sendalot = 0;
914 	off = tp->snd_nxt - tp->snd_una;
915 	sendwin = min(tp->snd_wnd, tp->snd_cwnd);
916 
917 	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) {
918 		sendwin = min(sendwin, slowlink_wsize);
919 	}
920 
921 	flags = tcp_outflags[tp->t_state];
922 	/*
923 	 * Send any SACK-generated retransmissions.  If we're explicitly
924 	 * trying to send out new data (when sendalot is 1), bypass this
925 	 * function. If we retransmit in fast recovery mode, decrement
926 	 * snd_cwnd, since we're replacing a (future) new transmission
927 	 * with a retransmission now, and we previously incremented
928 	 * snd_cwnd in tcp_input().
929 	 */
930 	/*
931 	 * Still in sack recovery , reset rxmit flag to zero.
932 	 */
933 	sack_rxmit = 0;
934 	sack_bytes_rxmt = 0;
935 	len = 0;
936 	p = NULL;
937 	if (SACK_ENABLED(tp) && IN_FASTRECOVERY(tp) &&
938 	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
939 		int32_t cwin;
940 
941 		if (tcp_do_better_lr) {
942 			cwin = min(tp->snd_wnd, tp->snd_cwnd) - tcp_flight_size(tp);
943 			if (cwin <= 0 && sack_rxmted == FALSE) {
944 				/* Allow to clock out at least on per period */
945 				cwin = tp->t_maxseg;
946 			}
947 
948 			sack_rxmted = TRUE;
949 		} else {
950 			cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt;
951 		}
952 		if (cwin < 0) {
953 			cwin = 0;
954 		}
955 		/* Do not retransmit SACK segments beyond snd_recover */
956 		if (SEQ_GT(p->end, tp->snd_recover)) {
957 			/*
958 			 * (At least) part of sack hole extends beyond
959 			 * snd_recover. Check to see if we can rexmit data
960 			 * for this hole.
961 			 */
962 			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
963 				/*
964 				 * Can't rexmit any more data for this hole.
965 				 * That data will be rexmitted in the next
966 				 * sack recovery episode, when snd_recover
967 				 * moves past p->rxmit.
968 				 */
969 				p = NULL;
970 				goto after_sack_rexmit;
971 			} else {
972 				/* Can rexmit part of the current hole */
973 				len = ((int32_t)min(cwin,
974 				    tp->snd_recover - p->rxmit));
975 			}
976 		} else {
977 			len = ((int32_t)min(cwin, p->end - p->rxmit));
978 		}
979 		if (len > 0) {
980 			off = p->rxmit - tp->snd_una;
981 			sack_rxmit = 1;
982 			sendalot = 1;
983 			/* Everything sent after snd_nxt will allow us to account for fast-retransmit of the retransmitted segment */
984 			tp->send_highest_sack = tp->snd_nxt;
985 			tp->t_new_dupacks = 0;
986 			tcpstat.tcps_sack_rexmits++;
987 			tcpstat.tcps_sack_rexmit_bytes +=
988 			    min(len, tp->t_maxseg);
989 		} else {
990 			len = 0;
991 		}
992 	}
993 after_sack_rexmit:
994 	/*
995 	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
996 	 * state flags.
997 	 */
998 	if (tp->t_flags & TF_NEEDFIN) {
999 		flags |= TH_FIN;
1000 	}
1001 
1002 	/*
1003 	 * If in persist timeout with window of 0, send 1 byte.
1004 	 * Otherwise, if window is small but nonzero
1005 	 * and timer expired, we will send what we can
1006 	 * and go to transmit state.
1007 	 */
1008 	if (tp->t_flagsext & TF_FORCE) {
1009 		if (sendwin == 0) {
1010 			/*
1011 			 * If we still have some data to send, then
1012 			 * clear the FIN bit.  Usually this would
1013 			 * happen below when it realizes that we
1014 			 * aren't sending all the data.  However,
1015 			 * if we have exactly 1 byte of unsent data,
1016 			 * then it won't clear the FIN bit below,
1017 			 * and if we are in persist state, we wind
1018 			 * up sending the packet without recording
1019 			 * that we sent the FIN bit.
1020 			 *
1021 			 * We can't just blindly clear the FIN bit,
1022 			 * because if we don't have any more data
1023 			 * to send then the probe will be the FIN
1024 			 * itself.
1025 			 */
1026 			if (off < so->so_snd.sb_cc) {
1027 				flags &= ~TH_FIN;
1028 			}
1029 			sendwin = 1;
1030 		} else {
1031 			tp->t_timer[TCPT_PERSIST] = 0;
1032 			tp->t_persist_stop = 0;
1033 			TCP_RESET_REXMT_STATE(tp);
1034 		}
1035 	}
1036 
1037 	/*
1038 	 * If snd_nxt == snd_max and we have transmitted a FIN, the
1039 	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
1040 	 * a negative length.  This can also occur when TCP opens up
1041 	 * its congestion window while receiving additional duplicate
1042 	 * acks after fast-retransmit because TCP will reset snd_nxt
1043 	 * to snd_max after the fast-retransmit.
1044 	 *
1045 	 * In the normal retransmit-FIN-only case, however, snd_nxt will
1046 	 * be set to snd_una, the offset will be 0, and the length may
1047 	 * wind up 0.
1048 	 *
1049 	 * If sack_rxmit is true we are retransmitting from the scoreboard
1050 	 * in which case len is already set.
1051 	 */
1052 	if (sack_rxmit == 0) {
1053 		if (sack_bytes_rxmt == 0) {
1054 			len = min(so->so_snd.sb_cc, sendwin) - off;
1055 		} else {
1056 			int32_t cwin;
1057 
1058 			if (tcp_do_better_lr) {
1059 				cwin = tp->snd_cwnd - tcp_flight_size(tp);
1060 			} else {
1061 				cwin = tp->snd_cwnd -
1062 				    (tp->snd_nxt - tp->sack_newdata) -
1063 				    sack_bytes_rxmt;
1064 			}
1065 			if (cwin < 0) {
1066 				cwin = 0;
1067 			}
1068 			/*
1069 			 * We are inside of a SACK recovery episode and are
1070 			 * sending new data, having retransmitted all the
1071 			 * data possible in the scoreboard.
1072 			 */
1073 			len = min(so->so_snd.sb_cc, tp->snd_wnd) - off;
1074 			/*
1075 			 * Don't remove this (len > 0) check !
1076 			 * We explicitly check for len > 0 here (although it
1077 			 * isn't really necessary), to work around a gcc
1078 			 * optimization issue - to force gcc to compute
1079 			 * len above. Without this check, the computation
1080 			 * of len is bungled by the optimizer.
1081 			 */
1082 			if (len > 0) {
1083 				len = imin(len, cwin);
1084 			} else {
1085 				len = 0;
1086 			}
1087 			/*
1088 			 * At this point SACK recovery can not send any
1089 			 * data from scoreboard or any new data. Check
1090 			 * if we can do a rescue retransmit towards the
1091 			 * tail end of recovery window.
1092 			 */
1093 			if (len == 0 && cwin > 0 &&
1094 			    SEQ_LT(tp->snd_fack, tp->snd_recover) &&
1095 			    !(tp->t_flagsext & TF_RESCUE_RXT)) {
1096 				len = min((tp->snd_recover - tp->snd_fack),
1097 				    tp->t_maxseg);
1098 				len = imin(len, cwin);
1099 				old_snd_nxt = tp->snd_nxt;
1100 				sack_rescue_rxt = TRUE;
1101 				tp->snd_nxt = tp->snd_recover - len;
1102 				/*
1103 				 * If FIN has been sent, snd_max
1104 				 * must have been advanced to cover it.
1105 				 */
1106 				if ((tp->t_flags & TF_SENTFIN) &&
1107 				    tp->snd_max == tp->snd_recover) {
1108 					tp->snd_nxt--;
1109 				}
1110 
1111 				off = tp->snd_nxt - tp->snd_una;
1112 				sendalot = 0;
1113 				tp->t_flagsext |= TF_RESCUE_RXT;
1114 			}
1115 		}
1116 	}
1117 
1118 	/*
1119 	 * Lop off SYN bit if it has already been sent.  However, if this
1120 	 * is SYN-SENT state and if segment contains data and if we don't
1121 	 * know that foreign host supports TAO, suppress sending segment.
1122 	 */
1123 	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
1124 		if (tp->t_state == TCPS_SYN_RECEIVED && tfo_enabled(tp) && tp->snd_nxt == tp->snd_una + 1) {
1125 			/* We are sending the SYN again! */
1126 			off--;
1127 			len++;
1128 		} else {
1129 			if (tp->t_state != TCPS_SYN_RECEIVED || tfo_enabled(tp)) {
1130 				flags &= ~TH_SYN;
1131 			}
1132 
1133 			off--;
1134 			len++;
1135 			if (len > 0 && tp->t_state == TCPS_SYN_SENT) {
1136 				while (inp->inp_sndinprog_cnt == 0 &&
1137 				    tp->t_pktlist_head != NULL) {
1138 					packetlist = tp->t_pktlist_head;
1139 					packchain_listadd = tp->t_lastchain;
1140 					packchain_sent++;
1141 					TCP_PKTLIST_CLEAR(tp);
1142 
1143 					error = tcp_ip_output(so, tp, packetlist,
1144 					    packchain_listadd, tp_inp_options,
1145 					    (so_options & SO_DONTROUTE),
1146 					    (sack_rxmit || (sack_bytes_rxmt != 0)),
1147 					    isipv6);
1148 				}
1149 
1150 				/*
1151 				 * tcp was closed while we were in ip,
1152 				 * resume close
1153 				 */
1154 				if (inp->inp_sndinprog_cnt == 0 &&
1155 				    (tp->t_flags & TF_CLOSING)) {
1156 					tp->t_flags &= ~TF_CLOSING;
1157 					(void) tcp_close(tp);
1158 				} else {
1159 					tcp_check_timer_state(tp);
1160 				}
1161 				KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END,
1162 				    0, 0, 0, 0, 0);
1163 				return 0;
1164 			}
1165 		}
1166 	}
1167 
1168 	/*
1169 	 * Be careful not to send data and/or FIN on SYN segments.
1170 	 * This measure is needed to prevent interoperability problems
1171 	 * with not fully conformant TCP implementations.
1172 	 *
1173 	 * In case of TFO, we handle the setting of the len in
1174 	 * tcp_tfo_check. In case TFO is not enabled, never ever send
1175 	 * SYN+data.
1176 	 */
1177 	if ((flags & TH_SYN) && !tfo_enabled(tp)) {
1178 		len = 0;
1179 		flags &= ~TH_FIN;
1180 	}
1181 
1182 	/*
1183 	 * Don't send a RST with data.
1184 	 */
1185 	if (flags & TH_RST) {
1186 		len = 0;
1187 	}
1188 
1189 	if ((flags & TH_SYN) && tp->t_state <= TCPS_SYN_SENT && tfo_enabled(tp)) {
1190 		len = tcp_tfo_check(tp, len);
1191 	}
1192 
1193 	/*
1194 	 * The check here used to be (len < 0). Some times len is zero
1195 	 * when the congestion window is closed and we need to check
1196 	 * if persist timer has to be set in that case. But don't set
1197 	 * persist until connection is established.
1198 	 */
1199 	if (len <= 0 && !(flags & TH_SYN)) {
1200 		/*
1201 		 * If FIN has been sent but not acked,
1202 		 * but we haven't been called to retransmit,
1203 		 * len will be < 0.  Otherwise, window shrank
1204 		 * after we sent into it.  If window shrank to 0,
1205 		 * cancel pending retransmit, pull snd_nxt back
1206 		 * to (closed) window, and set the persist timer
1207 		 * if it isn't already going.  If the window didn't
1208 		 * close completely, just wait for an ACK.
1209 		 */
1210 		len = 0;
1211 		if (sendwin == 0) {
1212 			tp->t_timer[TCPT_REXMT] = 0;
1213 			tp->t_timer[TCPT_PTO] = 0;
1214 			TCP_RESET_REXMT_STATE(tp);
1215 			tp->snd_nxt = tp->snd_una;
1216 			off = 0;
1217 			if (tp->t_timer[TCPT_PERSIST] == 0) {
1218 				tcp_setpersist(tp);
1219 			}
1220 		}
1221 	}
1222 
1223 	/*
1224 	 * Automatic sizing of send socket buffer. Increase the send
1225 	 * socket buffer size if all of the following criteria are met
1226 	 *	1. the receiver has enough buffer space for this data
1227 	 *	2. send buffer is filled to 7/8th with data (so we actually
1228 	 *	   have data to make use of it);
1229 	 *	3. our send window (slow start and congestion controlled) is
1230 	 *	   larger than sent but unacknowledged data in send buffer.
1231 	 */
1232 	if (!INP_WAIT_FOR_IF_FEEDBACK(inp) && !IN_FASTRECOVERY(tp) &&
1233 	    (so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE &&
1234 	    tcp_cansbgrow(&so->so_snd)) {
1235 		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
1236 		    so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
1237 		    sendwin >= (so->so_snd.sb_cc - (tp->snd_nxt - tp->snd_una))) {
1238 			if (sbreserve(&so->so_snd,
1239 			    min(so->so_snd.sb_hiwat + tcp_autosndbuf_inc,
1240 			    tcp_autosndbuf_max)) == 1) {
1241 				so->so_snd.sb_idealsize = so->so_snd.sb_hiwat;
1242 			}
1243 		}
1244 	}
1245 
1246 	/*
1247 	 * Truncate to the maximum segment length or enable TCP Segmentation
1248 	 * Offloading (if supported by hardware) and ensure that FIN is removed
1249 	 * if the length no longer contains the last data byte.
1250 	 *
1251 	 * TSO may only be used if we are in a pure bulk sending state.
1252 	 * The presence of TCP-MD5, SACK retransmits, SACK advertizements,
1253 	 * filters and IP options, as well as disabling hardware checksum
1254 	 * offload prevent using TSO.  With TSO the TCP header is the same
1255 	 * (except for the sequence number) for all generated packets.  This
1256 	 * makes it impossible to transmit any options which vary per generated
1257 	 * segment or packet.
1258 	 *
1259 	 * The length of TSO bursts is limited to TCP_MAXWIN.  That limit and
1260 	 * removal of FIN (if not already catched here) are handled later after
1261 	 * the exact length of the TCP options are known.
1262 	 */
1263 #if IPSEC
1264 	/*
1265 	 * Pre-calculate here as we save another lookup into the darknesses
1266 	 * of IPsec that way and can actually decide if TSO is ok.
1267 	 */
1268 	if (ipsec_bypass == 0) {
1269 		ipsec_optlen = ipsec_hdrsiz_tcp(tp);
1270 	}
1271 #endif
1272 	if (len > tp->t_maxseg) {
1273 		if ((tp->t_flags & TF_TSO) && tcp_do_tso && hwcksum_tx &&
1274 		    kipf_count == 0 &&
1275 		    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
1276 		    sack_bytes_rxmt == 0 &&
1277 		    inp->inp_options == NULL &&
1278 		    inp->in6p_options == NULL
1279 #if IPSEC
1280 		    && ipsec_optlen == 0
1281 #endif
1282 		    ) {
1283 			tso = 1;
1284 			sendalot = 0;
1285 		} else {
1286 			len = tp->t_maxseg;
1287 			sendalot = 1;
1288 			tso = 0;
1289 		}
1290 	} else {
1291 		tso = 0;
1292 	}
1293 
1294 	/* Send one segment or less as a tail loss probe */
1295 	if (tp->t_flagsext & TF_SENT_TLPROBE) {
1296 		len = min(len, tp->t_maxseg);
1297 		sendalot = 0;
1298 		tso = 0;
1299 	}
1300 
1301 #if MPTCP
1302 	if (so->so_flags & SOF_MP_SUBFLOW && off < 0) {
1303 		os_log_error(mptcp_log_handle, "%s - %lx: offset is negative! len %d off %d\n",
1304 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(tp->t_mpsub->mpts_mpte),
1305 		    len, off);
1306 	}
1307 
1308 	if ((so->so_flags & SOF_MP_SUBFLOW) &&
1309 	    !(tp->t_mpflags & TMPF_TCP_FALLBACK)) {
1310 		int newlen = len;
1311 		struct mptcb *mp_tp = tptomptp(tp);
1312 		if (tp->t_state >= TCPS_ESTABLISHED &&
1313 		    (tp->t_mpflags & TMPF_SND_MPPRIO ||
1314 		    tp->t_mpflags & TMPF_SND_REM_ADDR ||
1315 		    tp->t_mpflags & TMPF_SND_MPFAIL ||
1316 		    (tp->t_mpflags & TMPF_SND_KEYS &&
1317 		    mp_tp->mpt_version == MPTCP_VERSION_0) ||
1318 		    tp->t_mpflags & TMPF_SND_JACK ||
1319 		    tp->t_mpflags & TMPF_MPTCP_ECHO_ADDR)) {
1320 			if (len > 0) {
1321 				len = 0;
1322 				tso = 0;
1323 			}
1324 			/*
1325 			 * On a new subflow, don't try to send again, because
1326 			 * we are still waiting for the fourth ack.
1327 			 */
1328 			if (!(tp->t_mpflags & TMPF_PREESTABLISHED)) {
1329 				sendalot = 1;
1330 			}
1331 			mptcp_acknow = TRUE;
1332 		} else {
1333 			mptcp_acknow = FALSE;
1334 		}
1335 		/*
1336 		 * The contiguous bytes in the subflow socket buffer can be
1337 		 * discontiguous at the MPTCP level. Since only one DSS
1338 		 * option can be sent in one packet, reduce length to match
1339 		 * the contiguous MPTCP level. Set sendalot to send remainder.
1340 		 */
1341 		if (len > 0 && off >= 0) {
1342 			newlen = mptcp_adj_sendlen(so, off);
1343 		}
1344 
1345 		if (newlen < len) {
1346 			len = newlen;
1347 			if (len <= tp->t_maxseg) {
1348 				tso = 0;
1349 			}
1350 		}
1351 	}
1352 #endif /* MPTCP */
1353 
1354 	if (sack_rxmit) {
1355 		if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc)) {
1356 			flags &= ~TH_FIN;
1357 		}
1358 	} else {
1359 		if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) {
1360 			flags &= ~TH_FIN;
1361 		}
1362 	}
1363 	/*
1364 	 * Compare available window to amount of window
1365 	 * known to peer (as advertised window less
1366 	 * next expected input).  If the difference is at least two
1367 	 * max size segments, or at least 25% of the maximum possible
1368 	 * window, then want to send a window update to peer.
1369 	 */
1370 	recwin = tcp_sbspace(tp);
1371 
1372 	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
1373 		if (recwin < (int32_t)(so->so_rcv.sb_hiwat / 4) &&
1374 		    recwin < (int)tp->t_maxseg) {
1375 			recwin = 0;
1376 		}
1377 	} else {
1378 		struct mptcb *mp_tp = tptomptp(tp);
1379 		struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
1380 
1381 		if (recwin < (int32_t)(mp_so->so_rcv.sb_hiwat / 4) &&
1382 		    recwin < (int)tp->t_maxseg) {
1383 			recwin = 0;
1384 		}
1385 	}
1386 
1387 #if TRAFFIC_MGT
1388 	if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
1389 		/*
1390 		 * Timestamp MUST be supported to use rledbat unless we haven't
1391 		 * yet negotiated it.
1392 		 */
1393 		if (TCP_RLEDBAT_ENABLED(tp) || (tcp_rledbat && tp->t_state <
1394 		    TCPS_ESTABLISHED)) {
1395 			if (recwin > 0 && tcp_cc_rledbat.get_rlwin != NULL) {
1396 				/* Min of flow control window and rledbat window */
1397 				recwin = imin(recwin, tcp_cc_rledbat.get_rlwin(tp));
1398 			}
1399 		} else if (recwin > 0 && tcp_recv_throttle(tp)) {
1400 			uint32_t min_iaj_win = tcp_min_iaj_win * tp->t_maxseg;
1401 			uint32_t bg_rwintop = tp->rcv_adv;
1402 			if (SEQ_LT(bg_rwintop, tp->rcv_nxt + min_iaj_win)) {
1403 				bg_rwintop =  tp->rcv_nxt + min_iaj_win;
1404 			}
1405 			recwin = imin((int32_t)(bg_rwintop - tp->rcv_nxt),
1406 			    recwin);
1407 			if (recwin < 0) {
1408 				recwin = 0;
1409 			}
1410 		}
1411 	}
1412 #endif /* TRAFFIC_MGT */
1413 
1414 	if (recwin > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) {
1415 		recwin = (int32_t)(TCP_MAXWIN << tp->rcv_scale);
1416 	}
1417 
1418 	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
1419 		if (recwin < (int32_t)(tp->rcv_adv - tp->rcv_nxt)) {
1420 			recwin = (int32_t)(tp->rcv_adv - tp->rcv_nxt);
1421 		}
1422 	} else {
1423 		struct mptcb *mp_tp = tptomptp(tp);
1424 		int64_t recwin_announced = (int64_t)(mp_tp->mpt_rcvadv - mp_tp->mpt_rcvnxt);
1425 
1426 		/* Don't remove what we announced at the MPTCP-layer */
1427 		VERIFY(recwin_announced < INT32_MAX && recwin_announced > INT32_MIN);
1428 		if (recwin < (int32_t)recwin_announced) {
1429 			recwin = (int32_t)recwin_announced;
1430 		}
1431 	}
1432 
1433 	/*
1434 	 * Sender silly window avoidance.   We transmit under the following
1435 	 * conditions when len is non-zero:
1436 	 *
1437 	 *	- we've timed out (e.g. persist timer)
1438 	 *	- we need to retransmit
1439 	 *	- We have a full segment (or more with TSO)
1440 	 *	- This is the last buffer in a write()/send() and we are
1441 	 *	  either idle or running NODELAY
1442 	 *	- we have more then 1/2 the maximum send window's worth of
1443 	 *	  data (receiver may be limited the window size)
1444 	 */
1445 	if (len) {
1446 		if (tp->t_flagsext & TF_FORCE) {
1447 			goto send;
1448 		}
1449 		if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
1450 			goto send;
1451 		}
1452 		if (sack_rxmit) {
1453 			goto send;
1454 		}
1455 
1456 		/*
1457 		 * If this here is the first segment after SYN/ACK and TFO
1458 		 * is being used, then we always send it, regardless of Nagle,...
1459 		 */
1460 		if (tp->t_state == TCPS_SYN_RECEIVED &&
1461 		    tfo_enabled(tp) &&
1462 		    (tp->t_tfo_flags & TFO_F_COOKIE_VALID) &&
1463 		    tp->snd_nxt == tp->iss + 1) {
1464 			goto send;
1465 		}
1466 
1467 		/*
1468 		 * Send new data on the connection only if it is
1469 		 * not flow controlled
1470 		 */
1471 		if (!INP_WAIT_FOR_IF_FEEDBACK(inp) ||
1472 		    tp->t_state != TCPS_ESTABLISHED) {
1473 			if (len >= tp->t_maxseg) {
1474 				goto send;
1475 			}
1476 
1477 			if (!(tp->t_flags & TF_MORETOCOME) &&
1478 			    (idle || tp->t_flags & TF_NODELAY ||
1479 			    (tp->t_flags & TF_MAXSEGSNT) ||
1480 			    ALLOW_LIMITED_TRANSMIT(tp)) &&
1481 			    (tp->t_flags & TF_NOPUSH) == 0 &&
1482 			    (len + off >= so->so_snd.sb_cc ||
1483 			    /*
1484 			     * MPTCP needs to respect the DSS-mappings. So, it
1485 			     * may be sending data that *could* have been
1486 			     * coalesced, but cannot because of
1487 			     * mptcp_adj_sendlen().
1488 			     */
1489 			    so->so_flags & SOF_MP_SUBFLOW)) {
1490 				goto send;
1491 			}
1492 			if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
1493 				goto send;
1494 			}
1495 		} else {
1496 			tcpstat.tcps_fcholdpacket++;
1497 		}
1498 	}
1499 
1500 	if (recwin > 0) {
1501 		/*
1502 		 * "adv" is the amount we can increase the window,
1503 		 * taking into account that we are limited by
1504 		 * TCP_MAXWIN << tp->rcv_scale.
1505 		 */
1506 		int32_t adv, oldwin = 0;
1507 		adv = imin(recwin, (int)TCP_MAXWIN << tp->rcv_scale) -
1508 		    (tp->rcv_adv - tp->rcv_nxt);
1509 
1510 		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
1511 			oldwin = tp->rcv_adv - tp->rcv_nxt;
1512 		}
1513 
1514 		if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY) {
1515 			if (adv >= (int32_t) (2 * tp->t_maxseg)) {
1516 				/*
1517 				 * Update only if the resulting scaled value of
1518 				 * the window changed, or if there is a change in
1519 				 * the sequence since the last ack. This avoids
1520 				 * what appears as dupe ACKS (see rdar://5640997)
1521 				 *
1522 				 * If streaming is detected avoid sending too many
1523 				 * window updates. We will depend on the delack
1524 				 * timer to send a window update when needed.
1525 				 *
1526 				 * If there is more data to read, don't send an ACK.
1527 				 * Otherwise we will end up sending many ACKs if the
1528 				 * application is doing micro-reads.
1529 				 */
1530 				if (!(tp->t_flags & TF_STRETCHACK) &&
1531 				    (tp->last_ack_sent != tp->rcv_nxt ||
1532 				    ((oldwin + adv) >> tp->rcv_scale) >
1533 				    (oldwin >> tp->rcv_scale))) {
1534 					goto send;
1535 				}
1536 			}
1537 		} else {
1538 			if (adv >= (int32_t) (2 * tp->t_maxseg)) {
1539 				/*
1540 				 * ACK every second full-sized segment, if the
1541 				 * ACK is advancing or the window becomes bigger
1542 				 */
1543 				if (so->so_rcv.sb_cc < so->so_rcv.sb_lowat &&
1544 				    (tp->last_ack_sent != tp->rcv_nxt ||
1545 				    ((oldwin + adv) >> tp->rcv_scale) >
1546 				    (oldwin >> tp->rcv_scale))) {
1547 					goto send;
1548 				}
1549 			} else if (tp->t_flags & TF_DELACK) {
1550 				/*
1551 				 * If we delayed the ACK and the window
1552 				 * is not advancing by a lot (< 2MSS), ACK
1553 				 * immediately if the last incoming packet had
1554 				 * the push flag set and we emptied the buffer.
1555 				 *
1556 				 * This takes care of a sender doing small
1557 				 * repeated writes with Nagle enabled.
1558 				 */
1559 				if (so->so_rcv.sb_cc == 0 &&
1560 				    tp->last_ack_sent != tp->rcv_nxt &&
1561 				    (tp->t_flagsext & TF_LAST_IS_PSH)) {
1562 					goto send;
1563 				}
1564 			}
1565 		}
1566 		if (4 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
1567 			goto send;
1568 		}
1569 
1570 		/*
1571 		 * Make sure that the delayed ack timer is set if
1572 		 * we delayed sending a window update because of
1573 		 * streaming detection.
1574 		 */
1575 		if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY &&
1576 		    (tp->t_flags & TF_STRETCHACK) &&
1577 		    !(tp->t_flags & TF_DELACK)) {
1578 			tp->t_flags |= TF_DELACK;
1579 			tp->t_timer[TCPT_DELACK] =
1580 			    OFFSET_FROM_START(tp, tcp_delack);
1581 		}
1582 	}
1583 
1584 	/*
1585 	 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
1586 	 * is also a catch-all for the retransmit timer timeout case.
1587 	 */
1588 	if (tp->t_flags & TF_ACKNOW) {
1589 		if (tp->t_forced_acks > 0) {
1590 			tp->t_forced_acks--;
1591 		}
1592 		goto send;
1593 	}
1594 	if ((flags & TH_RST) || (flags & TH_SYN)) {
1595 		goto send;
1596 	}
1597 	if (SEQ_GT(tp->snd_up, tp->snd_una)) {
1598 		goto send;
1599 	}
1600 #if MPTCP
1601 	if (mptcp_acknow) {
1602 		goto send;
1603 	}
1604 #endif /* MPTCP */
1605 	/*
1606 	 * If our state indicates that FIN should be sent
1607 	 * and we have not yet done so, then we need to send.
1608 	 */
1609 	if ((flags & TH_FIN) &&
1610 	    (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una)) {
1611 		goto send;
1612 	}
1613 	/*
1614 	 * In SACK, it is possible for tcp_output to fail to send a segment
1615 	 * after the retransmission timer has been turned off.  Make sure
1616 	 * that the retransmission timer is set.
1617 	 */
1618 	if (SACK_ENABLED(tp) && (tp->t_state >= TCPS_ESTABLISHED) &&
1619 	    SEQ_GT(tp->snd_max, tp->snd_una) &&
1620 	    tp->t_timer[TCPT_REXMT] == 0 &&
1621 	    tp->t_timer[TCPT_PERSIST] == 0) {
1622 		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp,
1623 		    tp->t_rxtcur);
1624 		goto just_return;
1625 	}
1626 	/*
1627 	 * TCP window updates are not reliable, rather a polling protocol
1628 	 * using ``persist'' packets is used to insure receipt of window
1629 	 * updates.  The three ``states'' for the output side are:
1630 	 *	idle			not doing retransmits or persists
1631 	 *	persisting		to move a small or zero window
1632 	 *	(re)transmitting	and thereby not persisting
1633 	 *
1634 	 * tp->t_timer[TCPT_PERSIST]
1635 	 *	is set when we are in persist state.
1636 	 * tp->t_force
1637 	 *	is set when we are called to send a persist packet.
1638 	 * tp->t_timer[TCPT_REXMT]
1639 	 *	is set when we are retransmitting
1640 	 * The output side is idle when both timers are zero.
1641 	 *
1642 	 * If send window is too small, there is data to transmit, and no
1643 	 * retransmit or persist is pending, then go to persist state.
1644 	 * If nothing happens soon, send when timer expires:
1645 	 * if window is nonzero, transmit what we can,
1646 	 * otherwise force out a byte.
1647 	 */
1648 	if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
1649 	    tp->t_timer[TCPT_PERSIST] == 0) {
1650 		TCP_RESET_REXMT_STATE(tp);
1651 		tcp_setpersist(tp);
1652 	}
1653 just_return:
1654 	/*
1655 	 * If there is no reason to send a segment, just return.
1656 	 * but if there is some packets left in the packet list, send them now.
1657 	 */
1658 	while (inp->inp_sndinprog_cnt == 0 &&
1659 	    tp->t_pktlist_head != NULL) {
1660 		packetlist = tp->t_pktlist_head;
1661 		packchain_listadd = tp->t_lastchain;
1662 		packchain_sent++;
1663 		TCP_PKTLIST_CLEAR(tp);
1664 
1665 		error = tcp_ip_output(so, tp, packetlist,
1666 		    packchain_listadd,
1667 		    tp_inp_options, (so_options & SO_DONTROUTE),
1668 		    (sack_rxmit || (sack_bytes_rxmt != 0)), isipv6);
1669 	}
1670 	/* tcp was closed while we were in ip; resume close */
1671 	if (inp->inp_sndinprog_cnt == 0 &&
1672 	    (tp->t_flags & TF_CLOSING)) {
1673 		tp->t_flags &= ~TF_CLOSING;
1674 		(void) tcp_close(tp);
1675 	} else {
1676 		tcp_check_timer_state(tp);
1677 	}
1678 	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
1679 	return 0;
1680 
1681 send:
1682 	/*
1683 	 * Set TF_MAXSEGSNT flag if the segment size is greater than
1684 	 * the max segment size.
1685 	 */
1686 	if (len > 0) {
1687 		do_not_compress = TRUE;
1688 
1689 		if (len >= tp->t_maxseg) {
1690 			tp->t_flags |= TF_MAXSEGSNT;
1691 		} else {
1692 			tp->t_flags &= ~TF_MAXSEGSNT;
1693 		}
1694 	}
1695 	/*
1696 	 * Before ESTABLISHED, force sending of initial options
1697 	 * unless TCP set not to do any options.
1698 	 * NOTE: we assume that the IP/TCP header plus TCP options
1699 	 * always fit in a single mbuf, leaving room for a maximum
1700 	 * link header, i.e.
1701 	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
1702 	 */
1703 	optlen = 0;
1704 	if (isipv6) {
1705 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1706 	} else {
1707 		hdrlen = sizeof(struct tcpiphdr);
1708 	}
1709 	if (flags & TH_SYN) {
1710 		tp->snd_nxt = tp->iss;
1711 		if ((tp->t_flags & TF_NOOPT) == 0) {
1712 			u_short mss;
1713 
1714 			opt[0] = TCPOPT_MAXSEG;
1715 			opt[1] = TCPOLEN_MAXSEG;
1716 			mss = htons((u_short) tcp_mssopt(tp));
1717 			(void)memcpy(opt + 2, &mss, sizeof(mss));
1718 			optlen = TCPOLEN_MAXSEG;
1719 
1720 			if ((tp->t_flags & TF_REQ_SCALE) &&
1721 			    ((flags & TH_ACK) == 0 ||
1722 			    (tp->t_flags & TF_RCVD_SCALE))) {
1723 				*((u_int32_t *)(void *)(opt + optlen)) = htonl(
1724 					TCPOPT_NOP << 24 |
1725 					        TCPOPT_WINDOW << 16 |
1726 					        TCPOLEN_WINDOW << 8 |
1727 					        tp->request_r_scale);
1728 				optlen += 4;
1729 			}
1730 #if MPTCP
1731 			if (mptcp_enable && (so->so_flags & SOF_MP_SUBFLOW)) {
1732 				optlen = mptcp_setup_syn_opts(so, opt, optlen);
1733 			}
1734 #endif /* MPTCP */
1735 		}
1736 	}
1737 
1738 	/*
1739 	 * Send a timestamp and echo-reply if this is a SYN and our side
1740 	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
1741 	 * and our peer have sent timestamps in our SYN's.
1742 	 */
1743 	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
1744 	    (flags & TH_RST) == 0 &&
1745 	    ((flags & TH_ACK) == 0 ||
1746 	    (tp->t_flags & TF_RCVD_TSTMP))) {
1747 		u_int32_t *lp = (u_int32_t *)(void *)(opt + optlen);
1748 
1749 		/* Form timestamp option as shown in appendix A of RFC 1323. */
1750 		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
1751 		*lp++ = htonl(tcp_now + tp->t_ts_offset);
1752 		*lp   = htonl(tp->ts_recent);
1753 		optlen += TCPOLEN_TSTAMP_APPA;
1754 	}
1755 
1756 	if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) {
1757 		/*
1758 		 * Tack on the SACK permitted option *last*.
1759 		 * And do padding of options after tacking this on.
1760 		 * This is because of MSS, TS, WinScale and Signatures are
1761 		 * all present, we have just 2 bytes left for the SACK
1762 		 * permitted option, which is just enough.
1763 		 */
1764 		/*
1765 		 * If this is the first SYN of connection (not a SYN
1766 		 * ACK), include SACK permitted option.  If this is a
1767 		 * SYN ACK, include SACK permitted option if peer has
1768 		 * already done so. This is only for active connect,
1769 		 * since the syncache takes care of the passive connect.
1770 		 */
1771 		if ((flags & TH_SYN) &&
1772 		    (!(flags & TH_ACK) || (tp->t_flags & TF_SACK_PERMIT))) {
1773 			u_char *bp;
1774 			bp = (u_char *)opt + optlen;
1775 
1776 			*bp++ = TCPOPT_SACK_PERMITTED;
1777 			*bp++ = TCPOLEN_SACK_PERMITTED;
1778 			optlen += TCPOLEN_SACK_PERMITTED;
1779 		}
1780 	}
1781 #if MPTCP
1782 	if (so->so_flags & SOF_MP_SUBFLOW) {
1783 		/*
1784 		 * Its important to piggyback acks with data as ack only packets
1785 		 * may get lost and data packets that don't send Data ACKs
1786 		 * still advance the subflow level ACK and therefore make it
1787 		 * hard for the remote end to recover in low cwnd situations.
1788 		 */
1789 		if (len != 0) {
1790 			tp->t_mpflags |= (TMPF_SEND_DSN |
1791 			    TMPF_MPTCP_ACKNOW);
1792 		} else {
1793 			tp->t_mpflags |= TMPF_MPTCP_ACKNOW;
1794 		}
1795 		optlen = mptcp_setup_opts(tp, off, &opt[0], optlen, flags,
1796 		    len, &mptcp_acknow, &do_not_compress);
1797 		tp->t_mpflags &= ~TMPF_SEND_DSN;
1798 	}
1799 #endif /* MPTCP */
1800 
1801 	if (tfo_enabled(tp) && !(tp->t_flags & TF_NOOPT) &&
1802 	    (flags & (TH_SYN | TH_ACK)) == TH_SYN) {
1803 		optlen += tcp_tfo_write_cookie(tp, optlen, len, opt);
1804 	}
1805 
1806 	if (tfo_enabled(tp) &&
1807 	    (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
1808 	    (tp->t_tfo_flags & TFO_F_OFFER_COOKIE)) {
1809 		optlen += tcp_tfo_write_cookie_rep(tp, optlen, opt);
1810 	}
1811 
1812 	if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) {
1813 		/*
1814 		 * Send SACKs if necessary.  This should be the last
1815 		 * option processed.  Only as many SACKs are sent as
1816 		 * are permitted by the maximum options size.
1817 		 *
1818 		 * In general, SACK blocks consume 8*n+2 bytes.
1819 		 * So a full size SACK blocks option is 34 bytes
1820 		 * (to generate 4 SACK blocks).  At a minimum,
1821 		 * we need 10 bytes (to generate 1 SACK block).
1822 		 * If TCP Timestamps (12 bytes) and TCP Signatures
1823 		 * (18 bytes) are both present, we'll just have
1824 		 * 10 bytes for SACK options 40 - (12 + 18).
1825 		 */
1826 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
1827 		    (tp->t_flags & TF_SACK_PERMIT) &&
1828 		    (tp->rcv_numsacks > 0 || TCP_SEND_DSACK_OPT(tp)) &&
1829 		    MAX_TCPOPTLEN - optlen >= TCPOLEN_SACK + 2) {
1830 			unsigned int sackoptlen = 0;
1831 			int nsack, padlen;
1832 			u_char *bp = (u_char *)opt + optlen;
1833 			u_int32_t *lp;
1834 
1835 			nsack = (MAX_TCPOPTLEN - optlen - 2) / TCPOLEN_SACK;
1836 			nsack = min(nsack, (tp->rcv_numsacks +
1837 			    (TCP_SEND_DSACK_OPT(tp) ? 1 : 0)));
1838 			sackoptlen = (2 + nsack * TCPOLEN_SACK);
1839 			VERIFY(sackoptlen < UINT8_MAX);
1840 
1841 			/*
1842 			 * First we need to pad options so that the
1843 			 * SACK blocks can start at a 4-byte boundary
1844 			 * (sack option and length are at a 2 byte offset).
1845 			 */
1846 			padlen = (MAX_TCPOPTLEN - optlen - sackoptlen) % 4;
1847 			optlen += padlen;
1848 			while (padlen-- > 0) {
1849 				*bp++ = TCPOPT_NOP;
1850 			}
1851 
1852 			tcpstat.tcps_sack_send_blocks++;
1853 			*bp++ = TCPOPT_SACK;
1854 			*bp++ = (uint8_t)sackoptlen;
1855 			lp = (u_int32_t *)(void *)bp;
1856 
1857 			/*
1858 			 * First block of SACK option should represent
1859 			 * DSACK. Prefer to send SACK information if there
1860 			 * is space for only one SACK block. This will
1861 			 * allow for faster recovery.
1862 			 */
1863 			if (TCP_SEND_DSACK_OPT(tp) && nsack > 0 &&
1864 			    (tp->rcv_numsacks == 0 || nsack > 1)) {
1865 				*lp++ = htonl(tp->t_dsack_lseq);
1866 				*lp++ = htonl(tp->t_dsack_rseq);
1867 				tcpstat.tcps_dsack_sent++;
1868 				tp->t_dsack_sent++;
1869 				nsack--;
1870 			}
1871 			VERIFY(nsack == 0 || tp->rcv_numsacks >= nsack);
1872 			for (i = 0; i < nsack; i++) {
1873 				struct sackblk sack = tp->sackblks[i];
1874 				*lp++ = htonl(sack.start);
1875 				*lp++ = htonl(sack.end);
1876 			}
1877 			optlen += sackoptlen;
1878 
1879 			/* Make sure we didn't write too much */
1880 			VERIFY((u_char *)lp - opt <= MAX_TCPOPTLEN);
1881 		}
1882 	}
1883 
1884 	/*
1885 	 * AccECN option - after SACK
1886 	 * Don't send on <SYN>,
1887 	 * send only on <SYN,ACK> before ACCECN is negotiated or
1888 	 * when doing an AccECN session.
1889 	 */
1890 	if (TCP_ACC_ECN_ON(tp) ||
1891 	    (TCP_ACC_ECN_ENABLED() && (flags & (TH_SYN | TH_ACK)) ==
1892 	    (TH_SYN | TH_ACK))) {
1893 		uint32_t *lp = (uint32_t *)(void *)(opt + optlen);
1894 		/* lp will become outdated after options are added */
1895 		tcp_add_accecn_option(tp, flags, lp, (uint8_t *)&optlen);
1896 	}
1897 
1898 	/* Pad TCP options to a 4 byte boundary */
1899 	if (optlen < MAX_TCPOPTLEN && (optlen % sizeof(u_int32_t))) {
1900 		int pad = sizeof(u_int32_t) - (optlen % sizeof(u_int32_t));
1901 		u_char *bp = (u_char *)opt + optlen;
1902 
1903 		optlen += pad;
1904 		while (pad) {
1905 			*bp++ = TCPOPT_EOL;
1906 			pad--;
1907 		}
1908 	}
1909 
1910 	/*
1911 	 * For Accurate ECN, send ACE flag based on r.cep, if
1912 	 * We have completed handshake and are in ESTABLISHED state, and
1913 	 * This is not the final ACK of 3WHS.
1914 	 */
1915 	if (TCP_ACC_ECN_ON(tp) && TCPS_HAVEESTABLISHED(tp->t_state) &&
1916 	    (tp->ecn_flags & TE_ACE_FINAL_ACK_3WHS) == 0) {
1917 		uint8_t ace = tp->t_rcv_ce_packets & TCP_ACE_MASK;
1918 		if (ace & 0x01) {
1919 			flags |= TH_ECE;
1920 		} else {
1921 			flags &= ~TH_ECE;
1922 		}
1923 		if (ace & 0x02) {
1924 			flags |= TH_CWR;
1925 		} else {
1926 			flags &= ~TH_CWR;
1927 		}
1928 		if (ace & 0x04) {
1929 			flags |= TH_AE;
1930 		} else {
1931 			flags &= ~TH_AE;
1932 		}
1933 	}
1934 
1935 	/*
1936 	 * RFC 3168 states that:
1937 	 * - If you ever sent an ECN-setup SYN/SYN-ACK you must be prepared
1938 	 * to handle the TCP ECE flag, even if you also later send a
1939 	 * non-ECN-setup SYN/SYN-ACK.
1940 	 * - If you ever send a non-ECN-setup SYN/SYN-ACK, you must not set
1941 	 * the ip ECT flag.
1942 	 *
1943 	 * It is not clear how the ECE flag would ever be set if you never
1944 	 * set the IP ECT flag on outbound packets. All the same, we use
1945 	 * the TE_SETUPSENT to indicate that we have committed to handling
1946 	 * the TCP ECE flag correctly. We use the TE_SENDIPECT to indicate
1947 	 * whether or not we should set the IP ECT flag on outbound packet
1948 	 *
1949 	 * For a SYN-ACK, send an ECN setup SYN-ACK
1950 	 *
1951 	 * Below we send ECN for three different handhshake states:
1952 	 * 1. Server received SYN and is sending a SYN-ACK (state->TCPS_SYN_RECEIVED)
1953 	 *    - both classic and Accurate ECN have special encoding
1954 	 * 2. Client is sending SYN packet (state->SYN_SENT)
1955 	 *    - both classic and Accurate ECN have special encoding
1956 	 * 3. Client is sending final ACK of 3WHS (state->ESTABLISHED)
1957 	 *    - Only Accurate ECN has special encoding
1958 	 */
1959 	if ((flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
1960 	    (tp->ecn_flags & TE_ENABLE_ECN)) {
1961 		/* Server received either legacy or Accurate ECN setup SYN */
1962 		if (tp->ecn_flags & (TE_SETUPRECEIVED | TE_ACE_SETUPRECEIVED)) {
1963 			if (tcp_send_ecn_flags_on_syn(tp)) {
1964 				if (TCP_ACC_ECN_ENABLED() && (tp->ecn_flags & TE_ACE_SETUPRECEIVED)) {
1965 					/*
1966 					 * Accurate ECN mode is on. Initialize packet and byte counters
1967 					 * for the server sending SYN-ACK. Although s_cep will be initialized
1968 					 * during input processing of ACK of SYN-ACK, initialize here as well
1969 					 * in case ACK gets lost.
1970 					 *
1971 					 * Non-zero initial values are used to
1972 					 * support a stateless handshake (see
1973 					 * Section 5.1 of AccECN draft) and to be
1974 					 * distinct from cases where the fields
1975 					 * are incorrectly zeroed.
1976 					 */
1977 					tp->t_rcv_ce_packets = 5;
1978 					tp->t_snd_ce_packets = 5;
1979 
1980 					/* Initialize ECT byte counter to 1 to distinguish zeroing of options */
1981 					tp->t_rcv_ect1_bytes = tp->t_rcv_ect0_bytes = 1;
1982 					tp->t_snd_ect1_bytes = tp->t_snd_ect0_bytes = 1;
1983 
1984 					/* Initialize CE byte counter to 0 */
1985 					tp->t_rcv_ce_bytes = tp->t_snd_ce_bytes = 0;
1986 
1987 					if (tp->ecn_flags & TE_ACE_SETUP_NON_ECT) {
1988 						flags |= TH_CWR;
1989 						/* Remove the setup flag as it is also used for final ACK */
1990 						tp->ecn_flags &= ~TE_ACE_SETUP_NON_ECT;
1991 						tcpstat.tcps_ecn_ace_syn_not_ect++;
1992 					} else if (tp->ecn_flags & TE_ACE_SETUP_ECT1) {
1993 						flags |= (TH_CWR | TH_ECE);
1994 						tp->ecn_flags &= ~TE_ACE_SETUP_ECT1;
1995 						tcpstat.tcps_ecn_ace_syn_ect1++;
1996 					} else if (tp->ecn_flags & TE_ACE_SETUP_ECT0) {
1997 						flags |= TH_AE;
1998 						tp->ecn_flags &= ~TE_ACE_SETUP_ECT0;
1999 						tcpstat.tcps_ecn_ace_syn_ect0++;
2000 					} else if (tp->ecn_flags & TE_ACE_SETUP_CE) {
2001 						flags |= (TH_AE | TH_CWR);
2002 						tp->ecn_flags &= ~TE_ACE_SETUP_CE;
2003 						/*
2004 						 * Receive counter is updated on
2005 						 * all acceptable packets except
2006 						 * CE on SYN packets (SYN=1, ACK=0)
2007 						 */
2008 						tcpstat.tcps_ecn_ace_syn_ce++;
2009 					} else {
2010 						/* We shouldn't come here */
2011 						panic("ECN flags (0x%x) not set correctly", tp->ecn_flags);
2012 					}
2013 					/*
2014 					 * We are not yet committing to send IP ECT packets when
2015 					 * Accurate ECN mode is on
2016 					 */
2017 					tp->ecn_flags |= (TE_ACE_SETUPSENT);
2018 				} else if (tp->ecn_flags & TE_SETUPRECEIVED) {
2019 					/*
2020 					 * Setting TH_ECE makes this an ECN-setup
2021 					 * SYN-ACK
2022 					 */
2023 					flags |= TH_ECE;
2024 					/*
2025 					 * Record that we sent the ECN-setup and
2026 					 * default to setting IP ECT.
2027 					 */
2028 					tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT);
2029 				}
2030 				tcpstat.tcps_ecn_server_setup++;
2031 				tcpstat.tcps_ecn_server_success++;
2032 			} else {
2033 				/*
2034 				 * We sent an ECN-setup SYN-ACK but it was
2035 				 * dropped. Fallback to non-ECN-setup
2036 				 * SYN-ACK and clear flag to indicate that
2037 				 * we should not send data with IP ECT set
2038 				 *
2039 				 * Pretend we didn't receive an
2040 				 * ECN-setup SYN.
2041 				 *
2042 				 * We already incremented the counter
2043 				 * assuming that the ECN setup will
2044 				 * succeed. Decrementing here
2045 				 * tcps_ecn_server_success to correct it.
2046 				 */
2047 				if (tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT)) {
2048 					tcpstat.tcps_ecn_lost_synack++;
2049 					tcpstat.tcps_ecn_server_success--;
2050 					tp->ecn_flags |= TE_LOST_SYNACK;
2051 				}
2052 
2053 				tp->ecn_flags &=
2054 				    ~(TE_SETUPRECEIVED | TE_SENDIPECT |
2055 				    TE_SENDCWR | TE_ACE_SETUPRECEIVED);
2056 			}
2057 		}
2058 	} else if ((flags & (TH_SYN | TH_ACK)) == TH_SYN &&
2059 	    (tp->ecn_flags & TE_ENABLE_ECN)) {
2060 		if (tcp_send_ecn_flags_on_syn(tp)) {
2061 			if (TCP_ACC_ECN_ENABLED()) {
2062 				/* We are negotiating AccECN in SYN */
2063 				flags |= TH_ACE;
2064 				/*
2065 				 * For AccECN, we only set the ECN-setup sent
2066 				 * flag as we are not committing to set ECT yet.
2067 				 */
2068 				tp->ecn_flags |= (TE_ACE_SETUPSENT);
2069 			} else {
2070 				/*
2071 				 * Setting TH_ECE and TH_CWR makes this an
2072 				 * ECN-setup SYN
2073 				 */
2074 				flags |= (TH_ECE | TH_CWR);
2075 				/*
2076 				 * Record that we sent the ECN-setup and default to
2077 				 * setting IP ECT.
2078 				 */
2079 				tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT);
2080 			}
2081 			tcpstat.tcps_ecn_client_setup++;
2082 			tp->ecn_flags |= TE_CLIENT_SETUP;
2083 		} else {
2084 			/*
2085 			 * We sent an ECN-setup SYN but it was dropped.
2086 			 * Fall back to non-ECN and clear flag indicating
2087 			 * we should send data with IP ECT set.
2088 			 */
2089 			if (tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT)) {
2090 				tcpstat.tcps_ecn_lost_syn++;
2091 				tp->ecn_flags |= TE_LOST_SYN;
2092 			}
2093 			tp->ecn_flags &= ~TE_SENDIPECT;
2094 		}
2095 	} else if (TCP_ACC_ECN_ON(tp) && (tp->ecn_flags & TE_ACE_FINAL_ACK_3WHS) &&
2096 	    len == 0 && (flags & (TH_FLAGS_ALL)) == TH_ACK) {
2097 		/*
2098 		 * Client has processed SYN-ACK and moved to ESTABLISHED.
2099 		 * This is the final ACK of 3WHS. If ACC_ECN has been negotiated,
2100 		 * then send the handshake encoding as per Table 3 of Accurate ECN draft.
2101 		 * We are clearing the ACE flags just in case if they were set before.
2102 		 * TODO: if client has to carry data in the 3WHS ACK, then we need to send a pure ACK first
2103 		 */
2104 		flags &= ~(TH_AE | TH_CWR | TH_ECE);
2105 		if (tp->ecn_flags & TE_ACE_SETUP_NON_ECT) {
2106 			flags |= TH_CWR;
2107 			tp->ecn_flags &= ~TE_ACE_SETUP_NON_ECT;
2108 		} else if (tp->ecn_flags & TE_ACE_SETUP_ECT1) {
2109 			flags |= (TH_CWR | TH_ECE);
2110 			tp->ecn_flags &= ~TE_ACE_SETUP_ECT1;
2111 		} else if (tp->ecn_flags & TE_ACE_SETUP_ECT0) {
2112 			flags |= TH_AE;
2113 			tp->ecn_flags &= ~TE_ACE_SETUP_ECT0;
2114 		} else if (tp->ecn_flags & TE_ACE_SETUP_CE) {
2115 			flags |= (TH_AE | TH_CWR);
2116 			tp->ecn_flags &= ~TE_ACE_SETUP_CE;
2117 		}
2118 		tp->ecn_flags &= ~(TE_ACE_FINAL_ACK_3WHS);
2119 	}
2120 
2121 	/*
2122 	 * Check if we should set the TCP CWR flag.
2123 	 * CWR flag is sent when we reduced the congestion window because
2124 	 * we received a TCP ECE or we performed a fast retransmit. We
2125 	 * never set the CWR flag on retransmitted packets. We only set
2126 	 * the CWR flag on data packets. Pure acks don't have this set.
2127 	 */
2128 	if ((tp->ecn_flags & TE_SENDCWR) != 0 && len != 0 &&
2129 	    !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) {
2130 		flags |= TH_CWR;
2131 		tp->ecn_flags &= ~TE_SENDCWR;
2132 	}
2133 
2134 	/*
2135 	 * Check if we should set the TCP ECE flag.
2136 	 */
2137 	if ((tp->ecn_flags & TE_SENDECE) != 0 && len == 0) {
2138 		flags |= TH_ECE;
2139 		tcpstat.tcps_ecn_sent_ece++;
2140 	}
2141 
2142 	hdrlen += optlen;
2143 
2144 	/* Reset DSACK sequence numbers */
2145 	tp->t_dsack_lseq = 0;
2146 	tp->t_dsack_rseq = 0;
2147 
2148 	if (isipv6) {
2149 		ipoptlen = ip6_optlen(inp);
2150 	} else {
2151 		if (tp_inp_options) {
2152 			ipoptlen = tp_inp_options->m_len -
2153 			    offsetof(struct ipoption, ipopt_list);
2154 		} else {
2155 			ipoptlen = 0;
2156 		}
2157 	}
2158 #if IPSEC
2159 	ipoptlen += ipsec_optlen;
2160 #endif
2161 
2162 	/*
2163 	 * Adjust data length if insertion of options will
2164 	 * bump the packet length beyond the t_maxopd length.
2165 	 * Clear the FIN bit because we cut off the tail of
2166 	 * the segment.
2167 	 *
2168 	 * When doing TSO limit a burst to TCP_MAXWIN minus the
2169 	 * IP, TCP and Options length to keep ip->ip_len from
2170 	 * overflowing.  Prevent the last segment from being
2171 	 * fractional thus making them all equal sized and set
2172 	 * the flag to continue sending.  TSO is disabled when
2173 	 * IP options or IPSEC are present.
2174 	 */
2175 	if (len + optlen + ipoptlen > tp->t_maxopd) {
2176 		/*
2177 		 * If there is still more to send,
2178 		 * don't close the connection.
2179 		 */
2180 		flags &= ~TH_FIN;
2181 		if (tso) {
2182 			int32_t tso_maxlen;
2183 
2184 			tso_maxlen = tp->tso_max_segment_size ?
2185 			    tp->tso_max_segment_size : TCP_MAXWIN;
2186 
2187 			/* hdrlen includes optlen */
2188 			if (len > tso_maxlen - hdrlen) {
2189 				len = tso_maxlen - hdrlen;
2190 				sendalot = 1;
2191 			} else if (tp->t_flags & TF_NEEDFIN) {
2192 				sendalot = 1;
2193 			}
2194 
2195 			if (len % (tp->t_maxopd - optlen) != 0) {
2196 				len = len - (len % (tp->t_maxopd - optlen));
2197 				sendalot = 1;
2198 			}
2199 		} else {
2200 			len = tp->t_maxopd - optlen - ipoptlen;
2201 			sendalot = 1;
2202 		}
2203 	}
2204 
2205 	if (max_linkhdr + hdrlen > MCLBYTES) {
2206 		panic("tcphdr too big");
2207 	}
2208 
2209 	/* Check if there is enough data in the send socket
2210 	 * buffer to start measuring bandwidth
2211 	 */
2212 	if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
2213 	    (tp->t_bwmeas != NULL) &&
2214 	    (tp->t_flagsext & TF_BWMEAS_INPROGRESS) == 0) {
2215 		tp->t_bwmeas->bw_size = min(min(
2216 			    (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)),
2217 			    tp->snd_cwnd), tp->snd_wnd);
2218 		if (tp->t_bwmeas->bw_minsize > 0 &&
2219 		    tp->t_bwmeas->bw_size < tp->t_bwmeas->bw_minsize) {
2220 			tp->t_bwmeas->bw_size = 0;
2221 		}
2222 		if (tp->t_bwmeas->bw_maxsize > 0) {
2223 			tp->t_bwmeas->bw_size = min(tp->t_bwmeas->bw_size,
2224 			    tp->t_bwmeas->bw_maxsize);
2225 		}
2226 		if (tp->t_bwmeas->bw_size > 0) {
2227 			tp->t_flagsext |= TF_BWMEAS_INPROGRESS;
2228 			tp->t_bwmeas->bw_start = tp->snd_max;
2229 			tp->t_bwmeas->bw_ts = tcp_now;
2230 		}
2231 	}
2232 
2233 	VERIFY(inp->inp_flowhash != 0);
2234 	/*
2235 	 * Grab a header mbuf, attaching a copy of data to
2236 	 * be transmitted, and initialize the header from
2237 	 * the template for sends on this connection.
2238 	 */
2239 	if (len) {
2240 		/* Remember what the last head-of-line packet-size was */
2241 		if (tp->t_pmtud_lastseg_size == 0 && tp->snd_nxt == tp->snd_una) {
2242 			ASSERT(len + optlen + ipoptlen <= IP_MAXPACKET);
2243 			tp->t_pmtud_lastseg_size = (uint16_t)(len + optlen + ipoptlen);
2244 		}
2245 		if ((tp->t_flagsext & TF_FORCE) && len == 1) {
2246 			tcpstat.tcps_sndprobe++;
2247 		} else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
2248 			tcpstat.tcps_sndrexmitpack++;
2249 			tcpstat.tcps_sndrexmitbyte += len;
2250 			if (nstat_collect) {
2251 				nstat_route_tx(inp->inp_route.ro_rt, 1,
2252 				    len, NSTAT_TX_FLAG_RETRANSMIT);
2253 				INP_ADD_STAT(inp, cell, wifi, wired,
2254 				    txpackets, 1);
2255 				INP_ADD_STAT(inp, cell, wifi, wired,
2256 				    txbytes, len);
2257 				tp->t_stat.txretransmitbytes += len;
2258 				tp->t_stat.rxmitpkts++;
2259 			}
2260 		} else {
2261 			tcpstat.tcps_sndpack++;
2262 			tcpstat.tcps_sndbyte += len;
2263 
2264 			if (nstat_collect) {
2265 				INP_ADD_STAT(inp, cell, wifi, wired,
2266 				    txpackets, 1);
2267 				INP_ADD_STAT(inp, cell, wifi, wired,
2268 				    txbytes, len);
2269 			}
2270 			inp_decr_sndbytes_unsent(so, len);
2271 		}
2272 		inp_set_activity_bitmap(inp);
2273 #if MPTCP
2274 		if (tp->t_mpflags & TMPF_MPTCP_TRUE) {
2275 			tcpstat.tcps_mp_sndpacks++;
2276 			tcpstat.tcps_mp_sndbytes += len;
2277 		}
2278 #endif /* MPTCP */
2279 		/*
2280 		 * try to use the new interface that allocates all
2281 		 * the necessary mbuf hdrs under 1 mbuf lock and
2282 		 * avoids rescanning the socket mbuf list if
2283 		 * certain conditions are met.  This routine can't
2284 		 * be used in the following cases...
2285 		 * 1) the protocol headers exceed the capacity of
2286 		 * of a single mbuf header's data area (no cluster attached)
2287 		 * 2) the length of the data being transmitted plus
2288 		 * the protocol headers fits into a single mbuf header's
2289 		 * data area (no cluster attached)
2290 		 */
2291 		m = NULL;
2292 
2293 		/* minimum length we are going to allocate */
2294 		allocated_len = MHLEN;
2295 		if (MHLEN < hdrlen + max_linkhdr) {
2296 			MGETHDR(m, M_DONTWAIT, MT_HEADER);
2297 			if (m == NULL) {
2298 				error = ENOBUFS;
2299 				goto out;
2300 			}
2301 			MCLGET(m, M_DONTWAIT);
2302 			if ((m->m_flags & M_EXT) == 0) {
2303 				m_freem(m);
2304 				error = ENOBUFS;
2305 				goto out;
2306 			}
2307 			m->m_data += max_linkhdr;
2308 			m->m_len = hdrlen;
2309 			allocated_len = MCLBYTES;
2310 		}
2311 		if (len <= allocated_len - hdrlen - max_linkhdr) {
2312 			if (m == NULL) {
2313 				VERIFY(allocated_len <= MHLEN);
2314 				MGETHDR(m, M_DONTWAIT, MT_HEADER);
2315 				if (m == NULL) {
2316 					error = ENOBUFS;
2317 					goto out;
2318 				}
2319 				m->m_data += max_linkhdr;
2320 				m->m_len = hdrlen;
2321 			}
2322 			/* makes sure we still have data left to be sent at this point */
2323 			if (so->so_snd.sb_mb == NULL || off < 0) {
2324 				if (m != NULL) {
2325 					m_freem(m);
2326 				}
2327 				error = 0; /* should we return an error? */
2328 				goto out;
2329 			}
2330 			m_copydata(so->so_snd.sb_mb, off, (int) len,
2331 			    mtod(m, caddr_t) + hdrlen);
2332 			m->m_len += len;
2333 		} else {
2334 			uint32_t copymode;
2335 			/*
2336 			 * Retain packet header metadata at the socket
2337 			 * buffer if this is is an MPTCP subflow,
2338 			 * otherwise move it.
2339 			 */
2340 			copymode = M_COPYM_MOVE_HDR;
2341 #if MPTCP
2342 			if (so->so_flags & SOF_MP_SUBFLOW) {
2343 				copymode = M_COPYM_NOOP_HDR;
2344 			}
2345 #endif /* MPTCP */
2346 			if (m != NULL) {
2347 				m->m_next = m_copym_mode(so->so_snd.sb_mb,
2348 				    off, (int)len, M_DONTWAIT, copymode);
2349 				if (m->m_next == NULL) {
2350 					(void) m_free(m);
2351 					error = ENOBUFS;
2352 					goto out;
2353 				}
2354 			} else {
2355 				/*
2356 				 * make sure we still have data left
2357 				 * to be sent at this point
2358 				 */
2359 				if (so->so_snd.sb_mb == NULL) {
2360 					error = 0; /* should we return an error? */
2361 					goto out;
2362 				}
2363 
2364 				/*
2365 				 * m_copym_with_hdrs will always return the
2366 				 * last mbuf pointer and the offset into it that
2367 				 * it acted on to fullfill the current request,
2368 				 * whether a valid 'hint' was passed in or not.
2369 				 */
2370 				if ((m = m_copym_with_hdrs(so->so_snd.sb_mb,
2371 				    off, len, M_DONTWAIT, NULL, NULL,
2372 				    copymode)) == NULL) {
2373 					error = ENOBUFS;
2374 					goto out;
2375 				}
2376 				m->m_data += max_linkhdr;
2377 				m->m_len = hdrlen;
2378 			}
2379 		}
2380 		/*
2381 		 * If we're sending everything we've got, set PUSH.
2382 		 * (This will keep happy those implementations which only
2383 		 * give data to the user when a buffer fills or
2384 		 * a PUSH comes in.)
2385 		 *
2386 		 * On SYN-segments we should not add the PUSH-flag.
2387 		 */
2388 		if (off + len == so->so_snd.sb_cc && !(flags & TH_SYN)) {
2389 			flags |= TH_PUSH;
2390 		}
2391 	} else {
2392 		if (tp->t_flags & TF_ACKNOW) {
2393 			tcpstat.tcps_sndacks++;
2394 		} else if (flags & (TH_SYN | TH_FIN | TH_RST)) {
2395 			tcpstat.tcps_sndctrl++;
2396 		} else if (SEQ_GT(tp->snd_up, tp->snd_una)) {
2397 			tcpstat.tcps_sndurg++;
2398 		} else {
2399 			tcpstat.tcps_sndwinup++;
2400 		}
2401 
2402 		MGETHDR(m, M_DONTWAIT, MT_HEADER);      /* MAC-OK */
2403 		if (m == NULL) {
2404 			error = ENOBUFS;
2405 			goto out;
2406 		}
2407 		if (MHLEN < (hdrlen + max_linkhdr)) {
2408 			MCLGET(m, M_DONTWAIT);
2409 			if ((m->m_flags & M_EXT) == 0) {
2410 				m_freem(m);
2411 				error = ENOBUFS;
2412 				goto out;
2413 			}
2414 		}
2415 		m->m_data += max_linkhdr;
2416 		m->m_len = hdrlen;
2417 	}
2418 	m->m_pkthdr.rcvif = 0;
2419 	m_add_crumb(m, PKT_CRUMB_TCP_OUTPUT);
2420 
2421 	/* Any flag other than pure-ACK: Do not compress! */
2422 	if (flags & ~(TH_ACK)) {
2423 		do_not_compress = TRUE;
2424 	}
2425 
2426 	if (tp->rcv_scale == 0) {
2427 		do_not_compress = TRUE;
2428 	}
2429 
2430 	if (do_not_compress) {
2431 		m->m_pkthdr.comp_gencnt = 0;
2432 	} else {
2433 		if (TSTMP_LT(tp->t_comp_lastinc + tcp_ack_compression_rate, tcp_now)) {
2434 			tp->t_comp_gencnt++;
2435 			/* 0 means no compression, thus jump this */
2436 			if (tp->t_comp_gencnt <= TCP_ACK_COMPRESSION_DUMMY) {
2437 				tp->t_comp_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
2438 			}
2439 			tp->t_comp_lastinc = tcp_now;
2440 		}
2441 		m->m_pkthdr.comp_gencnt = tp->t_comp_gencnt;
2442 	}
2443 
2444 	if (isipv6) {
2445 		ip6 = mtod(m, struct ip6_hdr *);
2446 		th = (struct tcphdr *)(void *)(ip6 + 1);
2447 		tcp_fillheaders(m, tp, ip6, th);
2448 		if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len &&
2449 		    !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) {
2450 			ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
2451 		}
2452 		svc_flags |= PKT_SCF_IPV6;
2453 #if PF_ECN
2454 		m_pftag(m)->pftag_hdr = (void *)ip6;
2455 		m_pftag(m)->pftag_flags |= PF_TAG_HDR_INET6;
2456 #endif /* PF_ECN */
2457 	} else {
2458 		ip = mtod(m, struct ip *);
2459 		th = (struct tcphdr *)(void *)(ip + 1);
2460 		/* this picks up the pseudo header (w/o the length) */
2461 		tcp_fillheaders(m, tp, ip, th);
2462 		if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len &&
2463 		    !SEQ_LT(tp->snd_nxt, tp->snd_max) &&
2464 		    !sack_rxmit && !(flags & TH_SYN)) {
2465 			ip->ip_tos |= IPTOS_ECN_ECT0;
2466 		}
2467 #if PF_ECN
2468 		m_pftag(m)->pftag_hdr = (void *)ip;
2469 		m_pftag(m)->pftag_flags |= PF_TAG_HDR_INET;
2470 #endif /* PF_ECN */
2471 	}
2472 
2473 	/*
2474 	 * Fill in fields, remembering maximum advertised
2475 	 * window for use in delaying messages about window sizes.
2476 	 * If resending a FIN, be sure not to use a new sequence number.
2477 	 */
2478 	if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) &&
2479 	    tp->snd_nxt == tp->snd_max) {
2480 		tp->snd_nxt--;
2481 	}
2482 	/*
2483 	 * If we are doing retransmissions, then snd_nxt will
2484 	 * not reflect the first unsent octet.  For ACK only
2485 	 * packets, we do not want the sequence number of the
2486 	 * retransmitted packet, we want the sequence number
2487 	 * of the next unsent octet.  So, if there is no data
2488 	 * (and no SYN or FIN), use snd_max instead of snd_nxt
2489 	 * when filling in ti_seq.  But if we are in persist
2490 	 * state, snd_max might reflect one byte beyond the
2491 	 * right edge of the window, so use snd_nxt in that
2492 	 * case, since we know we aren't doing a retransmission.
2493 	 * (retransmit and persist are mutually exclusive...)
2494 	 *
2495 	 * Note the state of this retransmit segment to detect spurious
2496 	 * retransmissions.
2497 	 */
2498 	if (sack_rxmit == 0) {
2499 		if (len || (flags & (TH_SYN | TH_FIN)) ||
2500 		    tp->t_timer[TCPT_PERSIST]) {
2501 			th->th_seq = htonl(tp->snd_nxt);
2502 			if (len > 0) {
2503 				m->m_pkthdr.tx_start_seq = tp->snd_nxt;
2504 				m->m_pkthdr.pkt_flags |= PKTF_START_SEQ;
2505 			}
2506 			if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
2507 				if (SACK_ENABLED(tp) && len > 1 &&
2508 				    !(tp->t_flagsext & TF_SENT_TLPROBE)) {
2509 					tcp_rxtseg_insert(tp, tp->snd_nxt,
2510 					    (tp->snd_nxt + len - 1));
2511 				}
2512 				if (len > 0) {
2513 					m->m_pkthdr.pkt_flags |=
2514 					    PKTF_TCP_REXMT;
2515 				}
2516 			}
2517 		} else {
2518 			th->th_seq = htonl(tp->snd_max);
2519 		}
2520 	} else {
2521 		th->th_seq = htonl(p->rxmit);
2522 		if (len > 0) {
2523 			m->m_pkthdr.pkt_flags |=
2524 			    (PKTF_TCP_REXMT | PKTF_START_SEQ);
2525 			m->m_pkthdr.tx_start_seq = p->rxmit;
2526 		}
2527 		tcp_rxtseg_insert(tp, p->rxmit, (p->rxmit + len - 1));
2528 		p->rxmit += len;
2529 		tp->sackhint.sack_bytes_rexmit += len;
2530 	}
2531 	th->th_ack = htonl(tp->rcv_nxt);
2532 	tp->last_ack_sent = tp->rcv_nxt;
2533 	if (optlen) {
2534 		bcopy(opt, th + 1, optlen);
2535 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
2536 	}
2537 	/* Separate AE from flags */
2538 	th->th_flags = (flags & (TH_FLAGS_ALL));
2539 	th->th_x2 = (flags & (TH_AE)) >> 8;
2540 	th->th_win = htons((u_short) (recwin >> tp->rcv_scale));
2541 	tp->t_last_recwin = recwin;
2542 	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
2543 		if (recwin > 0 && SEQ_LT(tp->rcv_adv, tp->rcv_nxt + recwin)) {
2544 			tp->rcv_adv = tp->rcv_nxt + recwin;
2545 		}
2546 	} else {
2547 		struct mptcb *mp_tp = tptomptp(tp);
2548 		if (recwin > 0) {
2549 			tp->rcv_adv = tp->rcv_nxt + recwin;
2550 		}
2551 
2552 		if (recwin > 0 && MPTCP_SEQ_LT(mp_tp->mpt_rcvadv, mp_tp->mpt_rcvnxt + recwin)) {
2553 			mp_tp->mpt_rcvadv = mp_tp->mpt_rcvnxt + recwin;
2554 		}
2555 	}
2556 
2557 	/*
2558 	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
2559 	 * a 0 window.  This may cause the remote transmitter to stall.  This
2560 	 * flag tells soreceive() to disable delayed acknowledgements when
2561 	 * draining the buffer.  This can occur if the receiver is attempting
2562 	 * to read more data then can be buffered prior to transmitting on
2563 	 * the connection.
2564 	 */
2565 	if (th->th_win == 0) {
2566 		tp->t_flags |= TF_RXWIN0SENT;
2567 	} else {
2568 		tp->t_flags &= ~TF_RXWIN0SENT;
2569 	}
2570 
2571 	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
2572 		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
2573 		th->th_flags |= TH_URG;
2574 	} else {
2575 		/*
2576 		 * If no urgent pointer to send, then we pull
2577 		 * the urgent pointer to the left edge of the send window
2578 		 * so that it doesn't drift into the send window on sequence
2579 		 * number wraparound.
2580 		 */
2581 		tp->snd_up = tp->snd_una;               /* drag it along */
2582 	}
2583 
2584 	/*
2585 	 * Put TCP length in extended header, and then
2586 	 * checksum extended header and data.
2587 	 */
2588 	m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
2589 
2590 	/*
2591 	 * If this is potentially the last packet on the stream, then mark
2592 	 * it in order to enable some optimizations in the underlying
2593 	 * layers
2594 	 */
2595 	if (tp->t_state != TCPS_ESTABLISHED &&
2596 	    (tp->t_state == TCPS_CLOSING || tp->t_state == TCPS_TIME_WAIT
2597 	    || tp->t_state == TCPS_LAST_ACK || (th->th_flags & TH_RST))) {
2598 		m->m_pkthdr.pkt_flags |= PKTF_LAST_PKT;
2599 	}
2600 
2601 	if (isipv6) {
2602 		/*
2603 		 * ip6_plen is not need to be filled now, and will be filled
2604 		 * in ip6_output.
2605 		 */
2606 		m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
2607 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2608 		if (len + optlen) {
2609 			th->th_sum = in_addword(th->th_sum,
2610 			    htons((u_short)(optlen + len)));
2611 		}
2612 	} else {
2613 		m->m_pkthdr.csum_flags = CSUM_TCP;
2614 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2615 		if (len + optlen) {
2616 			th->th_sum = in_addword(th->th_sum,
2617 			    htons((u_short)(optlen + len)));
2618 		}
2619 	}
2620 
2621 	/*
2622 	 * Enable TSO and specify the size of the segments.
2623 	 * The TCP pseudo header checksum is always provided.
2624 	 */
2625 	if (tso) {
2626 		if (isipv6) {
2627 			m->m_pkthdr.csum_flags |= CSUM_TSO_IPV6;
2628 		} else {
2629 			m->m_pkthdr.csum_flags |= CSUM_TSO_IPV4;
2630 		}
2631 
2632 		m->m_pkthdr.tso_segsz = tp->t_maxopd - optlen;
2633 	} else {
2634 		m->m_pkthdr.tso_segsz = 0;
2635 	}
2636 
2637 	/*
2638 	 * In transmit state, time the transmission and arrange for
2639 	 * the retransmit.  In persist state, just set snd_max.
2640 	 */
2641 	if (!(tp->t_flagsext & TF_FORCE)
2642 	    || tp->t_timer[TCPT_PERSIST] == 0) {
2643 		tcp_seq startseq = tp->snd_nxt;
2644 
2645 		/*
2646 		 * Advance snd_nxt over sequence space of this segment.
2647 		 */
2648 		if (flags & (TH_SYN | TH_FIN)) {
2649 			if (flags & TH_SYN) {
2650 				tp->snd_nxt++;
2651 			}
2652 			if ((flags & TH_FIN) &&
2653 			    !(tp->t_flags & TF_SENTFIN)) {
2654 				tp->snd_nxt++;
2655 				tp->t_flags |= TF_SENTFIN;
2656 			}
2657 		}
2658 		if (sack_rxmit) {
2659 			goto timer;
2660 		}
2661 		if (sack_rescue_rxt == TRUE) {
2662 			tp->snd_nxt = old_snd_nxt;
2663 			sack_rescue_rxt = FALSE;
2664 			tcpstat.tcps_pto_in_recovery++;
2665 		} else {
2666 			tp->snd_nxt += len;
2667 		}
2668 		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
2669 			tp->snd_max = tp->snd_nxt;
2670 			tp->t_sndtime = tcp_now;
2671 			/*
2672 			 * Time this transmission if not a retransmission and
2673 			 * not currently timing anything.
2674 			 */
2675 			if (tp->t_rtttime == 0) {
2676 				tp->t_rtttime = tcp_now;
2677 				tp->t_rtseq = startseq;
2678 				tcpstat.tcps_segstimed++;
2679 
2680 				/* update variables related to pipe ack */
2681 				tp->t_pipeack_lastuna = tp->snd_una;
2682 			}
2683 		}
2684 
2685 		/*
2686 		 * Set retransmit timer if not currently set,
2687 		 * and not doing an ack or a keep-alive probe.
2688 		 */
2689 timer:
2690 		if (tp->t_timer[TCPT_REXMT] == 0 &&
2691 		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
2692 		    tp->snd_nxt != tp->snd_una || (flags & TH_FIN))) {
2693 			if (tp->t_timer[TCPT_PERSIST]) {
2694 				tp->t_timer[TCPT_PERSIST] = 0;
2695 				tp->t_persist_stop = 0;
2696 				TCP_RESET_REXMT_STATE(tp);
2697 			}
2698 			tp->t_timer[TCPT_REXMT] =
2699 			    OFFSET_FROM_START(tp, tp->t_rxtcur);
2700 		}
2701 
2702 		/*
2703 		 * Set tail loss probe timeout if new data is being
2704 		 * transmitted. This will be supported only when
2705 		 * SACK option is enabled on a connection.
2706 		 *
2707 		 * Every time new data is sent PTO will get reset.
2708 		 */
2709 		if (tcp_enable_tlp && len != 0 && tp->t_state == TCPS_ESTABLISHED &&
2710 		    SACK_ENABLED(tp) && !IN_FASTRECOVERY(tp) &&
2711 		    tp->snd_nxt == tp->snd_max &&
2712 		    SEQ_GT(tp->snd_nxt, tp->snd_una) &&
2713 		    tp->t_rxtshift == 0 &&
2714 		    (tp->t_flagsext & (TF_SENT_TLPROBE | TF_PKTS_REORDERED)) == 0) {
2715 			uint32_t pto, srtt;
2716 
2717 			if (tcp_do_better_lr) {
2718 				srtt = tp->t_srtt >> TCP_RTT_SHIFT;
2719 				pto = 2 * srtt;
2720 				if ((tp->snd_max - tp->snd_una) <= tp->t_maxseg) {
2721 					pto += tcp_delack;
2722 				} else {
2723 					pto += 2;
2724 				}
2725 			} else {
2726 				/*
2727 				 * Using SRTT alone to set PTO can cause spurious
2728 				 * retransmissions on wireless networks where there
2729 				 * is a lot of variance in RTT. Taking variance
2730 				 * into account will avoid this.
2731 				 */
2732 				srtt = tp->t_srtt >> TCP_RTT_SHIFT;
2733 				pto = ((TCP_REXMTVAL(tp)) * 3) >> 1;
2734 				pto = max(2 * srtt, pto);
2735 				if ((tp->snd_max - tp->snd_una) == tp->t_maxseg) {
2736 					pto = max(pto,
2737 					    (((3 * pto) >> 2) + tcp_delack * 2));
2738 				} else {
2739 					pto = max(10, pto);
2740 				}
2741 			}
2742 
2743 			/* if RTO is less than PTO, choose RTO instead */
2744 			if (tp->t_rxtcur < pto) {
2745 				pto = tp->t_rxtcur;
2746 			}
2747 
2748 			tp->t_timer[TCPT_PTO] = OFFSET_FROM_START(tp, pto);
2749 		}
2750 	} else {
2751 		/*
2752 		 * Persist case, update snd_max but since we are in
2753 		 * persist mode (no window) we do not update snd_nxt.
2754 		 */
2755 		int xlen = len;
2756 		if (flags & TH_SYN) {
2757 			++xlen;
2758 		}
2759 		if ((flags & TH_FIN) &&
2760 		    !(tp->t_flags & TF_SENTFIN)) {
2761 			++xlen;
2762 			tp->t_flags |= TF_SENTFIN;
2763 		}
2764 		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
2765 			tp->snd_max = tp->snd_nxt + len;
2766 			tp->t_sndtime = tcp_now;
2767 		}
2768 	}
2769 
2770 #if TCPDEBUG
2771 	/*
2772 	 * Trace.
2773 	 */
2774 	if (so_options & SO_DEBUG) {
2775 		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
2776 	}
2777 #endif
2778 
2779 	/*
2780 	 * Fill in IP length and desired time to live and
2781 	 * send to IP level.  There should be a better way
2782 	 * to handle ttl and tos; we could keep them in
2783 	 * the template, but need a way to checksum without them.
2784 	 */
2785 	/*
2786 	 * m->m_pkthdr.len should have been set before cksum calcuration,
2787 	 * because in6_cksum() need it.
2788 	 */
2789 	if (isipv6) {
2790 		/*
2791 		 * we separately set hoplimit for every segment, since the
2792 		 * user might want to change the value via setsockopt.
2793 		 * Also, desired default hop limit might be changed via
2794 		 * Neighbor Discovery.
2795 		 */
2796 		ip6->ip6_hlim = in6_selecthlim(inp, inp->in6p_route.ro_rt ?
2797 		    inp->in6p_route.ro_rt->rt_ifp : NULL);
2798 
2799 		/* Don't set ECT bit if requested by an app */
2800 
2801 		/* Set ECN bits for testing purposes */
2802 		if (tp->ecn_flags & TE_FORCE_ECT1) {
2803 			ip6->ip6_flow |= htonl(IPTOS_ECN_ECT1 << 20);
2804 		} else if (tp->ecn_flags & TE_FORCE_ECT0) {
2805 			ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
2806 		}
2807 
2808 		KERNEL_DEBUG(DBG_LAYER_BEG,
2809 		    ((inp->inp_fport << 16) | inp->inp_lport),
2810 		    (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
2811 		    (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
2812 		    sendalot, 0, 0);
2813 	} else {
2814 		ASSERT(m->m_pkthdr.len <= IP_MAXPACKET);
2815 		ip->ip_len = (u_short)m->m_pkthdr.len;
2816 		ip->ip_ttl = inp->inp_ip_ttl;   /* XXX */
2817 
2818 		/* Don't set ECN bit if requested by an app */
2819 		ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
2820 
2821 		/* Set ECN bits for testing purposes */
2822 		if (tp->ecn_flags & TE_FORCE_ECT1) {
2823 			ip->ip_tos |= IPTOS_ECN_ECT1;
2824 		} else if (tp->ecn_flags & TE_FORCE_ECT0) {
2825 			ip->ip_tos |= IPTOS_ECN_ECT0;
2826 		}
2827 
2828 		KERNEL_DEBUG(DBG_LAYER_BEG,
2829 		    ((inp->inp_fport << 16) | inp->inp_lport),
2830 		    (((inp->inp_laddr.s_addr & 0xffff) << 16) |
2831 		    (inp->inp_faddr.s_addr & 0xffff)), 0, 0, 0);
2832 	}
2833 
2834 	/*
2835 	 * See if we should do MTU discovery.
2836 	 * Look at the flag updated on the following criterias:
2837 	 *	1) Path MTU discovery is authorized by the sysctl
2838 	 *	2) The route isn't set yet (unlikely but could happen)
2839 	 *	3) The route is up
2840 	 *	4) the MTU is not locked (if it is, then discovery has been
2841 	 *	   disabled for that route)
2842 	 */
2843 	if (!isipv6) {
2844 		if (path_mtu_discovery && (tp->t_flags & TF_PMTUD)) {
2845 			ip->ip_off |= IP_DF;
2846 		}
2847 	}
2848 
2849 #if NECP
2850 	{
2851 		necp_kernel_policy_id policy_id;
2852 		necp_kernel_policy_id skip_policy_id;
2853 		u_int32_t route_rule_id;
2854 		u_int32_t pass_flags;
2855 		if (!necp_socket_is_allowed_to_send_recv(inp, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) {
2856 			TCP_LOG_DROP_NECP(isipv6 ? (void *)ip6 : (void *)ip, th, tp, true);
2857 			m_freem(m);
2858 			error = EHOSTUNREACH;
2859 			goto out;
2860 		}
2861 		necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id, pass_flags);
2862 
2863 		if (net_qos_policy_restricted != 0) {
2864 			necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, route_rule_id);
2865 		}
2866 	}
2867 #endif /* NECP */
2868 
2869 #if IPSEC
2870 	if (inp->inp_sp != NULL) {
2871 		ipsec_setsocket(m, so);
2872 	}
2873 #endif /*IPSEC*/
2874 
2875 	/*
2876 	 * The socket is kept locked while sending out packets in ip_output, even if packet chaining is not active.
2877 	 */
2878 	lost = 0;
2879 
2880 	/*
2881 	 * Embed the flow hash in pkt hdr and mark the packet as
2882 	 * capable of flow controlling
2883 	 */
2884 	m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
2885 	m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
2886 	m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
2887 	m->m_pkthdr.pkt_proto = IPPROTO_TCP;
2888 	m->m_pkthdr.tx_tcp_pid = so->last_pid;
2889 	if (so->so_flags & SOF_DELEGATED) {
2890 		m->m_pkthdr.tx_tcp_e_pid = so->e_pid;
2891 	} else {
2892 		m->m_pkthdr.tx_tcp_e_pid = 0;
2893 	}
2894 
2895 	m->m_nextpkt = NULL;
2896 
2897 	if (inp->inp_last_outifp != NULL &&
2898 	    !(inp->inp_last_outifp->if_flags & IFF_LOOPBACK)) {
2899 		/* Hint to prioritize this packet if
2900 		 * 1. if the packet has no data
2901 		 * 2. the interface supports transmit-start model and did
2902 		 *    not disable ACK prioritization.
2903 		 * 3. Only ACK flag is set.
2904 		 * 4. there is no outstanding data on this connection.
2905 		 */
2906 		if (len == 0 && (inp->inp_last_outifp->if_eflags & (IFEF_TXSTART | IFEF_NOACKPRI)) == IFEF_TXSTART) {
2907 			if (th->th_flags == TH_ACK &&
2908 			    tp->snd_una == tp->snd_max &&
2909 			    tp->t_timer[TCPT_REXMT] == 0) {
2910 				svc_flags |= PKT_SCF_TCP_ACK;
2911 			}
2912 			if (th->th_flags & TH_SYN) {
2913 				svc_flags |= PKT_SCF_TCP_SYN;
2914 			}
2915 		}
2916 		set_packet_service_class(m, so, sotc, svc_flags);
2917 	} else {
2918 		/*
2919 		 * Optimization for loopback just set the mbuf
2920 		 * service class
2921 		 */
2922 		(void) m_set_service_class(m, so_tc2msc(sotc));
2923 	}
2924 
2925 	if ((th->th_flags & TH_SYN) && tp->t_syn_sent < UINT8_MAX) {
2926 		tp->t_syn_sent++;
2927 	}
2928 	if ((th->th_flags & TH_FIN) && tp->t_fin_sent < UINT8_MAX) {
2929 		tp->t_fin_sent++;
2930 	}
2931 	if ((th->th_flags & TH_RST) && tp->t_rst_sent < UINT8_MAX) {
2932 		tp->t_rst_sent++;
2933 	}
2934 	TCP_LOG_TH_FLAGS(isipv6 ? (void *)ip6 : (void *)ip, th, tp, true,
2935 	    inp->inp_last_outifp != NULL ? inp->inp_last_outifp :
2936 	    inp->inp_boundifp);
2937 
2938 	tp->t_pktlist_sentlen += len;
2939 	tp->t_lastchain++;
2940 
2941 	if (isipv6) {
2942 		DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp,
2943 		    struct ip6 *, ip6, struct tcpcb *, tp, struct tcphdr *,
2944 		    th);
2945 	} else {
2946 		DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp,
2947 		    struct ip *, ip, struct tcpcb *, tp, struct tcphdr *, th);
2948 	}
2949 
2950 	if (tp->t_pktlist_head != NULL) {
2951 		tp->t_pktlist_tail->m_nextpkt = m;
2952 		tp->t_pktlist_tail = m;
2953 	} else {
2954 		packchain_newlist++;
2955 		tp->t_pktlist_head = tp->t_pktlist_tail = m;
2956 	}
2957 
2958 	if (sendalot == 0 || (tp->t_state != TCPS_ESTABLISHED) ||
2959 	    (tp->snd_cwnd <= (tp->snd_wnd / 8)) ||
2960 	    (tp->t_flags & TF_ACKNOW) ||
2961 	    (tp->t_flagsext & TF_FORCE) ||
2962 	    tp->t_lastchain >= tcp_packet_chaining) {
2963 		error = 0;
2964 		while (inp->inp_sndinprog_cnt == 0 &&
2965 		    tp->t_pktlist_head != NULL) {
2966 			packetlist = tp->t_pktlist_head;
2967 			packchain_listadd = tp->t_lastchain;
2968 			packchain_sent++;
2969 			lost = tp->t_pktlist_sentlen;
2970 			TCP_PKTLIST_CLEAR(tp);
2971 
2972 			error = tcp_ip_output(so, tp, packetlist,
2973 			    packchain_listadd, tp_inp_options,
2974 			    (so_options & SO_DONTROUTE),
2975 			    (sack_rxmit || (sack_bytes_rxmt != 0)), isipv6);
2976 			if (error) {
2977 				/*
2978 				 * Take into account the rest of unsent
2979 				 * packets in the packet list for this tcp
2980 				 * into "lost", since we're about to free
2981 				 * the whole list below.
2982 				 */
2983 				lost += tp->t_pktlist_sentlen;
2984 				break;
2985 			} else {
2986 				lost = 0;
2987 			}
2988 		}
2989 		/* tcp was closed while we were in ip; resume close */
2990 		if (inp->inp_sndinprog_cnt == 0 &&
2991 		    (tp->t_flags & TF_CLOSING)) {
2992 			tp->t_flags &= ~TF_CLOSING;
2993 			(void) tcp_close(tp);
2994 			return 0;
2995 		}
2996 	} else {
2997 		error = 0;
2998 		packchain_looped++;
2999 		tcpstat.tcps_sndtotal++;
3000 
3001 		goto again;
3002 	}
3003 	if (error) {
3004 		/*
3005 		 * Assume that the packets were lost, so back out the
3006 		 * sequence number advance, if any.  Note that the "lost"
3007 		 * variable represents the amount of user data sent during
3008 		 * the recent call to ip_output_list() plus the amount of
3009 		 * user data in the packet list for this tcp at the moment.
3010 		 */
3011 		if (!(tp->t_flagsext & TF_FORCE)
3012 		    || tp->t_timer[TCPT_PERSIST] == 0) {
3013 			/*
3014 			 * No need to check for TH_FIN here because
3015 			 * the TF_SENTFIN flag handles that case.
3016 			 */
3017 			if ((flags & TH_SYN) == 0) {
3018 				if (sack_rxmit) {
3019 					if (SEQ_GT((p->rxmit - lost),
3020 					    tp->snd_una)) {
3021 						p->rxmit -= lost;
3022 
3023 						if (SEQ_LT(p->rxmit, p->start)) {
3024 							p->rxmit = p->start;
3025 						}
3026 					} else {
3027 						lost = p->rxmit - tp->snd_una;
3028 						p->rxmit = tp->snd_una;
3029 
3030 						if (SEQ_LT(p->rxmit, p->start)) {
3031 							p->rxmit = p->start;
3032 						}
3033 					}
3034 					tp->sackhint.sack_bytes_rexmit -= lost;
3035 					if (tp->sackhint.sack_bytes_rexmit < 0) {
3036 						tp->sackhint.sack_bytes_rexmit = 0;
3037 					}
3038 				} else {
3039 					if (SEQ_GT((tp->snd_nxt - lost),
3040 					    tp->snd_una)) {
3041 						tp->snd_nxt -= lost;
3042 					} else {
3043 						tp->snd_nxt = tp->snd_una;
3044 					}
3045 				}
3046 			}
3047 		}
3048 out:
3049 		if (tp->t_pktlist_head != NULL) {
3050 			m_freem_list(tp->t_pktlist_head);
3051 		}
3052 		TCP_PKTLIST_CLEAR(tp);
3053 
3054 		if (error == ENOBUFS) {
3055 			/*
3056 			 * Set retransmit timer if not currently set
3057 			 * when we failed to send a segment that can be
3058 			 * retransmitted (i.e. not pure ack or rst)
3059 			 */
3060 			if (tp->t_timer[TCPT_REXMT] == 0 &&
3061 			    tp->t_timer[TCPT_PERSIST] == 0 &&
3062 			    (len != 0 || (flags & (TH_SYN | TH_FIN)) != 0 ||
3063 			    so->so_snd.sb_cc > 0)) {
3064 				tp->t_timer[TCPT_REXMT] =
3065 				    OFFSET_FROM_START(tp, tp->t_rxtcur);
3066 			}
3067 			tp->snd_cwnd = tp->t_maxseg;
3068 			tp->t_bytes_acked = 0;
3069 			tcp_check_timer_state(tp);
3070 			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3071 
3072 			TCP_LOG_OUTPUT(tp, "error ENOBUFS silently handled");
3073 
3074 			tcp_ccdbg_trace(tp, NULL, TCP_CC_OUTPUT_ERROR);
3075 			return 0;
3076 		}
3077 		if (error == EMSGSIZE) {
3078 			/*
3079 			 * ip_output() will have already fixed the route
3080 			 * for us.  tcp_mtudisc() will, as its last action,
3081 			 * initiate retransmission, so it is important to
3082 			 * not do so here.
3083 			 *
3084 			 * If TSO was active we either got an interface
3085 			 * without TSO capabilits or TSO was turned off.
3086 			 * Disable it for this connection as too and
3087 			 * immediatly retry with MSS sized segments generated
3088 			 * by this function.
3089 			 */
3090 			if (tso) {
3091 				tp->t_flags &= ~TF_TSO;
3092 			}
3093 
3094 			tcp_mtudisc(inp, 0);
3095 			tcp_check_timer_state(tp);
3096 
3097 			TCP_LOG_OUTPUT(tp, "error EMSGSIZE silently handled");
3098 
3099 			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3100 			return 0;
3101 		}
3102 		/*
3103 		 * Unless this is due to interface restriction policy,
3104 		 * treat EHOSTUNREACH/ENETDOWN/EADDRNOTAVAIL as a soft error.
3105 		 */
3106 		if ((error == EHOSTUNREACH || error == ENETDOWN || error == EADDRNOTAVAIL) &&
3107 		    TCPS_HAVERCVDSYN(tp->t_state) &&
3108 		    !inp_restricted_send(inp, inp->inp_last_outifp)) {
3109 			tp->t_softerror = error;
3110 			TCP_LOG_OUTPUT(tp, "soft error %d silently handled", error);
3111 			error = 0;
3112 		} else {
3113 			TCP_LOG_OUTPUT(tp, "error %d", error);
3114 		}
3115 		tcp_check_timer_state(tp);
3116 		KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3117 		return error;
3118 	}
3119 
3120 	tcpstat.tcps_sndtotal++;
3121 
3122 	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3123 	if (sendalot) {
3124 		goto again;
3125 	}
3126 
3127 	tcp_check_timer_state(tp);
3128 
3129 	return 0;
3130 }
3131 
3132 static int
tcp_ip_output(struct socket * so,struct tcpcb * tp,struct mbuf * pkt,int cnt,struct mbuf * opt,int flags,int sack_in_progress,boolean_t isipv6)3133 tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt,
3134     int cnt, struct mbuf *opt, int flags, int sack_in_progress, boolean_t isipv6)
3135 {
3136 	int error = 0;
3137 	boolean_t chain;
3138 	boolean_t unlocked = FALSE;
3139 	boolean_t ifdenied = FALSE;
3140 	struct inpcb *inp = tp->t_inpcb;
3141 	struct ifnet *outif = NULL;
3142 	bool check_qos_marking_again = (so->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE) ? FALSE : TRUE;
3143 
3144 	union {
3145 		struct route _ro;
3146 		struct route_in6 _ro6;
3147 	} route_u_ = {};
3148 #define ro route_u_._ro
3149 #define ro6 route_u_._ro6
3150 
3151 	union {
3152 		struct ip_out_args _ipoa;
3153 		struct ip6_out_args _ip6oa;
3154 	} out_args_u_ = {};
3155 #define ipoa out_args_u_._ipoa
3156 #define ip6oa out_args_u_._ip6oa
3157 
3158 	if (isipv6) {
3159 		ip6oa.ip6oa_boundif = IFSCOPE_NONE;
3160 		ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
3161 		ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
3162 		ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3163 	} else {
3164 		ipoa.ipoa_boundif = IFSCOPE_NONE;
3165 		ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
3166 		ipoa.ipoa_sotc = SO_TC_UNSPEC;
3167 		ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3168 	}
3169 
3170 	struct flowadv *adv =
3171 	    (isipv6 ? &ip6oa.ip6oa_flowadv : &ipoa.ipoa_flowadv);
3172 
3173 	/* If socket was bound to an ifindex, tell ip_output about it */
3174 	if (inp->inp_flags & INP_BOUND_IF) {
3175 		if (isipv6) {
3176 			ip6oa.ip6oa_boundif = inp->inp_boundifp->if_index;
3177 			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
3178 		} else {
3179 			ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
3180 			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
3181 		}
3182 	} else if (!in6_embedded_scope && isipv6 && (IN6_IS_SCOPE_EMBED(&inp->in6p_faddr))) {
3183 		ip6oa.ip6oa_boundif = inp->inp_fifscope;
3184 		ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
3185 	}
3186 
3187 	if (INP_NO_CELLULAR(inp)) {
3188 		if (isipv6) {
3189 			ip6oa.ip6oa_flags |=  IP6OAF_NO_CELLULAR;
3190 		} else {
3191 			ipoa.ipoa_flags |=  IPOAF_NO_CELLULAR;
3192 		}
3193 	}
3194 	if (INP_NO_EXPENSIVE(inp)) {
3195 		if (isipv6) {
3196 			ip6oa.ip6oa_flags |=  IP6OAF_NO_EXPENSIVE;
3197 		} else {
3198 			ipoa.ipoa_flags |=  IPOAF_NO_EXPENSIVE;
3199 		}
3200 	}
3201 	if (INP_NO_CONSTRAINED(inp)) {
3202 		if (isipv6) {
3203 			ip6oa.ip6oa_flags |=  IP6OAF_NO_CONSTRAINED;
3204 		} else {
3205 			ipoa.ipoa_flags |=  IPOAF_NO_CONSTRAINED;
3206 		}
3207 	}
3208 	if (INP_AWDL_UNRESTRICTED(inp)) {
3209 		if (isipv6) {
3210 			ip6oa.ip6oa_flags |=  IP6OAF_AWDL_UNRESTRICTED;
3211 		} else {
3212 			ipoa.ipoa_flags |=  IPOAF_AWDL_UNRESTRICTED;
3213 		}
3214 	}
3215 	if (INP_INTCOPROC_ALLOWED(inp) && isipv6) {
3216 		ip6oa.ip6oa_flags |=  IP6OAF_INTCOPROC_ALLOWED;
3217 	}
3218 	if (isipv6) {
3219 		ip6oa.ip6oa_sotc = so->so_traffic_class;
3220 		ip6oa.ip6oa_netsvctype = so->so_netsvctype;
3221 		ip6oa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount;
3222 	} else {
3223 		ipoa.ipoa_sotc = so->so_traffic_class;
3224 		ipoa.ipoa_netsvctype = so->so_netsvctype;
3225 		ipoa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount;
3226 	}
3227 	if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
3228 		if (isipv6) {
3229 			ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
3230 		} else {
3231 			ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
3232 		}
3233 	}
3234 	if (check_qos_marking_again) {
3235 		if (isipv6) {
3236 			ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY;
3237 		} else {
3238 			ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
3239 		}
3240 	}
3241 	if (isipv6) {
3242 		flags |= IPV6_OUTARGS;
3243 	} else {
3244 		flags |= IP_OUTARGS;
3245 	}
3246 
3247 	/* Copy the cached route and take an extra reference */
3248 	if (isipv6) {
3249 		in6p_route_copyout(inp, &ro6);
3250 	} else {
3251 		inp_route_copyout(inp, &ro);
3252 	}
3253 #if (DEBUG || DEVELOPMENT)
3254 	if ((so->so_flags & SOF_MARK_WAKE_PKT) && pkt != NULL) {
3255 		so->so_flags &= ~SOF_MARK_WAKE_PKT;
3256 		pkt->m_pkthdr.pkt_flags |= PKTF_WAKE_PKT;
3257 	}
3258 #endif /* (DEBUG || DEVELOPMENT) */
3259 
3260 	/*
3261 	 * Make sure ACK/DELACK conditions are cleared before
3262 	 * we unlock the socket.
3263 	 */
3264 	tp->last_ack_sent = tp->rcv_nxt;
3265 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
3266 	tp->t_timer[TCPT_DELACK] = 0;
3267 	tp->t_unacksegs = 0;
3268 
3269 	/* Increment the count of outstanding send operations */
3270 	inp->inp_sndinprog_cnt++;
3271 
3272 	/*
3273 	 * If allowed, unlock TCP socket while in IP
3274 	 * but only if the connection is established and
3275 	 * in a normal mode where reentrancy on the tcpcb won't be
3276 	 * an issue:
3277 	 * - there is no SACK episode
3278 	 * - we're not in Fast Recovery mode
3279 	 * - if we're not sending from an upcall.
3280 	 */
3281 	if (tcp_output_unlocked && !so->so_upcallusecount &&
3282 	    (tp->t_state == TCPS_ESTABLISHED) && (sack_in_progress == 0) &&
3283 	    !IN_FASTRECOVERY(tp) && !(so->so_flags & SOF_MP_SUBFLOW)) {
3284 		unlocked = TRUE;
3285 		socket_unlock(so, 0);
3286 	}
3287 
3288 	/*
3289 	 * Don't send down a chain of packets when:
3290 	 * - TCP chaining is disabled
3291 	 * - there is an IPsec rule set
3292 	 * - there is a non default rule set for the firewall
3293 	 */
3294 
3295 	chain = tcp_packet_chaining > 1
3296 #if IPSEC
3297 	    && ipsec_bypass
3298 #endif
3299 	;         // I'm important, not extraneous
3300 
3301 	while (pkt != NULL) {
3302 		struct mbuf *npkt = pkt->m_nextpkt;
3303 
3304 		if (!chain) {
3305 			pkt->m_nextpkt = NULL;
3306 			/*
3307 			 * If we are not chaining, make sure to set the packet
3308 			 * list count to 0 so that IP takes the right path;
3309 			 * this is important for cases such as IPsec where a
3310 			 * single mbuf might result in multiple mbufs as part
3311 			 * of the encapsulation.  If a non-zero count is passed
3312 			 * down to IP, the head of the chain might change and
3313 			 * we could end up skipping it (thus generating bogus
3314 			 * packets).  Fixing it in IP would be desirable, but
3315 			 * for now this would do it.
3316 			 */
3317 			cnt = 0;
3318 		}
3319 		if (isipv6) {
3320 			error = ip6_output_list(pkt, cnt,
3321 			    inp->in6p_outputopts, &ro6, flags, NULL, NULL,
3322 			    &ip6oa);
3323 			ifdenied = (ip6oa.ip6oa_flags & IP6OAF_R_IFDENIED);
3324 		} else {
3325 			error = ip_output_list(pkt, cnt, opt, &ro, flags, NULL,
3326 			    &ipoa);
3327 			ifdenied = (ipoa.ipoa_flags & IPOAF_R_IFDENIED);
3328 		}
3329 
3330 		if (chain || error) {
3331 			/*
3332 			 * If we sent down a chain then we are done since
3333 			 * the callee had taken care of everything; else
3334 			 * we need to free the rest of the chain ourselves.
3335 			 */
3336 			if (!chain) {
3337 				m_freem_list(npkt);
3338 			}
3339 			break;
3340 		}
3341 		pkt = npkt;
3342 	}
3343 
3344 	if (unlocked) {
3345 		socket_lock(so, 0);
3346 	}
3347 
3348 	/*
3349 	 * Enter flow controlled state if the connection is established
3350 	 * and is not in recovery. Flow control is allowed only if there
3351 	 * is outstanding data.
3352 	 *
3353 	 * A connection will enter suspended state even if it is in
3354 	 * recovery.
3355 	 */
3356 	if (((adv->code == FADV_FLOW_CONTROLLED && !IN_FASTRECOVERY(tp)) ||
3357 	    adv->code == FADV_SUSPENDED) &&
3358 	    !(tp->t_flags & TF_CLOSING) &&
3359 	    tp->t_state == TCPS_ESTABLISHED &&
3360 	    SEQ_GT(tp->snd_max, tp->snd_una)) {
3361 		int rc;
3362 		rc = inp_set_fc_state(inp, adv->code);
3363 
3364 		if (rc == 1) {
3365 			tcp_ccdbg_trace(tp, NULL,
3366 			    ((adv->code == FADV_FLOW_CONTROLLED) ?
3367 			    TCP_CC_FLOW_CONTROL : TCP_CC_SUSPEND));
3368 			if (adv->code == FADV_FLOW_CONTROLLED) {
3369 				TCP_LOG_OUTPUT(tp, "flow controlled");
3370 			} else {
3371 				TCP_LOG_OUTPUT(tp, "flow suspended");
3372 			}
3373 		}
3374 	}
3375 
3376 	/*
3377 	 * When an interface queue gets suspended, some of the
3378 	 * packets are dropped. Return ENOBUFS, to update the
3379 	 * pcb state.
3380 	 */
3381 	if (adv->code == FADV_SUSPENDED) {
3382 		error = ENOBUFS;
3383 	}
3384 
3385 	VERIFY(inp->inp_sndinprog_cnt > 0);
3386 	if (--inp->inp_sndinprog_cnt == 0) {
3387 		inp->inp_flags &= ~(INP_FC_FEEDBACK);
3388 		if (inp->inp_sndingprog_waiters > 0) {
3389 			wakeup(&inp->inp_sndinprog_cnt);
3390 		}
3391 	}
3392 
3393 	if (isipv6) {
3394 		/*
3395 		 * When an NECP IP tunnel policy forces the outbound interface,
3396 		 * ip6_output_list() informs the transport layer what is the actual
3397 		 * outgoing interface
3398 		 */
3399 		if (ip6oa.ip6oa_flags & IP6OAF_BOUND_IF) {
3400 			outif = ifindex2ifnet[ip6oa.ip6oa_boundif];
3401 		} else if (ro6.ro_rt != NULL) {
3402 			outif = ro6.ro_rt->rt_ifp;
3403 		}
3404 	} else {
3405 		if (ro.ro_rt != NULL) {
3406 			outif = ro.ro_rt->rt_ifp;
3407 		}
3408 	}
3409 	if (check_qos_marking_again) {
3410 		uint32_t qos_marking_gencount;
3411 		bool allow_qos_marking;
3412 		if (isipv6) {
3413 			qos_marking_gencount = ip6oa.qos_marking_gencount;
3414 			allow_qos_marking = ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED ? TRUE : FALSE;
3415 		} else {
3416 			qos_marking_gencount = ipoa.qos_marking_gencount;
3417 			allow_qos_marking = ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED ? TRUE : FALSE;
3418 		}
3419 		inp->inp_policyresult.results.qos_marking_gencount = qos_marking_gencount;
3420 		if (allow_qos_marking == TRUE) {
3421 			inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
3422 		} else {
3423 			inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
3424 		}
3425 	}
3426 
3427 	if (outif != NULL && outif != inp->inp_last_outifp) {
3428 		/* Update the send byte count */
3429 		if (so->so_snd.sb_cc > 0 && so->so_snd.sb_flags & SB_SNDBYTE_CNT) {
3430 			inp_decr_sndbytes_total(so, so->so_snd.sb_cc);
3431 			inp_decr_sndbytes_allunsent(so, tp->snd_una);
3432 			so->so_snd.sb_flags &= ~SB_SNDBYTE_CNT;
3433 		}
3434 		inp->inp_last_outifp = outif;
3435 #if SKYWALK
3436 		if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3437 			netns_set_ifnet(&inp->inp_netns_token, inp->inp_last_outifp);
3438 		}
3439 #endif /* SKYWALK */
3440 	}
3441 
3442 	if (error != 0 && ifdenied &&
3443 	    (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) {
3444 		soevent(so,
3445 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
3446 	}
3447 
3448 	/* Synchronize cached PCB route & options */
3449 	if (isipv6) {
3450 		in6p_route_copyin(inp, &ro6);
3451 	} else {
3452 		inp_route_copyin(inp, &ro);
3453 	}
3454 
3455 	if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift == 0 &&
3456 	    tp->t_inpcb->inp_route.ro_rt != NULL) {
3457 		/* If we found the route and there is an rtt on it
3458 		 * reset the retransmit timer
3459 		 */
3460 		tcp_getrt_rtt(tp, tp->t_inpcb->in6p_route.ro_rt);
3461 		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
3462 	}
3463 	return error;
3464 #undef ro
3465 #undef ro6
3466 #undef ipoa
3467 #undef ip6oa
3468 }
3469 
3470 int tcptv_persmin_val = TCPTV_PERSMIN;
3471 
3472 void
tcp_setpersist(struct tcpcb * tp)3473 tcp_setpersist(struct tcpcb *tp)
3474 {
3475 	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
3476 
3477 	/* If a PERSIST_TIMER option was set we will limit the
3478 	 * time the persist timer will be active for that connection
3479 	 * in order to avoid DOS by using zero window probes.
3480 	 * see rdar://5805356
3481 	 */
3482 
3483 	if (tp->t_persist_timeout != 0 &&
3484 	    tp->t_timer[TCPT_PERSIST] == 0 &&
3485 	    tp->t_persist_stop == 0) {
3486 		tp->t_persist_stop = tcp_now + tp->t_persist_timeout;
3487 	}
3488 
3489 	/*
3490 	 * Start/restart persistance timer.
3491 	 */
3492 	TCPT_RANGESET(tp->t_timer[TCPT_PERSIST],
3493 	    t * tcp_backoff[tp->t_rxtshift],
3494 	    tcptv_persmin_val, TCPTV_PERSMAX, 0);
3495 	tp->t_timer[TCPT_PERSIST] = OFFSET_FROM_START(tp, tp->t_timer[TCPT_PERSIST]);
3496 
3497 	if (tp->t_rxtshift < TCP_MAXRXTSHIFT) {
3498 		tp->t_rxtshift++;
3499 	}
3500 }
3501 
3502 static int
tcp_recv_throttle(struct tcpcb * tp)3503 tcp_recv_throttle(struct tcpcb *tp)
3504 {
3505 	uint32_t base_rtt, newsize;
3506 	struct sockbuf *sbrcv = &tp->t_inpcb->inp_socket->so_rcv;
3507 
3508 	if (tcp_use_rtt_recvbg == 1 &&
3509 	    TSTMP_SUPPORTED(tp)) {
3510 		/*
3511 		 * Timestamps are supported on this connection. Use
3512 		 * RTT to look for an increase in latency.
3513 		 */
3514 
3515 		/*
3516 		 * If the connection is already being throttled, leave it
3517 		 * in that state until rtt comes closer to base rtt
3518 		 */
3519 		if (tp->t_flagsext & TF_RECV_THROTTLE) {
3520 			return 1;
3521 		}
3522 
3523 		base_rtt = get_base_rtt(tp);
3524 
3525 		if (base_rtt != 0 && tp->t_rttcur != 0) {
3526 			/*
3527 			 * if latency increased on a background flow,
3528 			 * return 1 to start throttling.
3529 			 */
3530 			if (tp->t_rttcur > (base_rtt + target_qdelay)) {
3531 				tp->t_flagsext |= TF_RECV_THROTTLE;
3532 				if (tp->t_recv_throttle_ts == 0) {
3533 					tp->t_recv_throttle_ts = tcp_now;
3534 				}
3535 				/*
3536 				 * Reduce the recv socket buffer size to
3537 				 * minimize latecy.
3538 				 */
3539 				if (sbrcv->sb_idealsize >
3540 				    tcp_recv_throttle_minwin) {
3541 					newsize = sbrcv->sb_idealsize >> 1;
3542 					/* Set a minimum of 16 K */
3543 					newsize =
3544 					    max(newsize,
3545 					    tcp_recv_throttle_minwin);
3546 					sbrcv->sb_idealsize = newsize;
3547 				}
3548 				return 1;
3549 			} else {
3550 				return 0;
3551 			}
3552 		}
3553 	}
3554 
3555 	/*
3556 	 * Timestamps are not supported or there is no good RTT
3557 	 * measurement. Use IPDV in this case.
3558 	 */
3559 	if (tp->acc_iaj > tcp_acc_iaj_react_limit) {
3560 		return 1;
3561 	}
3562 
3563 	return 0;
3564 }
3565