xref: /xnu-12377.61.12/bsd/netinet/tcp_subr.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
61  */
62 /*
63  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64  * support for mandatory and extensible security protections.  This notice
65  * is included in support of clause 2.2 (b) of the Apple Public License,
66  * Version 2.0.
67  */
68 
69 #include "tcp_includes.h"
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/domain.h>
78 #include <sys/proc.h>
79 #include <sys/kauth.h>
80 #include <sys/socket.h>
81 #include <sys/socketvar.h>
82 #include <sys/protosw.h>
83 #include <sys/random.h>
84 #include <sys/syslog.h>
85 #include <sys/mcache.h>
86 #include <kern/locks.h>
87 #include <kern/uipc_domain.h>
88 #include <kern/zalloc.h>
89 
90 #include <dev/random/randomdev.h>
91 
92 #include <net/route.h>
93 #include <net/if.h>
94 #include <net/content_filter.h>
95 #include <net/ntstat.h>
96 #include <net/multi_layer_pkt_log.h>
97 
98 #define tcp_minmssoverload fring
99 #define _IP_VHL
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/ip.h>
103 #include <netinet/ip_icmp.h>
104 #include <netinet/ip6.h>
105 #include <netinet/icmp6.h>
106 #include <netinet/in_pcb.h>
107 #include <netinet6/in6_pcb.h>
108 #include <netinet/in_var.h>
109 #include <netinet/ip_var.h>
110 #include <netinet/icmp_var.h>
111 #include <netinet6/ip6_var.h>
112 #include <netinet/mptcp_var.h>
113 #include <netinet/tcp.h>
114 #include <netinet/tcp_fsm.h>
115 #include <netinet/tcp_seq.h>
116 #include <netinet/tcp_syncookie.h>
117 #include <netinet/tcp_timer.h>
118 #include <netinet/tcp_var.h>
119 #include <netinet/tcp_cc.h>
120 #include <netinet/tcp_cache.h>
121 #include <kern/thread_call.h>
122 
123 #include <netinet6/tcp6_var.h>
124 #include <netinet/tcpip.h>
125 #include <netinet/tcp_log.h>
126 
127 #include <netinet6/ip6protosw.h>
128 #include <netinet6/esp.h>
129 
130 #if IPSEC
131 #include <netinet6/ipsec.h>
132 #include <netinet6/ipsec6.h>
133 #endif /* IPSEC */
134 
135 #if NECP
136 #include <net/necp.h>
137 #endif /* NECP */
138 
139 #undef tcp_minmssoverload
140 
141 #include <net/sockaddr_utils.h>
142 
143 #include <corecrypto/ccaes.h>
144 #include <libkern/crypto/aes.h>
145 #include <libkern/crypto/md5.h>
146 #include <sys/kdebug.h>
147 #include <mach/sdt.h>
148 #include <pexpert/pexpert.h>
149 #include <mach/mach_time.h>
150 #include <os/ptrtools.h>
151 
152 #define DBG_FNC_TCP_CLOSE       NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
153 
154 static tcp_cc tcp_ccgen;
155 
156 struct mem_acct *tcp_memacct;
157 
158 extern struct tcptimerlist tcp_timer_list;
159 extern struct tcptailq tcp_tw_tailq;
160 
161 extern int tcp_awdl_rtobase;
162 
163 SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED,
164     int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size");
165 
166 SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt,
167     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS,
168     "Default TCP Maximum Segment Size for IPv6");
169 
170 int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int,
171     struct sysctl_req *);
172 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key, CTLTYPE_STRING | CTLFLAG_WR,
173     0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
174 
175 /* Current count of half-open TFO connections */
176 int     tcp_tfo_halfcnt = 0;
177 
178 /* Maximum backlog of half-open TFO connections */
179 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen_backlog,
180     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10,
181     "Backlog queue for half-open TFO connections");
182 
183 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED,
184     int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER,
185     "Enable TCP Fastopen (RFC 7413)");
186 
187 /* ToDo - remove once uTCP stops using it */
188 SYSCTL_SKMEM_TCP_INT(OID_AUTO, now_init, CTLFLAG_RD | CTLFLAG_LOCKED,
189     uint32_t, tcp_now_init, 0, "Initial tcp now value");
190 
191 /* ToDo - remove once uTCP stops using it */
192 SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED,
193     uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in microseconds");
194 
195 /*
196  * Minimum MSS we accept and use. This prevents DoS attacks where
197  * we are forced to a ridiculously low MSS like 20 and send hundreds
198  * of packets instead of one. The effect scales with the available
199  * bandwidth and quickly saturates the CPU and network interface
200  * with packet generation and sending. Set to zero to disable MINMSS
201  * checking. This setting prevents us from sending too small packets.
202  */
203 SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
204     int, tcp_minmss, TCP_MINMSS, "Minimum TCP Maximum Segment Size");
205 
206 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
207     &tcbinfo.ipi_count, 0, "Number of active PCBs");
208 
209 SYSCTL_SKMEM_TCP_INT(OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED,
210     static int, icmp_may_rst, 1,
211     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
212 
213 int             tcp_do_timestamps = 1;
214 #if (DEVELOPMENT || DEBUG)
215 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_timestamps,
216     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_timestamps, 0, "enable TCP timestamps");
217 #endif /* (DEVELOPMENT || DEBUG) */
218 
219 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
220     int, tcp_TCPTV_MIN, 100, "min rtt value allowed");
221 
222 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rexmt_slop, CTLFLAG_RW,
223     int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout");
224 
225 SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
226     __private_extern__ int, tcp_use_randomport, 0,
227     "Randomize TCP port numbers");
228 
229 SYSCTL_SKMEM_TCP_INT(OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
230     __private_extern__ int, tcp_win_scale, 3, "Window scaling factor");
231 
232 #if (DEVELOPMENT || DEBUG)
233 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
234     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
235     "Initalize RTT from route cache");
236 #else
237 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
238     CTLFLAG_RD | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
239     "Initalize RTT from route cache");
240 #endif /* (DEVELOPMENT || DEBUG) */
241 
242 static int tso_debug = 0;
243 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
244     &tso_debug, 0, "TSO verbosity");
245 
246 static int tcp_rxt_seg_max = 1024;
247 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rxt_seg_max, CTLFLAG_RW | CTLFLAG_LOCKED,
248     &tcp_rxt_seg_max, 0, "");
249 
250 static unsigned long tcp_rxt_seg_drop = 0;
251 SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, rxt_seg_drop, CTLFLAG_RD | CTLFLAG_LOCKED,
252     &tcp_rxt_seg_drop, "");
253 
254 static void     tcp_notify(struct inpcb *, int);
255 
256 static KALLOC_TYPE_DEFINE(tcp_bwmeas_zone, struct bwmeas, NET_KT_DEFAULT);
257 KALLOC_TYPE_DEFINE(tcp_reass_zone, struct tseg_qent, NET_KT_DEFAULT);
258 KALLOC_TYPE_DEFINE(tcp_rxt_seg_zone, struct tcp_rxt_seg, NET_KT_DEFAULT);
259 KALLOC_TYPE_DEFINE(tcp_seg_sent_zone, struct tcp_seg_sent, NET_KT_DEFAULT);
260 
261 extern int slowlink_wsize;      /* window correction for slow links */
262 extern int path_mtu_discovery;
263 
264 static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb);
265 
266 #define TCP_BWMEAS_BURST_MINSIZE 6
267 #define TCP_BWMEAS_BURST_MAXSIZE 25
268 
269 /*
270  * Target size of TCP PCB hash tables. Must be a power of two.
271  *
272  * Note that this can be overridden by the kernel environment
273  * variable net.inet.tcp.tcbhashsize
274  */
275 #ifndef TCBHASHSIZE
276 #define TCBHASHSIZE     CONFIG_TCBHASHSIZE
277 #endif
278 
279 __private_extern__ int  tcp_tcbhashsize = TCBHASHSIZE;
280 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED,
281     &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
282 
283 /*
284  * This is the actual shape of what we allocate using the zone
285  * allocator.  Doing it this way allows us to protect both structures
286  * using the same generation count, and also eliminates the overhead
287  * of allocating tcpcbs separately.  By hiding the structure here,
288  * we avoid changing most of the rest of the code (although it needs
289  * to be changed, eventually, for greater efficiency).
290  */
291 #define ALIGNMENT       32
292 struct  inp_tp {
293 	struct  inpcb   inp;
294 	struct  tcpcb   tcb __attribute__((aligned(ALIGNMENT)));
295 };
296 #undef ALIGNMENT
297 
298 static KALLOC_TYPE_DEFINE(tcpcbzone, struct inp_tp, NET_KT_DEFAULT);
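/*
 * Illustrative sketch (not part of the build): a single allocation from
 * tcpcbzone yields both control blocks, with tcb 32-byte aligned inside
 * the same block, along the lines of
 *
 *	struct inp_tp *it = zalloc_flags(tcpcbzone, Z_WAITOK | Z_ZERO);
 *	struct inpcb *inp = &it->inp;
 *	struct tcpcb *tp = &it->tcb;
 */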
299 
300 os_log_t tcp_mpkl_log_object = NULL;
301 
302 static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
303 
304 int tcp_notsent_lowat_check(struct socket *so);
305 static void tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
306     struct if_lim_perf_stat *stat);
307 static void tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
308     struct if_tcp_ecn_perf_stat *stat);
309 
310 static aes_encrypt_ctx tfo_ctx; /* Crypto-context for TFO */
311 
312 /* TCP RST duplicate suppression */
313 static LCK_ATTR_DECLARE(tcp_rst_rlc_attr, 0, 0);
314 static LCK_GRP_DECLARE(tcp_rst_rlc_mtx_grp, "rst_rlc");
315 static LCK_MTX_DECLARE_ATTR(tcp_rst_rlc_mtx_data, &tcp_rst_rlc_mtx_grp, &tcp_rst_rlc_attr);
316 static lck_mtx_t  * const tcp_rst_rlc_mtx = &tcp_rst_rlc_mtx_data;
317 
318 static struct in_endpoints      tcp_rst_rlc_state;
319 static uint32_t                 tcp_rst_rlc_ts;
320 static uint32_t                 tcp_rst_rlc_cnt = 0;
321 
322 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rst_rlc_enable,
323     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_rst_rlc_enable, 1,
324     "Enable RST run-length-compression");
325 
326 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rst_rlc_bucket_ms,
327     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_rst_rlc_bucket_ms, 200,
328     "Duration of RLC bucket in milliseconds for the RST run-length-compression");
329 
330 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rst_rlc_use_ts,
331     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_rst_rlc_use_ts, 1,
332     "Include timestamp in RST run-length-compression");
333 
334 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rst_rlc_verbose,
335     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_rst_rlc_verbose, 0,
336     "Verbose output: 0: no output; 1: log whenever the RST RLC buffer changes");
337 
338 
339 bool
340 tcp_rst_rlc_compress(void *ipgen __sized_by(ipgen_size), size_t ipgen_size __unused, struct tcphdr *th)
341 {
342 	struct ip *ip;
343 	struct ip6_hdr *ip6;
344 	bool isipv6;
345 	struct in_endpoints flow;
346 	bool should_throttle = false;
347 	uint32_t last_tcp_rst_rlc_cnt = 0;
348 	in_port_t last_sport = 0;
349 	in_port_t last_dport = 0;
350 
351 	if (tcp_rst_rlc_enable == 0 || (th->th_flags & TH_RST) == 0) {
352 		return false;
353 	}
354 	bzero(&flow, sizeof(flow));
355 
356 	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
357 
358 	ip6 = ipgen;
359 	ip = ipgen;
360 
361 	flow.ie_lport = th->th_sport;
362 	flow.ie_fport = th->th_dport;
363 
364 	if (isipv6) {
365 		bcopy(&ip6->ip6_src, &flow.ie6_laddr, sizeof(struct in6_addr));
366 		bcopy(&ip6->ip6_dst, &flow.ie6_faddr, sizeof(struct in6_addr));
367 	} else {
368 		bcopy(&ip->ip_src, &flow.ie_laddr, sizeof(struct in_addr));
369 		bcopy(&ip->ip_dst, &flow.ie_faddr, sizeof(struct in_addr));
370 	}
371 
372 	lck_mtx_lock(tcp_rst_rlc_mtx);
373 	if (__improbable((tcp_rst_rlc_use_ts == false || tcp_now - tcp_rst_rlc_ts < tcp_rst_rlc_bucket_ms) &&
374 	    bcmp(&flow, &tcp_rst_rlc_state, sizeof(struct in_endpoints)) == 0)) {
375 		/*
376 		 * The RST RLC state hasn't changed, so we should throttle.
377 		 */
378 		should_throttle = true;
379 		tcp_rst_rlc_cnt++;
380 		tcpstat.tcps_rst_dup_suppressed++;
381 	} else {
382 		should_throttle = false;
383 		last_tcp_rst_rlc_cnt = tcp_rst_rlc_cnt;
384 		last_sport = tcp_rst_rlc_state.ie_lport;
385 		last_dport = tcp_rst_rlc_state.ie_fport;
386 
387 		bcopy(&flow, &tcp_rst_rlc_state, sizeof(struct in_endpoints));
388 		tcp_rst_rlc_ts = tcp_now;
389 
390 		tcp_rst_rlc_cnt = 0;
391 		tcpstat.tcps_rst_not_suppressed++;
392 	}
393 	lck_mtx_unlock(tcp_rst_rlc_mtx);
394 
395 	if (tcp_rst_rlc_verbose) {
396 		if (last_tcp_rst_rlc_cnt != 0) {
397 			os_log(OS_LOG_DEFAULT, "RST RLC compression: compressed %u RST segments [%hu:%hu]",
398 			    last_tcp_rst_rlc_cnt, ntohs(last_sport), ntohs(last_dport));
399 		}
400 	}
401 
402 	return should_throttle;
403 }
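/*
 * Example (illustrative, default tcp_rst_rlc_bucket_ms of 200): RSTs for
 * the same 4-tuple at t = 0, 50, 120 and 300 ms are handled as send,
 * suppress, suppress, send; the bucket timestamp is refreshed only when
 * the state changes, so the RST at 300 ms opens a new bucket.
 */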
404 
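/*
 * Generate the TFO cookie as a single AES block: the peer address is
 * zero-padded to CCAES_BLOCK_SIZE and encrypted under the TFO key; a
 * one-block CBC pass with a NULL (zero) IV is equivalent to ECB here.
 */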
405 void
406 tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out __sized_by(blk_size), size_t blk_size)
407 {
408 	u_char in[CCAES_BLOCK_SIZE];
409 	int isipv6 = inp->inp_vflag & INP_IPV6;
410 
411 	VERIFY(blk_size == CCAES_BLOCK_SIZE);
412 
413 	bzero(&in[0], CCAES_BLOCK_SIZE);
414 	bzero(&out[0], CCAES_BLOCK_SIZE);
415 
416 	if (isipv6) {
417 		memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr));
418 	} else {
419 		memcpy(in, &inp->inp_faddr, sizeof(struct in_addr));
420 	}
421 
422 	aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx);
423 }
424 
425 __private_extern__ int
426 tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1,
427     __unused int arg2, struct sysctl_req *req)
428 {
429 	int error = 0;
430 	/*
431 	 * TFO-key is expressed as a string in hex format
432 	 *  +1 to account for the \0 char
433 	 *  +1 because sysctl_io_string() expects a string length but the sysctl command
434 	 *     now includes the terminating \0 in newlen -- see rdar://77205344
435 	 */
436 	char keystring[TCP_FASTOPEN_KEYLEN * 2 + 2];
437 	u_int32_t key[TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)];
438 	int i;
439 	size_t ks_len;
440 
441 	/*
442 	 * sysctl_io_string copies keystring into the oldptr of the sysctl_req.
443 	 * Make sure everything is zero, to avoid putting garbage in there or
444 	 * leaking the stack.
445 	 */
446 	bzero(keystring, sizeof(keystring));
447 
448 	error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL);
449 	if (error) {
450 		os_log(OS_LOG_DEFAULT,
451 		    "%s: sysctl_io_string() error %d, req->newlen %lu, sizeof(keystring) %lu",
452 		    __func__, error, req->newlen, sizeof(keystring));
453 		goto exit;
454 	}
455 	if (req->newptr == USER_ADDR_NULL) {
456 		goto exit;
457 	}
458 
459 	ks_len = strbuflen(keystring, sizeof(keystring));
460 	if (ks_len != TCP_FASTOPEN_KEYLEN * 2) {
461 		os_log(OS_LOG_DEFAULT,
462 		    "%s: strlen(keystring) %lu != TCP_FASTOPEN_KEYLEN * 2 %u, newlen %lu",
463 		    __func__, ks_len, TCP_FASTOPEN_KEYLEN * 2, req->newlen);
464 		error = EINVAL;
465 		goto exit;
466 	}
467 
468 	for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) {
469 		/*
470 		 * We jump over the keystring in 8-character (4 byte in hex)
471 		 * steps
472 		 */
473 		if (sscanf(__unsafe_null_terminated_from_indexable(&keystring[i * 8]), "%8x", &key[i]) != 1) {
474 			error = EINVAL;
475 			os_log(OS_LOG_DEFAULT,
476 			    "%s: sscanf() != 1, error EINVAL", __func__);
477 			goto exit;
478 		}
479 	}
480 
481 	aes_encrypt_key128((u_char *)key, &tfo_ctx);
482 
483 exit:
484 	return error;
485 }
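/*
 * Usage sketch (hypothetical key value): with TCP_FASTOPEN_KEYLEN == 16,
 * the key must be written as exactly 32 hex characters, e.g. from
 * userland:
 *
 *	sysctl net.inet.tcp.fastopen_key=000102030405060708090a0b0c0d0e0f
 *
 * Anything shorter or longer is rejected with EINVAL by the length
 * check above.
 */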
486 
487 static int scale_to_powerof2(int size);
488 
489 /*
490  * This helper routine returns one of the following scaled values of size:
491  * 1. Rounded down power of two value of size if the size value passed as
492  *    argument is not a power of two and the rounded up value overflows.
493  * OR
494  * 2. Rounded up power of two value of size if the size value passed as
495  *    argument is not a power of two and the rounded up value does not overflow
496  * OR
497  * 3. Same value as argument size if it is already a power of two.
498  */
499 static int
500 scale_to_powerof2(int size)
501 {
502 	/* Handle special case of size = 0 */
503 	int ret = size ? size : 1;
504 
505 	if (!powerof2(ret)) {
506 		while (!powerof2(size)) {
507 			/*
508 			 * Clear the least significant set bit
509 			 * until only the highest set bit
510 			 * remains, at which point size is the
511 			 * rounded-down power of two.
512 			 */
513 			size = size & (size - 1);
514 		}
515 
516 		/* Check for overflow when rounding up */
517 		if (0 == (size << 1)) {
518 			ret = size;
519 		} else {
520 			ret = size << 1;
521 		}
522 	}
523 
524 	return ret;
525 }
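/*
 * Examples: scale_to_powerof2(0) == 1, scale_to_powerof2(512) == 512 and
 * scale_to_powerof2(600) == 1024 (600 is rounded down to 512, then
 * doubled since doubling does not overflow).
 */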
526 
527 /*
528  * Round the floating point to the next integer
529  * Eg. 1.3 will round up to 2.
530  */
531 uint32_t
532 tcp_ceil(double a)
533 {
534 	double res = (uint32_t) a;
535 	return (uint32_t)(res + (res < a));
536 }
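/*
 * Examples: tcp_ceil(1.3) == 2 and tcp_ceil(2.0) == 2.
 */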
537 
538 uint32_t
539 tcp_round_to(uint32_t val, uint32_t round)
540 {
541 	/*
542 	 * Round up or down based on the middle. Meaning, if we round upon a
543 	 * multiple of 10, 16 will round to 20 and 14 will round to 10.
544 	 */
545 	return ((val + (round / 2)) / round) * round;
546 }
547 
548 /*
549  * Round up to the next multiple of base.
550  * Eg. for a base of 64, 65 will become 128,
551  * 2896 will become 2944.
552  */
553 uint32_t
554 tcp_round_up(uint32_t val, uint32_t base)
555 {
556 	if (base == 1 || val % base == 0) {
557 		return val;
558 	}
559 
560 	return ((val + base) / base) * base;
561 }
562 
563 uint32_t
564 ntoh24(u_char *p __sized_by(3))
565 {
566 	uint32_t v;
567 
568 	v  = (uint32_t)(p[0] << 16);
569 	v |= (uint32_t)(p[1] << 8);
570 	v |= (uint32_t)(p[2] << 0);
571 	return v;
572 }
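/*
 * Example: for p = { 0x01, 0x02, 0x03 }, ntoh24(p) returns 0x010203.
 */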
573 
574 uint32_t
575 tcp_packets_this_ack(struct tcpcb *tp, uint32_t acked)
576 {
577 	return acked / tp->t_maxseg +
578 	       (((acked % tp->t_maxseg) != 0) ? 1 : 0);
579 }
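/*
 * This is a ceiling division: e.g. with t_maxseg == 1448, an ACK covering
 * 3000 bytes counts as 3 packets (two full segments plus a partial one).
 */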
580 
581 static void
582 tcp_tfo_init(void)
583 {
584 	u_char key[TCP_FASTOPEN_KEYLEN];
585 
586 	read_frandom(key, sizeof(key));
587 	aes_encrypt_key128(key, &tfo_ctx);
588 }
589 
590 static u_char isn_secret[32];
591 
592 /*
593  * Tcp initialization
594  */
595 void
596 tcp_init(struct protosw *pp, struct domain *dp)
597 {
598 #pragma unused(dp)
599 	static int tcp_initialized = 0;
600 	struct inpcbinfo *pcbinfo;
601 	struct timeval now;
602 
603 	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
604 
605 	if (tcp_memacct == NULL) {
606 		uint64_t hlimit = max_mem_actual >> 5;
607 		tcp_memacct = mem_acct_register("TCP", hlimit, 80);
608 		if (tcp_memacct == NULL) {
609 			panic("mem_acct_register returned NULL");
610 		}
611 	}
612 	pp->pr_mem_acct = tcp_memacct;
613 
614 	if (!os_atomic_cmpxchg(&tcp_initialized, 0, 1, relaxed)) {
615 		return;
616 	}
617 
618 #if DEBUG || DEVELOPMENT
619 	(void) PE_parse_boot_argn("tcp_rxt_seg_max", &tcp_rxt_seg_max,
620 	    sizeof(tcp_rxt_seg_max));
621 #endif /* DEBUG || DEVELOPMENT */
622 
623 	tcp_ccgen = 1;
624 	tcp_keepinit = TCPTV_KEEP_INIT;
625 	tcp_keepidle = TCPTV_KEEP_IDLE;
626 	tcp_keepintvl = TCPTV_KEEPINTVL;
627 	tcp_keepcnt = TCPTV_KEEPCNT;
628 	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
629 	tcp_msl = TCPTV_MSL;
630 
631 	microuptime(&now);
632 	tcp_now = (uint32_t)now.tv_sec * 1000 + now.tv_usec / TCP_RETRANSHZ_TO_USEC;
633 
634 	/* ToDo - remove once uTCP stops using it */
635 	tcp_now_init = tcp_now;
636 	tcp_microuptime_init = tcp_now;
637 	SYSCTL_SKMEM_UPDATE_FIELD(tcp.microuptime_init, tcp_microuptime_init);
638 	SYSCTL_SKMEM_UPDATE_FIELD(tcp.now_init, tcp_now_init);
639 
640 	tcp_tfo_init();
641 	tcp_syncookie_init();
642 
643 	LIST_INIT(&tcb);
644 	tcbinfo.ipi_listhead = &tcb;
645 
646 	pcbinfo = &tcbinfo;
647 
648 	/*
649 	 * allocate group, lock attributes and lock for tcp pcb mutexes
650 	 */
651 	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("tcppcb",
652 	    LCK_GRP_ATTR_NULL);
653 	lck_attr_setdefault(&pcbinfo->ipi_lock_attr);
654 	lck_rw_init(&pcbinfo->ipi_lock, pcbinfo->ipi_lock_grp,
655 	    &pcbinfo->ipi_lock_attr);
656 
657 	if (tcp_tcbhashsize == 0) {
658 		/* Set to default */
659 		tcp_tcbhashsize = 512;
660 	}
661 
662 	if (!powerof2(tcp_tcbhashsize)) {
663 		int old_hash_size = tcp_tcbhashsize;
664 		tcp_tcbhashsize = scale_to_powerof2(tcp_tcbhashsize);
665 		/* Lower limit of 16  */
666 		if (tcp_tcbhashsize < 16) {
667 			tcp_tcbhashsize = 16;
668 		}
669 		printf("WARNING: TCB hash size not a power of 2, "
670 		    "scaled from %d to %d.\n",
671 		    old_hash_size,
672 		    tcp_tcbhashsize);
673 	}
674 
675 	hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_hashbase,
676 	    tcbinfo.ipi_hashbase_count);
677 	tcbinfo.ipi_hashmask = tcbinfo.ipi_hashbase_count - 1;
678 	hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_porthashbase,
679 	    tcbinfo.ipi_porthashbase_count);
680 	tcbinfo.ipi_porthashmask = tcbinfo.ipi_porthashbase_count - 1;
681 	tcbinfo.ipi_zone = tcpcbzone;
682 
683 	tcbinfo.ipi_gc = tcp_gc;
684 	tcbinfo.ipi_timer = tcp_itimer;
685 	in_pcbinfo_attach(&tcbinfo);
686 
687 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
688 	if (max_protohdr < TCP_MINPROTOHDR) {
689 		max_protohdr = (int)P2ROUNDUP(TCP_MINPROTOHDR, sizeof(uint32_t));
690 	}
691 	if (max_linkhdr + max_protohdr > MCLBYTES) {
692 		panic("tcp_init");
693 	}
694 #undef TCP_MINPROTOHDR
695 
696 	/* Initialize time wait and timer lists */
697 	TAILQ_INIT(&tcp_tw_tailq);
698 
699 	bzero(&tcp_timer_list, sizeof(tcp_timer_list));
700 	LIST_INIT(&tcp_timer_list.lhead);
701 	/*
702 	 * allocate group and attribute for the tcp timer list
703 	 */
704 	tcp_timer_list.mtx_grp = lck_grp_alloc_init("tcptimerlist",
705 	    LCK_GRP_ATTR_NULL);
706 	lck_mtx_init(&tcp_timer_list.mtx, tcp_timer_list.mtx_grp,
707 	    LCK_ATTR_NULL);
708 
709 	tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL);
710 	if (tcp_timer_list.call == NULL) {
711 		panic("failed to allocate call entry 1 in tcp_init");
712 	}
713 
714 	/* Initialize TCP Cache */
715 	tcp_cache_init();
716 
717 	tcp_mpkl_log_object = MPKL_CREATE_LOGOBJECT("com.apple.xnu.tcp");
718 	if (tcp_mpkl_log_object == NULL) {
719 		panic("MPKL_CREATE_LOGOBJECT failed");
720 	}
721 
722 	if (PE_parse_boot_argn("tcp_log", &tcp_log_enable_flags, sizeof(tcp_log_enable_flags))) {
723 		os_log(OS_LOG_DEFAULT, "tcp_init: set tcp_log_enable_flags to 0x%x", tcp_log_enable_flags);
724 	}
725 
726 	if (PE_parse_boot_argn("tcp_link_heuristics", &tcp_link_heuristics_flags, sizeof(tcp_link_heuristics_flags))) {
727 		os_log(OS_LOG_DEFAULT, "tcp_init: set tcp_link_heuristics_flags to 0x%x", tcp_link_heuristics_flags);
728 	}
729 
730 	/*
731 	 * If more than 4GB of actual memory is available, increase the
732 	 * maximum allowed receive and send socket buffer size.
733 	 */
734 	if (mem_actual >= (1ULL << (GBSHIFT + 2))) {
735 		if (serverperfmode) {
736 			tcp_autorcvbuf_max = 8 * 1024 * 1024;
737 			tcp_autosndbuf_max = 8 * 1024 * 1024;
738 		} else {
739 			tcp_autorcvbuf_max = 4 * 1024 * 1024;
740 			tcp_autosndbuf_max = 4 * 1024 * 1024;
741 		}
742 
743 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.autorcvbufmax, tcp_autorcvbuf_max);
744 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.autosndbufmax, tcp_autosndbuf_max);
745 	}
746 
747 	if (serverperfmode) {
748 		tcp_syncookie = 1;
749 	}
750 
751 	/* Initialize the TCP CCA array */
752 	tcp_cc_init();
753 
754 	read_frandom(&isn_secret, sizeof(isn_secret));
755 
756 	bzero(&tcp_rst_rlc_state, sizeof(struct in_endpoints));
757 }
758 
759 /*
760  * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
761  * tcp_template used to store this data in mbufs, but we now recopy it out
762  * of the tcpcb each time to conserve mbufs.
763  */
764 void
765 tcp_fillheaders(struct mbuf *m, struct tcpcb *tp, void *ip_ptr, void *tcp_ptr,
766     struct sockaddr *local, struct sockaddr *remote)
767 {
768 	struct inpcb *inp = tp->t_inpcb;
769 	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
770 
771 	bool isipv6 = false;
772 
773 	if (local != NULL && remote != NULL) {
774 		isipv6 = (local->sa_family == AF_INET6);
775 	} else {
776 		isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
777 	}
778 
779 	if (isipv6) {
780 		struct ip6_hdr *ip6;
781 
782 		ip6 = (struct ip6_hdr *)ip_ptr;
783 		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
784 		    (inp->inp_flow & IPV6_FLOWINFO_MASK);
785 		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
786 		    (IPV6_VERSION & IPV6_VERSION_MASK);
787 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
788 		ip6->ip6_nxt = IPPROTO_TCP;
789 		ip6->ip6_hlim = 0;
790 		if (local != NULL) {
791 			ip6->ip6_src = SIN6(local)->sin6_addr;
792 		} else {
793 			ip6->ip6_src = inp->in6p_laddr;
794 		}
795 		if (remote != NULL) {
796 			ip6->ip6_dst = SIN6(remote)->sin6_addr;
797 		} else {
798 			ip6->ip6_dst = inp->in6p_faddr;
799 		}
800 
801 		if (m->m_flags & M_PKTHDR) {
802 			uint32_t lifscope = IFSCOPE_NONE, fifscope = IFSCOPE_NONE;
803 			if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
804 				lifscope = inp->inp_lifscope;
805 			} else if (SIN6(local)->sin6_scope_id != IFSCOPE_NONE) {
806 				lifscope = SIN6(local)->sin6_scope_id;
807 			}
808 			if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
809 				fifscope = inp->inp_fifscope;
810 			} else if (SIN6(remote)->sin6_scope_id != IFSCOPE_NONE) {
811 				fifscope = SIN6(remote)->sin6_scope_id;
812 			}
813 			ip6_output_setsrcifscope(m, lifscope, NULL);
814 			ip6_output_setdstifscope(m, fifscope, NULL);
815 		}
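		/*
		 * Seed th_sum with the IPv6 pseudo-header checksum; the TCP
		 * payload portion is folded in later when a real segment is
		 * built from this template.
		 */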
816 		tcp_hdr->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
817 		    htonl(sizeof(struct tcphdr) + IPPROTO_TCP));
818 	} else {
819 		struct ip *ip = (struct ip *) ip_ptr;
820 
821 		ip->ip_vhl = IP_VHL_BORING;
822 		ip->ip_tos = 0;
823 		ip->ip_len = 0;
824 		ip->ip_id = 0;
825 		ip->ip_off = 0;
826 		ip->ip_ttl = 0;
827 		ip->ip_sum = 0;
828 		ip->ip_p = IPPROTO_TCP;
829 		if (local != NULL) {
830 			ip->ip_src = SIN(local)->sin_addr;
831 		} else {
832 			ip->ip_src = inp->inp_laddr;
833 		}
834 		if (remote != NULL) {
835 			ip->ip_dst = SIN(remote)->sin_addr;
836 		} else {
837 			ip->ip_dst = inp->inp_faddr;
838 		}
839 		tcp_hdr->th_sum =
840 		    in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
841 		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
842 	}
843 	if (local != NULL) {
844 		tcp_hdr->th_sport = SIN(local)->sin_port;
845 	} else {
846 		tcp_hdr->th_sport = inp->inp_lport;
847 	}
848 	if (remote != NULL) {
849 		tcp_hdr->th_dport = SIN(remote)->sin_port;
850 	} else {
851 		tcp_hdr->th_dport = inp->inp_fport;
852 	}
853 	tcp_hdr->th_seq = 0;
854 	tcp_hdr->th_ack = 0;
855 	tcp_hdr->th_x2 = 0;
856 	tcp_hdr->th_off = 5;
857 	tcp_hdr->th_flags = 0;
858 	tcp_hdr->th_win = 0;
859 	tcp_hdr->th_urp = 0;
860 }
861 
862 static uint8_t
863 tcp_filloptions(struct tcpopt *peer_to, uint16_t thflags, uint16_t mss, uint8_t rcv_scale,
864     uint32_t ts_offset, u_char *__counted_by(TCP_MAXOLEN) optp)
865 {
866 	uint8_t optlen = 0;
867 	struct tcpopt to;
868 
869 	to.to_flags = 0;
870 
871 	if (thflags & TH_SYN) {
872 		to.to_mss = mss;
873 		to.to_flags = TOF_MSS;
874 		if (peer_to->to_flags & TOF_SCALE) {
875 			to.to_wscale = rcv_scale;
876 			to.to_flags |= TOF_SCALE;
877 		}
878 		if (peer_to->to_flags & TOF_SACKPERM) {
879 			to.to_flags |= TOF_SACKPERM;
880 		}
881 	}
882 	if ((peer_to->to_flags & TOF_TS)) {
883 		uint32_t tcp_now_local = os_access_once(tcp_now);
884 		to.to_tsval = ts_offset + tcp_now_local;
885 		to.to_tsecr = peer_to->to_tsval;
886 		to.to_flags |= TOF_TS;
887 	}
888 	optlen = tcp_addoptions(&to, optp, optp + TCP_MAXOLEN);
889 
890 	return optlen;
891 }
892 
893 /*
894  * Create template to be used to send tcp packets on a connection.
895  * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
896  * use for this function is in keepalives, which use tcp_respond.
897  */
898 struct tcptemp *
899 tcp_maketemplate(struct tcpcb *tp, struct mbuf **mp,
900     struct sockaddr *local, struct sockaddr *remote)
901 {
902 	struct mbuf *m;
903 	struct tcptemp *n;
904 
905 	*mp = m = m_get(M_DONTWAIT, MT_HEADER);
906 	if (m == NULL) {
907 		return NULL;
908 	}
909 	m->m_len = sizeof(struct tcptemp);
910 	n = mtod(m, struct tcptemp *);
911 
912 	tcp_fillheaders(m, tp, (void *)&n->tt_ipgen, (void *)&n->tt_t, local, remote);
913 	return n;
914 }
915 
916 /*
917  * Send a single message to the TCP at address specified by
918  * the given TCP/IP header.  If m == 0, then we make a copy
919  * of the tcpiphdr at ti and send directly to the addressed host.
920  * This is used to force keep alive messages out using the TCP
921  * template for a connection.  If flags are given then we send
922  * a message back to the TCP which originated the segment ti,
923  * and discard the mbuf containing it and any other attached mbufs.
924  *
925  * In any case the ack and sequence number of the transmitted
926  * segment are as specified by the parameters.
927  *
928  * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
929  */
930 void
931 tcp_respond(struct tcpcb *tp, void *ipgen __sized_by(ipgen_size), size_t ipgen_size __unused,
932     struct tcphdr *th, struct mbuf *m, tcp_seq ack, tcp_seq seq, uint32_t rcv_win, uint16_t flags,
933     struct tcpopt *peer_to, uint16_t mss, uint8_t rcv_scale, uint32_t ts_offset,
934     struct tcp_respond_args *tra, bool send_syncookie)
935 {
936 	uint16_t tlen;
937 	uint8_t optlen = 0;
938 	int win = 0;
939 	struct route *ro = 0;
940 	struct route sro;
941 	struct ip *ip;
942 	struct tcphdr *nth;
943 	struct route_in6 *ro6 = 0;
944 	struct route_in6 sro6;
945 	struct ip6_hdr *ip6;
946 	int isipv6;
947 	struct ifnet *outif;
948 	int sotc = SO_TC_UNSPEC;
949 	bool check_qos_marking_again = FALSE;
950 	uint32_t sifscope = IFSCOPE_NONE, fifscope = IFSCOPE_NONE;
951 
952 	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
953 	ip6 = ipgen;
954 	ip = ipgen;
955 
956 	if (tp) {
957 		check_qos_marking_again = tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE ? FALSE : TRUE;
958 		sifscope = tp->t_inpcb->inp_lifscope;
959 		fifscope = tp->t_inpcb->inp_fifscope;
960 		if (!(flags & TH_RST)) {
961 			win = tcp_sbspace(tp);
962 			if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) {
963 				win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
964 			}
965 		}
966 		if (isipv6) {
967 			ro6 = &tp->t_inpcb->in6p_route;
968 		} else {
969 			ro = &tp->t_inpcb->inp_route;
970 		}
971 	} else {
972 		if (isipv6) {
973 			ro6 = &sro6;
974 			bzero(ro6, sizeof(*ro6));
975 		} else {
976 			ro = &sro;
977 			bzero(ro, sizeof(*ro));
978 		}
979 		if (rcv_win != 0) {
980 			/* Set TCP receive window if provided */
981 			win = rcv_win;
982 		}
983 	}
984 	if (m == 0) {
985 		m = m_gethdr(M_DONTWAIT, MT_HEADER);    /* MAC-OK */
986 		if (m == NULL) {
987 			return;
988 		}
989 		tlen = 0;
990 		m->m_data += max_linkhdr;
991 		if (isipv6) {
992 			VERIFY((MHLEN - max_linkhdr) >=
993 			    (sizeof(*ip6) + sizeof(*nth)));
994 			bcopy((caddr_t)ip6, mtod(m, caddr_t),
995 			    sizeof(struct ip6_hdr));
996 			ip6 = mtod(m, struct ip6_hdr *);
997 			nth = (struct tcphdr *)(void *)(ip6 + 1);
998 		} else {
999 			VERIFY((MHLEN - max_linkhdr) >=
1000 			    (sizeof(*ip) + sizeof(*nth)));
1001 			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
1002 			ip = mtod(m, struct ip *);
1003 			nth = (struct tcphdr *)(void *)(ip + 1);
1004 		}
1005 		bcopy(th, nth, sizeof(struct tcphdr));
1006 #if MPTCP
1007 		if ((tp) && (tp->t_mpflags & TMPF_RESET)) {
1008 			flags = (TH_RST | TH_ACK);
1009 		} else if (!send_syncookie)
1010 #endif
1011 		flags = TH_ACK;
1012 	} else {
1013 		m_freem(m->m_next);
1014 		m->m_next = 0;
1015 		m->m_data = (uintptr_t)ipgen;
1016 		/* m_len is set later */
1017 		tlen = 0;
1018 #define xchg(a, b, type) { type t; t = a; a = b; b = t; }
1019 		if (isipv6) {
1020 			ip6_getsrcifaddr_info(m, &sifscope, NULL);
1021 			ip6_getdstifaddr_info(m, &fifscope, NULL);
1022 			if (!in6_embedded_scope) {
1023 				m->m_pkthdr.pkt_flags &= ~PKTF_IFAINFO;
1024 			}
1025 			/* Expect 32-bit aligned IP on strict-align platforms */
1026 			IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6);
1027 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1028 			nth = (struct tcphdr *)(void *)(ip6 + 1);
1029 		} else {
1030 			/* Expect 32-bit aligned IP on strict-align platforms */
1031 			IP_HDR_STRICT_ALIGNMENT_CHECK(ip);
1032 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
1033 			nth = (struct tcphdr *)(void *)(ip + 1);
1034 		}
1035 		if (th != nth) {
1036 			/*
1037 			 * this is usually the case when an extension header
1038 			 * exists between the IPv6 header and the
1039 			 * TCP header.
1040 			 */
1041 			nth->th_sport = th->th_sport;
1042 			nth->th_dport = th->th_dport;
1043 		}
1044 		xchg(nth->th_dport, nth->th_sport, n_short);
1045 #undef xchg
1046 	}
1047 
1048 	if (peer_to != NULL) {
1049 		u_char *optp = (u_char *)(nth + 1);
1050 		optlen = tcp_filloptions(peer_to, flags, mss, rcv_scale, ts_offset, optp);
1051 		tlen += optlen;
1052 	}
1053 
1054 	if (isipv6) {
1055 		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) +
1056 		    tlen));
1057 		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1058 		ip6_output_setsrcifscope(m, sifscope, NULL);
1059 		ip6_output_setdstifscope(m, fifscope, NULL);
1060 	} else {
1061 		tlen += sizeof(struct tcpiphdr);
1062 		ip->ip_len = tlen;
1063 		ip->ip_ttl = (uint8_t)ip_defttl;
1064 	}
1065 	m->m_len = tlen;
1066 	m->m_pkthdr.len = tlen;
1067 	m->m_pkthdr.rcvif = 0;
1068 	if (tra->keep_alive) {
1069 		m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
1070 	}
1071 
1072 	nth->th_seq = htonl(seq);
1073 	nth->th_ack = htonl(ack);
1074 	nth->th_x2 = 0;
1075 	nth->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1076 	tcp_set_flags(nth, flags);
1077 	if (tp) {
1078 		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
1079 	} else {
1080 		nth->th_win = htons((u_short)win);
1081 	}
1082 	nth->th_urp = 0;
1083 	if (isipv6) {
1084 		nth->th_sum = 0;
1085 		nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
1086 		    htonl((tlen - sizeof(struct ip6_hdr)) + IPPROTO_TCP));
1087 		m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
1088 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1089 		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
1090 		    ro6 && ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
1091 	} else {
1092 		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1093 		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
1094 		m->m_pkthdr.csum_flags = CSUM_TCP;
1095 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1096 	}
1097 
1098 	if (tcp_rst_rlc_compress(mtod(m, void *), m->m_len, nth) == true) {
1099 		m_freem(m);
1100 		return;
1101 	}
1102 
1103 #if NECP
1104 	necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0, 0);
1105 #endif /* NECP */
1106 
1107 #if IPSEC
1108 	if (tp != NULL && tp->t_inpcb->inp_sp != NULL &&
1109 	    ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
1110 		m_freem(m);
1111 		return;
1112 	}
1113 #endif
1114 
1115 	if (tp != NULL) {
1116 		u_int32_t svc_flags = 0;
1117 		if (isipv6) {
1118 			svc_flags |= PKT_SCF_IPV6;
1119 		}
1120 		sotc = tp->t_inpcb->inp_socket->so_traffic_class;
1121 		if ((flags & TH_RST) == 0) {
1122 			set_packet_service_class(m, tp->t_inpcb->inp_socket,
1123 			    sotc, svc_flags);
1124 		} else {
1125 			m_set_service_class(m, MBUF_SC_BK_SYS);
1126 		}
1127 
1128 		/* Embed flowhash and flow control flags */
1129 		m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
1130 		m->m_pkthdr.pkt_flowid = tp->t_inpcb->inp_flowhash;
1131 		m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
1132 		m->m_pkthdr.pkt_proto = IPPROTO_TCP;
1133 		m->m_pkthdr.tx_tcp_pid = tp->t_inpcb->inp_socket->last_pid;
1134 		m->m_pkthdr.tx_tcp_e_pid = tp->t_inpcb->inp_socket->e_pid;
1135 
1136 		if (flags & TH_RST) {
1137 			m->m_pkthdr.comp_gencnt = tp->t_comp_ack_gencnt;
1138 		}
1139 	} else {
1140 		if (flags & TH_RST) {
1141 			m->m_pkthdr.comp_gencnt = TCP_ACK_COMPRESSION_DUMMY;
1142 			m_set_service_class(m, MBUF_SC_BK_SYS);
1143 		}
1144 	}
1145 
1146 	if (isipv6) {
1147 		struct ip6_out_args ip6oa;
1148 		bzero(&ip6oa, sizeof(ip6oa));
1149 		ip6oa.ip6oa_boundif = tra->ifscope;
1150 		ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
1151 		ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
1152 		ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1153 
1154 		if (tra->ifscope != IFSCOPE_NONE) {
1155 			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
1156 		}
1157 		if (tra->nocell) {
1158 			ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
1159 		}
1160 		if (tra->noexpensive) {
1161 			ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
1162 		}
1163 		if (tra->noconstrained) {
1164 			ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
1165 		}
1166 		if (tra->awdl_unrestricted) {
1167 			ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;
1168 		}
1169 		if (tra->intcoproc_allowed) {
1170 			ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED;
1171 		}
1172 		if (tra->management_allowed) {
1173 			ip6oa.ip6oa_flags |= IP6OAF_MANAGEMENT_ALLOWED;
1174 		}
1175 		if (tra->ultra_constrained_allowed) {
1176 			ip6oa.ip6oa_flags |= IP6OAF_ULTRA_CONSTRAINED_ALLOWED;
1177 		}
1178 		ip6oa.ip6oa_sotc = sotc;
1179 		if (tp != NULL) {
1180 			if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1181 				ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
1182 			}
1183 			ip6oa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
1184 			if (check_qos_marking_again) {
1185 				ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY;
1186 			}
1187 			ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1188 		}
1189 		(void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
1190 		    NULL, &ip6oa);
1191 
1192 		if (check_qos_marking_again) {
1193 			struct inpcb *inp = tp->t_inpcb;
1194 			inp->inp_policyresult.results.qos_marking_gencount = ip6oa.qos_marking_gencount;
1195 			if (ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) {
1196 				inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1197 			} else {
1198 				inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1199 			}
1200 		}
1201 
1202 		if (tp != NULL && ro6 != NULL && ro6->ro_rt != NULL &&
1203 		    (outif = ro6->ro_rt->rt_ifp) !=
1204 		    tp->t_inpcb->in6p_last_outifp) {
1205 			tp->t_inpcb->in6p_last_outifp = outif;
1206 #if SKYWALK
1207 			if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1208 				netns_set_ifnet(&tp->t_inpcb->inp_netns_token,
1209 				    tp->t_inpcb->in6p_last_outifp);
1210 			}
1211 #endif /* SKYWALK */
1212 		}
1213 
1214 		if (ro6 == &sro6) {
1215 			ROUTE_RELEASE(ro6);
1216 		}
1217 	} else {
1218 		struct ip_out_args ipoa;
1219 		bzero(&ipoa, sizeof(ipoa));
1220 		ipoa.ipoa_boundif = tra->ifscope;
1221 		ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
1222 		ipoa.ipoa_sotc = SO_TC_UNSPEC;
1223 		ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1224 
1225 		if (tra->ifscope != IFSCOPE_NONE) {
1226 			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1227 		}
1228 		if (tra->nocell) {
1229 			ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
1230 		}
1231 		if (tra->noexpensive) {
1232 			ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
1233 		}
1234 		if (tra->noconstrained) {
1235 			ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
1236 		}
1237 		if (tra->awdl_unrestricted) {
1238 			ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
1239 		}
1240 		if (tra->management_allowed) {
1241 			ipoa.ipoa_flags |= IPOAF_MANAGEMENT_ALLOWED;
1242 		}
1243 		ipoa.ipoa_sotc = sotc;
1244 		if (tp != NULL) {
1245 			if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1246 				ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1247 			}
1248 			if (!(tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
1249 				ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
1250 			}
1251 			ipoa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
1252 			ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1253 		}
1254 		if (ro != &sro) {
1255 			/* Copy the cached route and take an extra reference */
1256 			inp_route_copyout(tp->t_inpcb, &sro);
1257 		}
1258 		/*
1259 		 * For consistency, pass a local route copy.
1260 		 */
1261 		(void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);
1262 
1263 		if (check_qos_marking_again) {
1264 			struct inpcb *inp = tp->t_inpcb;
1265 			inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount;
1266 			if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) {
1267 				inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1268 			} else {
1269 				inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1270 			}
1271 		}
1272 		if (tp != NULL && sro.ro_rt != NULL &&
1273 		    (outif = sro.ro_rt->rt_ifp) !=
1274 		    tp->t_inpcb->inp_last_outifp) {
1275 			tp->t_inpcb->inp_last_outifp = outif;
1276 #if SKYWALK
1277 			if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1278 				netns_set_ifnet(&tp->t_inpcb->inp_netns_token, outif);
1279 			}
1280 #endif /* SKYWALK */
1281 		}
1282 		if (ro != &sro) {
1283 			/* Synchronize cached PCB route */
1284 			inp_route_copyin(tp->t_inpcb, &sro);
1285 		} else {
1286 			ROUTE_RELEASE(&sro);
1287 		}
1288 	}
1289 }
1290 
1291 /*
1292  * Create a new TCP control block, making an
1293  * empty reassembly queue and hooking it to the argument
1294  * protocol control block.  The `inp' parameter must have
1295  * come from the zone allocator set up in tcp_init().
1296  */
1297 struct tcpcb *
1298 tcp_newtcpcb(struct inpcb *inp)
1299 {
1300 	struct inp_tp *it;
1301 	struct tcpcb *tp;
1302 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1303 	uint32_t random_32;
1304 
1305 	calculate_tcp_clock();
1306 
1307 	it = (struct inp_tp *)(void *)inp;
1308 	tp = &it->tcb;
1309 
1310 	bzero((char *) tp, sizeof(struct tcpcb));
1311 	LIST_INIT(&tp->t_segq);
1312 	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
1313 
1314 	tp->t_flags = TF_REQ_SCALE | (tcp_do_timestamps ? TF_REQ_TSTMP : 0);
1315 	tp->t_flagsext |= TF_SACK_ENABLE;
1316 
1317 	if (tcp_rack) {
1318 		tp->t_flagsext |= TF_RACK_ENABLED;
1319 	}
1320 
1321 	if (tcp_syncookie == 1) {
1322 		tp->t_flagsext |= TF_SYN_COOKIE_ENABLED;
1323 	} else if (tcp_syncookie == 2) {
1324 		tp->t_flagsext |= TF_SYN_COOKIE_FORCE_ENABLED;
1325 	}
1326 
1327 	TAILQ_INIT(&tp->snd_holes);
1328 	SLIST_INIT(&tp->t_rxt_segments);
1329 	TAILQ_INIT(&tp->t_segs_sent);
1330 	RB_INIT(&tp->t_segs_sent_tree);
1331 	TAILQ_INIT(&tp->t_segs_acked);
1332 	TAILQ_INIT(&tp->seg_pool.free_segs);
1333 	SLIST_INIT(&tp->t_notify_ack);
1334 	tp->t_inpcb = inp;
1335 	/*
1336 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1337 	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
1338 	 * reasonable initial retransmit time.
1339 	 */
1340 	tp->t_srtt = TCPTV_SRTTBASE;
1341 	tp->t_rttvar =
1342 	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1343 	tp->t_rttmin = tcp_TCPTV_MIN;
1344 	tp->t_rxtcur = TCPTV_RTOBASE;
1345 
1346 	if (tcp_use_newreno) {
1347 		/* use newreno by default */
1348 		tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
1349 #if (DEVELOPMENT || DEBUG)
1350 	} else if (tcp_use_ledbat) {
1351 		/* use ledbat for testing */
1352 		tp->tcp_cc_index = TCP_CC_ALGO_BACKGROUND_INDEX;
1353 #endif
1354 	} else {
1355 		/* Set L4S state even if ifp might be NULL */
1356 		tcp_set_l4s(tp, inp->inp_last_outifp);
1357 		if (tp->l4s_enabled) {
1358 			tp->tcp_cc_index = TCP_CC_ALGO_PRAGUE_INDEX;
1359 		} else {
1360 			tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX;
1361 		}
1362 	}
1363 
1364 	tcp_cc_allocate_state(tp);
1365 
1366 	if (CC_ALGO(tp)->init != NULL) {
1367 		CC_ALGO(tp)->init(tp);
1368 	}
1369 
1370 	/* Initialize rledbat if we are using recv_bg */
1371 	if (tcp_rledbat == 1 && TCP_RECV_BG(inp->inp_socket) &&
1372 	    tcp_cc_rledbat.init != NULL) {
1373 		tcp_cc_rledbat.init(tp);
1374 	}
1375 
1376 	tp->snd_cwnd = tcp_initial_cwnd(tp);
1377 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1378 	tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1379 	tp->t_rcvtime = tcp_now;
1380 	tp->tentry.te_timer_start = tcp_now;
1381 	tp->t_persist_timeout = tcp_max_persist_timeout;
1382 	tp->t_persist_stop = 0;
1383 	tp->t_rexmtthresh = (uint8_t)tcprexmtthresh;
1384 	tp->rack.reo_wnd_multi = 1;
1385 	tp->rfbuf_ts = tcp_now;
1386 	tp->rfbuf_space = tcp_initial_cwnd(tp);
1387 	tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1388 	tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
1389 
1390 	/* Enable bandwidth measurement on this connection */
1391 	tp->t_flagsext |= TF_MEASURESNDBW;
1392 	if (tp->t_bwmeas == NULL) {
1393 		tp->t_bwmeas = tcp_bwmeas_alloc(tp);
1394 		if (tp->t_bwmeas == NULL) {
1395 			tp->t_flagsext &= ~TF_MEASURESNDBW;
1396 		}
1397 	}
1398 
1399 	/* Clear time wait tailq entry */
1400 	tp->t_twentry.tqe_next = NULL;
1401 	tp->t_twentry.tqe_prev = NULL;
1402 
1403 	read_frandom(&random_32, sizeof(random_32));
1404 	tp->t_comp_ack_gencnt = random_32;
1405 	if (tp->t_comp_ack_gencnt <= TCP_ACK_COMPRESSION_DUMMY ||
1406 	    tp->t_comp_ack_gencnt > INT_MAX) {
1407 		tp->t_comp_ack_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
1408 	}
1409 	tp->t_comp_ack_lastinc = tcp_now;
1410 
1411 	/* Initialize Accurate ECN state */
1412 	tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_feature_disabled;
1413 	tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_feature_disabled;
1414 
1415 	/*
1416 	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
1417 	 * because the socket may be bound to an IPv6 wildcard address,
1418 	 * which may match an IPv4-mapped IPv6 address.
1419 	 */
1420 	inp->inp_ip_ttl = (uint8_t)ip_defttl;
1421 	inp->inp_ppcb = (caddr_t)tp;
1422 	return tp;            /* XXX */
1423 }
1424 
1425 /*
1426  * Drop a TCP connection, reporting
1427  * the specified error.  If connection is synchronized,
1428  * then send a RST to peer.
1429  */
1430 struct tcpcb *
1431 tcp_drop(struct tcpcb *tp, int errno)
1432 {
1433 	struct socket *so = tp->t_inpcb->inp_socket;
1434 #if CONFIG_DTRACE
1435 	struct inpcb *inp = tp->t_inpcb;
1436 #endif
1437 
1438 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
1439 		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1440 		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1441 		TCP_LOG_STATE(tp, TCPS_CLOSED);
1442 		tp->t_state = TCPS_CLOSED;
1443 		(void) tcp_output(tp);
1444 		tcpstat.tcps_drops++;
1445 	} else {
1446 		tcpstat.tcps_conndrops++;
1447 	}
1448 	if (errno == ETIMEDOUT && tp->t_softerror) {
1449 		errno = tp->t_softerror;
1450 	}
1451 	so->so_error = (u_short)errno;
1452 
1453 	TCP_LOG_CONNECTION_SUMMARY(tp);
1454 
1455 	return tcp_close(tp);
1456 }
1457 
1458 void
1459 tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt)
1460 {
1461 	TCP_LOG_RTM_RTT(tp, rt);
1462 
1463 	if (rt->rt_rmx.rmx_rtt != 0 && tcp_init_rtt_from_cache != 0) {
1464 		uint32_t rtt = rt->rt_rmx.rmx_rtt;
1465 		uint32_t rttvar;
1466 		/*
1467 		 * XXX the lock bit for RTT indicates that the value
1468 		 * is also a minimum value; this is subject to time.
1469 		 */
1470 		if (rt->rt_rmx.rmx_locks & RTV_RTT) {
1471 			tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ);
1472 		} else {
1473 			tp->t_rttmin = TCPTV_REXMTMIN;
1474 		}
1475 
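		/*
		 * Route metrics express RTT in RTM_RTTUNIT units per second;
		 * convert to TCP_RETRANSHZ ticks, keeping TCP_RTT_SCALE
		 * fixed-point precision as used by t_srtt.
		 */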
1476 		rtt = rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1477 		tcpstat.tcps_usedrtt++;
1478 
1479 		if (rt->rt_rmx.rmx_rttvar) {
1480 			rttvar = rt->rt_rmx.rmx_rttvar /
1481 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1482 			tcpstat.tcps_usedrttvar++;
1483 		} else {
1484 			/* default variation is +- 1 rtt */
1485 			rttvar =
1486 			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
1487 		}
1488 
1489 		TCPT_RANGESET(tp->t_rxtcur,
1490 		    tcp_rto_formula(tp->t_rttmin, rtt, rttvar),
1491 		    tp->t_rttmin, TCPTV_REXMTMAX,
1492 		    TCP_ADD_REXMTSLOP(tp));
1493 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_srtt == 0 &&
1494 	    tp->t_rxtshift == 0) {
1495 		struct ifnet *ifp = rt->rt_ifp;
1496 
1497 		if (ifp != NULL && (ifp->if_eflags & IFEF_AWDL) != 0) {
1498 			/*
1499 			 * AWDL needs a special value for the default initial retransmission timeout
1500 			 */
1501 			if (tcp_awdl_rtobase > tcp_TCPTV_MIN) {
1502 				tp->t_rttvar = ((tcp_awdl_rtobase - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1503 			} else {
1504 				tp->t_rttvar = ((tcp_TCPTV_MIN - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1505 			}
1506 			TCPT_RANGESET(tp->t_rxtcur,
1507 			    TCP_REXMTVAL(tp),
1508 			    tp->t_rttmin, TCPTV_REXMTMAX,
1509 			    TCP_ADD_REXMTSLOP(tp));
1510 		}
1511 	}
1512 
1513 	TCP_LOG_RTT_INFO(tp);
1514 }
1515 
1516 static inline void
1517 tcp_create_ifnet_stats_per_flow(struct tcpcb *tp,
1518     struct ifnet_stats_per_flow *ifs)
1519 {
1520 	struct inpcb *inp;
1521 	struct socket *so;
1522 	if (tp == NULL || ifs == NULL) {
1523 		return;
1524 	}
1525 
1526 	bzero(ifs, sizeof(*ifs));
1527 	inp = tp->t_inpcb;
1528 	so = inp->inp_socket;
1529 
1530 	ifs->ipv4 = (inp->inp_vflag & INP_IPV6) ? 0 : 1;
1531 	ifs->local = (tp->t_flags & TF_LOCAL) ? 1 : 0;
1532 	ifs->connreset = (so->so_error == ECONNRESET) ? 1 : 0;
1533 	ifs->conntimeout = (so->so_error == ETIMEDOUT) ? 1 : 0;
1534 	ifs->ecn_flags = tp->ecn_flags;
1535 	ifs->txretransmitbytes = tp->t_stat.txretransmitbytes;
1536 	ifs->rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1537 	ifs->rxmitpkts = tp->t_stat.rxmitpkts;
1538 	ifs->rcvoopack = tp->t_rcvoopack;
1539 	ifs->pawsdrop = tp->t_pawsdrop;
1540 	ifs->sack_recovery_episodes = tp->t_sack_recovery_episode;
1541 	ifs->reordered_pkts = tp->t_reordered_pkts;
1542 	ifs->dsack_sent = tp->t_dsack_sent;
1543 	ifs->dsack_recvd = tp->t_dsack_recvd;
1544 	ifs->srtt = tp->t_srtt;
1545 	ifs->rttupdated = tp->t_rttupdated;
1546 	ifs->rttvar = tp->t_rttvar;
1547 	ifs->rttmin = get_base_rtt(tp);
1548 	if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_sndbw_max > 0) {
1549 		ifs->bw_sndbw_max = tp->t_bwmeas->bw_sndbw_max;
1550 	} else {
1551 		ifs->bw_sndbw_max = 0;
1552 	}
1553 	if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_rcvbw_max > 0) {
1554 		ifs->bw_rcvbw_max = tp->t_bwmeas->bw_rcvbw_max;
1555 	} else {
1556 		ifs->bw_rcvbw_max = 0;
1557 	}
1558 	ifs->bk_txpackets = so->so_tc_stats[MBUF_TC_BK].txpackets;
1559 	ifs->txpackets = inp->inp_mstat.ms_total.ts_txpackets;
1560 	ifs->rxpackets = inp->inp_mstat.ms_total.ts_rxpackets;
1561 }
1562 
1563 static inline void
1564 tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
1565     struct if_tcp_ecn_perf_stat *stat)
1566 {
1567 	u_int64_t curval, oldval;
1568 	stat->total_txpkts += ifs->txpackets;
1569 	stat->total_rxpkts += ifs->rxpackets;
1570 	stat->total_rxmitpkts += ifs->rxmitpkts;
1571 	stat->total_oopkts += ifs->rcvoopack;
1572 	stat->total_reorderpkts += (ifs->reordered_pkts +
1573 	    ifs->pawsdrop + ifs->dsack_sent + ifs->dsack_recvd);
1574 
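	/*
	 * The running averages below are EWMAs with gain 1/16:
	 * avg <- (15*avg + sample) / 16, computed in integer math as
	 * ((avg << 4) - avg + sample) >> 4.
	 */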
1575 	/* Average RTT */
1576 	curval = ifs->srtt >> TCP_RTT_SHIFT;
1577 	if (curval > 0 && ifs->rttupdated >= 16) {
1578 		if (stat->rtt_avg == 0) {
1579 			stat->rtt_avg = curval;
1580 		} else {
1581 			oldval = stat->rtt_avg;
1582 			stat->rtt_avg = ((oldval << 4) - oldval + curval) >> 4;
1583 		}
1584 	}
1585 
1586 	/* RTT variance */
1587 	curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1588 	if (curval > 0 && ifs->rttupdated >= 16) {
1589 		if (stat->rtt_var == 0) {
1590 			stat->rtt_var = curval;
1591 		} else {
1592 			oldval = stat->rtt_var;
1593 			stat->rtt_var =
1594 			    ((oldval << 4) - oldval + curval) >> 4;
1595 		}
1596 	}
1597 
1598 	/* SACK episodes */
1599 	stat->sack_episodes += ifs->sack_recovery_episodes;
1600 	if (ifs->connreset) {
1601 		stat->rst_drop++;
1602 	}
1603 }
1604 
1605 static inline void
1606 tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
1607     struct if_lim_perf_stat *stat)
1608 {
1609 	u_int64_t curval, oldval;
1610 
1611 	stat->lim_total_txpkts += ifs->txpackets;
1612 	stat->lim_total_rxpkts += ifs->rxpackets;
1613 	stat->lim_total_retxpkts += ifs->rxmitpkts;
1614 	stat->lim_total_oopkts += ifs->rcvoopack;
1615 
1616 	if (ifs->bw_sndbw_max > 0) {
1617 		/* convert from bytes per ms to bits per second */
1618 		ifs->bw_sndbw_max *= 8000;
1619 		stat->lim_ul_max_bandwidth = MAX(stat->lim_ul_max_bandwidth,
1620 		    ifs->bw_sndbw_max);
1621 	}
1622 
1623 	if (ifs->bw_rcvbw_max > 0) {
1624 		/* convert from bytes per ms to bits per second */
1625 		ifs->bw_rcvbw_max *= 8000;
1626 		stat->lim_dl_max_bandwidth = MAX(stat->lim_dl_max_bandwidth,
1627 		    ifs->bw_rcvbw_max);
1628 	}
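	/* In both conversions above, 8000 = 8 bits/byte * 1000 ms/s. */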
1629 
1630 	/* Average RTT */
1631 	curval = ifs->srtt >> TCP_RTT_SHIFT;
1632 	if (curval > 0 && ifs->rttupdated >= 16) {
1633 		if (stat->lim_rtt_average == 0) {
1634 			stat->lim_rtt_average = curval;
1635 		} else {
1636 			oldval = stat->lim_rtt_average;
1637 			stat->lim_rtt_average =
1638 			    ((oldval << 4) - oldval + curval) >> 4;
1639 		}
1640 	}
1641 
1642 	/* RTT variance */
1643 	curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1644 	if (curval > 0 && ifs->rttupdated >= 16) {
1645 		if (stat->lim_rtt_variance == 0) {
1646 			stat->lim_rtt_variance = curval;
1647 		} else {
1648 			oldval = stat->lim_rtt_variance;
1649 			stat->lim_rtt_variance =
1650 			    ((oldval << 4) - oldval + curval) >> 4;
1651 		}
1652 	}
1653 
1654 	if (stat->lim_rtt_min == 0) {
1655 		stat->lim_rtt_min = ifs->rttmin;
1656 	} else {
1657 		stat->lim_rtt_min = MIN(stat->lim_rtt_min, ifs->rttmin);
1658 	}
1659 
1660 	/* connection timeouts */
1661 	stat->lim_conn_attempts++;
1662 	if (ifs->conntimeout) {
1663 		stat->lim_conn_timeouts++;
1664 	}
1665 
1666 	/* bytes sent using background delay-based algorithms */
1667 	stat->lim_bk_txpkts += ifs->bk_txpackets;
1668 }
1669 
1670 static void
1671 tcp_free_reassq(struct tcpcb *tp)
1672 {
1673 	struct tseg_qent *q;
1674 
1675 	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
1676 		struct mbuf *m;
1677 
1678 		LIST_REMOVE(q, tqe_q);
1679 		m = tcp_destroy_reass_qent(tp, q);
1680 		m_freem(m);
1681 	}
1682 }
1683 
1684 struct tseg_qent *
1685 tcp_create_reass_qent(struct tcpcb *tp, struct mbuf *m,
1686     struct tcphdr *th, int len)
1687 {
1688 	struct tseg_qent *te;
1689 	int size;
1690 
1691 	te = tcp_reass_qent_alloc(tp->t_inpcb->inp_socket->so_proto);
1692 	if (te == NULL) {
1693 		return NULL;
1694 	}
1695 
1696 	tp->t_reassqlen++;
1697 	OSIncrementAtomic(&tcp_reass_total_qlen);
1698 
1699 	size = m_chain_capacity(m);
1700 	tcp_memacct_add(size);
1701 	tp->t_reassq_mbcnt += size;
1702 
1703 	te->tqe_m = m;
1704 	te->tqe_th = th;
1705 	te->tqe_len = len;
1706 
1707 	return te;
1708 }
1709 
1710 struct mbuf *
1711 tcp_destroy_reass_qent(struct tcpcb *tp, struct tseg_qent *q)
1712 {
1713 	struct mbuf *m = q->tqe_m;
1714 	int size;
1715 
1716 	size = m_chain_capacity(m);
1717 	tcp_memacct_sub(size);
1718 	tp->t_reassq_mbcnt -= size;
1719 
1720 	tp->t_reassqlen--;
1721 	OSDecrementAtomic(&tcp_reass_total_qlen);
1722 	tcp_reass_qent_free(tp->t_inpcb->inp_socket->so_proto, q);
1723 
1724 	return m;
1725 }
1726 
1727 struct tseg_qent *
1728 tcp_reass_qent_alloc(struct protosw *proto)
1729 {
1730 	struct tseg_qent *reass;
1731 
1732 	if (proto_memacct_hardlimit(proto)) {
1733 		return NULL;
1734 	}
1735 	reass = zalloc_flags(tcp_reass_zone, Z_NOPAGEWAIT);
1736 	if (reass == NULL) {
1737 		return NULL;
1738 	}
1739 
1740 	proto_memacct_add(proto, kalloc_type_size(tcp_reass_zone));
1741 
1742 	return reass;
1743 }
1744 
1745 void
1746 tcp_reass_qent_free(struct protosw *proto, struct tseg_qent *te)
1747 {
1748 	proto_memacct_sub(proto, kalloc_type_size(tcp_reass_zone));
1749 	zfree(tcp_reass_zone, te);
1750 }
1751 
1752 /*
1753  * Close a TCP control block:
1754  *	discard all space held by the tcp
1755  *	discard internet protocol block
1756  *	wake up any sleepers
1757  */
1758 struct tcpcb *
1759 tcp_close(struct tcpcb *tp)
1760 {
1761 	struct inpcb *inp = tp->t_inpcb;
1762 	struct socket *so = inp->inp_socket;
1763 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1764 	struct route *ro;
1765 	struct rtentry *rt;
1766 	int dosavessthresh;
1767 	struct ifnet_stats_per_flow ifs;
1768 
1769 	/* tcp_close was called previously, bail */
1770 	if (inp->inp_ppcb == NULL) {
1771 		return NULL;
1772 	}
1773 
1774 	tcp_del_fsw_flow(tp);
1775 
1776 	tcp_canceltimers(tp);
1777 	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
1778 
1779 	/*
1780 	 * If another thread for this tcp is currently in ip (indicated by
1781 	 * the TF_SENDINPROG flag), defer the cleanup until after it returns
1782 	 * back to tcp.  This is done to serialize the close until after all
1783 	 * pending output is finished, in order to avoid having the PCB be
1784 	 * detached and the cached route cleaned, only for ip to cache the
1785 	 * route back into the PCB again.  Note that we've cleared all the
1786 	 * timers at this point.  Set TF_CLOSING to indicate to tcp_output()
1787 	 * that it should call us again once it returns from ip; at that
1788 	 * point both flags should be cleared and we can proceed further
1789 	 * with the cleanup.
1790 	 */
1791 	if ((tp->t_flags & TF_CLOSING) ||
1792 	    inp->inp_sndinprog_cnt > 0) {
1793 		tp->t_flags |= TF_CLOSING;
1794 		return NULL;
1795 	}
1796 
1797 	TCP_LOG_CONNECTION_SUMMARY(tp);
1798 
1799 	DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1800 	    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1801 
1802 	ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route);
1803 	rt = ro->ro_rt;
1804 	if (rt != NULL) {
1805 		RT_LOCK_SPIN(rt);
1806 	}
1807 
1808 	/*
1809 	 * If we got enough samples through the srtt filter,
1810 	 * save the rtt and rttvar in the routing entry.
1811 	 * 'Enough' is arbitrarily defined as 16 samples.
1812 	 * 16 samples is enough for the srtt filter to converge
1813 	 * to within 5% of the correct value; fewer samples and
1814 	 * we could save a very bogus rtt.
1815 	 *
1816 	 * Don't update the default route's characteristics and don't
1817 	 * update anything that the user "locked".
1818 	 */
1819 	if (tp->t_rttupdated >= 16) {
1820 		u_int32_t i = 0;
1821 		bool log_rtt = false;
1822 
1823 		if (isipv6) {
1824 			struct sockaddr_in6 *sin6;
1825 
1826 			if (rt == NULL) {
1827 				goto no_valid_rt;
1828 			}
1829 			sin6 = SIN6(rt_key(rt));
1830 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1831 				goto no_valid_rt;
1832 			}
1833 		} else if (ROUTE_UNUSABLE(ro) ||
1834 		    SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) {
1835 			DTRACE_TCP4(state__change, void, NULL,
1836 			    struct inpcb *, inp, struct tcpcb *, tp,
1837 			    int32_t, TCPS_CLOSED);
1838 			TCP_LOG_STATE(tp, TCPS_CLOSED);
1839 			tp->t_state = TCPS_CLOSED;
1840 			goto no_valid_rt;
1841 		}
1842 
1843 		RT_LOCK_ASSERT_HELD(rt);
1844 		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1845 			i = tp->t_srtt *
1846 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
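			/*
			 * RTM_RTTUNIT is expressed in usec/sec; srtt is kept
			 * in TCP_RETRANSHZ ticks scaled by TCP_RTT_SCALE, so
			 * the product converts srtt into the microsecond
			 * units the routing table expects.
			 */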
1847 			if (rt->rt_rmx.rmx_rtt && i) {
1848 				/*
1849 				 * filter this update to half the old & half
1850 				 * the new values, converting scale.
1851 				 * See route.h and tcp_var.h for a
1852 				 * description of the scaling constants.
1853 				 */
1854 				rt->rt_rmx.rmx_rtt =
1855 				    (rt->rt_rmx.rmx_rtt + i) / 2;
1856 			} else {
1857 				rt->rt_rmx.rmx_rtt = i;
1858 			}
1859 			tcpstat.tcps_cachedrtt++;
1860 			log_rtt = true;
1861 		}
1862 		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1863 			i = tp->t_rttvar *
1864 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1865 			if (rt->rt_rmx.rmx_rttvar && i) {
1866 				rt->rt_rmx.rmx_rttvar =
1867 				    (rt->rt_rmx.rmx_rttvar + i) / 2;
1868 			} else {
1869 				rt->rt_rmx.rmx_rttvar = i;
1870 			}
1871 			tcpstat.tcps_cachedrttvar++;
1872 			log_rtt = true;
1873 		}
1874 		if (log_rtt) {
1875 			TCP_LOG_RTM_RTT(tp, rt);
1876 			TCP_LOG_RTT_INFO(tp);
1877 		}
1878 		/*
1879 		 * The old comment here said:
1880 		 * update the pipelimit (ssthresh) if it has been updated
1881 		 * already or if a pipesize was specified & the threshold
1882 		 * got below half the pipesize.  I.e., wait for bad news
1883 		 * before we start updating, then update on both good
1884 		 * and bad news.
1885 		 *
1886 		 * But we want to save the ssthresh even if no pipesize is
1887 		 * specified explicitly in the route, because such
1888 		 * connections still have an implicit pipesize specified
1889 		 * by the global tcp_sendspace.  In the absence of a reliable
1890 		 * way to calculate the pipesize, it will have to do.
1891 		 */
1892 		i = tp->snd_ssthresh;
1893 		if (rt->rt_rmx.rmx_sendpipe != 0) {
1894 			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
1895 		} else {
1896 			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
1897 		}
1898 		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1899 		    i != 0 && rt->rt_rmx.rmx_ssthresh != 0) ||
1900 		    dosavessthresh) {
1901 			/*
1902 			 * convert the limit from user data bytes to
1903 			 * packets then to packet data bytes.
1904 			 */
1905 			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
1906 			if (i < 2) {
1907 				i = 2;
1908 			}
1909 			i *= (u_int32_t)(tp->t_maxseg +
1910 			    (isipv6 ? sizeof(struct ip6_hdr) +
1911 			    sizeof(struct tcphdr) :
1912 			    sizeof(struct tcpiphdr)));
1913 			if (rt->rt_rmx.rmx_ssthresh) {
1914 				rt->rt_rmx.rmx_ssthresh =
1915 				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
1916 			} else {
1917 				rt->rt_rmx.rmx_ssthresh = i;
1918 			}
1919 			tcpstat.tcps_cachedssthresh++;
1920 		}
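		/*
		 * Worked example of the conversion above (IPv4, assuming
		 * ssthresh = 16384 and t_maxseg = 1448): 16384 bytes round
		 * to 11 segments, and 11 * (1448 + 40) = 16368 wire bytes
		 * end up cached in rmx_ssthresh.
		 */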
1921 	}
1922 
1923 	/*
1924 	 * Mark route for deletion if no information is cached.
1925 	 */
1926 	if (rt != NULL && (so->so_flags & SOF_OVERFLOW)) {
1927 		if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
1928 		    rt->rt_rmx.rmx_rtt == 0) {
1929 			rt->rt_flags |= RTF_DELCLONE;
1930 		}
1931 	}
1932 
1933 no_valid_rt:
1934 	if (rt != NULL) {
1935 		RT_UNLOCK(rt);
1936 	}
1937 
1938 	/* free the reassembly queue, if any */
1939 	tcp_free_reassq(tp);
1940 
1941 	/* performance stats per interface */
1942 	tcp_create_ifnet_stats_per_flow(tp, &ifs);
1943 	tcp_update_stats_per_flow(&ifs, inp->inp_last_outifp);
1944 
1945 	tcp_free_sackholes(tp);
1946 	tcp_notify_ack_free(tp);
1947 
1948 	inp_decr_sndbytes_allunsent(so, tp->snd_una);
1949 
1950 	if (tp->t_bwmeas != NULL) {
1951 		tcp_bwmeas_free(tp);
1952 	}
1953 	tcp_rxtseg_clean(tp);
1954 	tcp_segs_sent_clean(tp, true);
1955 
1956 	/* Free the packet list */
1957 	if (tp->t_pktlist_head != NULL) {
1958 		m_freem_list(tp->t_pktlist_head);
1959 	}
1960 	TCP_PKTLIST_CLEAR(tp);
1961 
1962 	TCP_LOG_STATE(tp, TCPS_CLOSED);
1963 	tp->t_state = TCPS_CLOSED;
1964 
1965 	/*
1966 	 * Issue a wakeup before detach so that we don't miss
1967 	 * a wakeup
1968 	 */
1969 	sodisconnectwakeup(so);
1970 
1971 	/*
1972 	 * Make sure to clear the TCP Keep Alive Offload as it is
1973 	 * ref counted on the interface
1974 	 */
1975 	tcp_clear_keep_alive_offload(so);
1976 
1977 	/*
1978 	 * If this is a socket that does not want to wakeup the device
1979 	 * for its traffic, the application might need to know that the
1980 	 * socket is closed, send a notification.
1981 	 */
1982 	if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
1983 	    inp->inp_state != INPCB_STATE_DEAD &&
1984 	    !(inp->inp_flags2 & INP2_TIMEWAIT)) {
1985 		socket_post_kev_msg_closed(so);
1986 	}
1987 
1988 	if (CC_ALGO(tp)->cleanup != NULL) {
1989 		CC_ALGO(tp)->cleanup(tp);
1990 	}
1991 
1992 	tp->tcp_cc_index = TCP_CC_ALGO_NONE;
1993 
1994 	if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.cleanup != NULL) {
1995 		tcp_cc_rledbat.cleanup(tp);
1996 	}
1997 
1998 	/* Can happen if we close the socket before receiving the third ACK */
1999 	if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
2000 		OSDecrementAtomic(&tcp_tfo_halfcnt);
2001 
2002 		/* Panic if something has gone terribly wrong. */
2003 		VERIFY(tcp_tfo_halfcnt >= 0);
2004 
2005 		tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
2006 	}
2007 
2008 	if (SOCK_CHECK_DOM(so, PF_INET6)) {
2009 		in6_pcbdetach(inp);
2010 	} else {
2011 		in_pcbdetach(inp);
2012 	}
2013 
2014 	/*
2015 	 * Call soisdisconnected after detach because it might unlock the socket
2016 	 */
2017 	soisdisconnected(so);
2018 	tcpstat.tcps_closed++;
2019 	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END,
2020 	    tcpstat.tcps_closed, 0, 0, 0, 0);
2021 	return NULL;
2022 }
2023 
2024 void
2025 tcp_drain(void)
2026 {
2027 	struct inpcb *inp;
2028 	struct tcpcb *tp;
2029 
2030 	if (!lck_rw_try_lock_exclusive(&tcbinfo.ipi_lock)) {
2031 		return;
2032 	}
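	/*
	 * Draining is best effort: the try-lock above simply skips this
	 * pass when the lock is contended.
	 */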
2033 
2034 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
2035 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
2036 		    WNT_STOPUSING) {
2037 			socket_lock(inp->inp_socket, 1);
2038 			if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
2039 			    == WNT_STOPUSING) {
2040 				/* lost a race, try the next one */
2041 				socket_unlock(inp->inp_socket, 1);
2042 				continue;
2043 			}
2044 			tp = intotcpcb(inp);
2045 
2046 			so_drain_extended_bk_idle(inp->inp_socket);
2047 
2048 			socket_unlock(inp->inp_socket, 1);
2049 		}
2050 	}
2051 	lck_rw_done(&tcbinfo.ipi_lock);
2052 }
2053 
2054 /*
2055  * Notify a tcp user of an asynchronous error;
2056  * store error as soft error.
2057  *
2058  * Do not wake up the user: there currently is no mechanism for
2059  * reporting soft errors (yet - a kqueue filter may be added);
2060  * once one exists, the notification should wake the user as well.
2061  */
2062 static void
2063 tcp_notify(struct inpcb *inp, int error)
2064 {
2065 	struct tcpcb *tp;
2066 
2067 	if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) {
2068 		return; /* pcb is gone already */
2069 	}
2070 	tp = (struct tcpcb *)inp->inp_ppcb;
2071 
2072 	VERIFY(tp != NULL);
2073 	/*
2074 	 * Ignore some errors if we are hooked up.
2075 	 * If connection hasn't completed, has retransmitted several times,
2076 	 * and receives a second error, give up now.  This is better
2077 	 * than waiting a long time to establish a connection that
2078 	 * can never complete.
2079 	 */
2080 	if (tp->t_state == TCPS_ESTABLISHED &&
2081 	    (error == EHOSTUNREACH || error == ENETUNREACH ||
2082 	    error == EHOSTDOWN)) {
2083 		if (inp->inp_route.ro_rt) {
2084 			rtfree(inp->inp_route.ro_rt);
2085 			inp->inp_route.ro_rt = (struct rtentry *)NULL;
2086 		}
2087 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
2088 	    tp->t_softerror) {
2089 		tcp_drop(tp, error);
2090 	} else {
2091 		tp->t_softerror = error;
2092 	}
2093 }
2094 
2095 struct bwmeas *
2096 tcp_bwmeas_alloc(struct tcpcb *tp)
2097 {
2098 	struct bwmeas *elm;
2099 	elm = zalloc_flags(tcp_bwmeas_zone, Z_ZERO | Z_WAITOK);
2100 	elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE;
2101 	elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg;
2102 	return elm;
2103 }
2104 
2105 void
2106 tcp_bwmeas_free(struct tcpcb *tp)
2107 {
2108 	zfree(tcp_bwmeas_zone, tp->t_bwmeas);
2109 	tp->t_bwmeas = NULL;
2110 	tp->t_flagsext &= ~(TF_MEASURESNDBW);
2111 }
2112 
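/*
 * Collect up to n inpcb pointers whose generation count predates
 * gencnt, walking the active PCB list first and then the TIME_WAIT
 * tailq; returns the number gathered.  Callers hold tcbinfo.ipi_lock
 * across the walk.
 */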
2113 int
2114 get_tcp_inp_list(struct inpcb * __single *inp_list __counted_by(n), size_t n, inp_gen_t gencnt)
2115 {
2116 	struct tcpcb *tp;
2117 	struct inpcb *inp;
2118 	int i = 0;
2119 
2120 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
2121 		if (i >= n) {
2122 			break;
2123 		}
2124 		if (inp->inp_gencnt <= gencnt &&
2125 		    inp->inp_state != INPCB_STATE_DEAD) {
2126 			inp_list[i++] = inp;
2127 		}
2128 	}
2129 
2130 	TAILQ_FOREACH(tp, &tcp_tw_tailq, t_twentry) {
2131 		if (i >= n) {
2132 			break;
2133 		}
2134 		inp = tp->t_inpcb;
2135 		if (inp->inp_gencnt <= gencnt &&
2136 		    inp->inp_state != INPCB_STATE_DEAD) {
2137 			inp_list[i++] = inp;
2138 		}
2139 	}
2140 	return i;
2141 }
2142 
2143 /*
2144  * tcpcb_to_otcpcb copies specific bits of a tcpcb into the otcpcb format.
2145  * The otcpcb data structure is passed to user space and must not change.
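 * Kernel pointers are passed through VM_KERNEL_ADDRHASH so that real
 * kernel addresses never leak to user space.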
2146  */
2147 static void
2148 tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
2149 {
2150 	otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
2151 	otp->t_dupacks = tp->t_dupacks;
2152 	otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2153 	otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2154 	otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2155 	otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2156 	otp->t_inpcb =
2157 	    (_TCPCB_PTR(struct inpcb *))VM_KERNEL_ADDRHASH(tp->t_inpcb);
2158 	otp->t_state = tp->t_state;
2159 	otp->t_flags = tp->t_flags;
2160 	otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2161 	otp->snd_una = tp->snd_una;
2162 	otp->snd_max = tp->snd_max;
2163 	otp->snd_nxt = tp->snd_nxt;
2164 	otp->snd_up = tp->snd_up;
2165 	otp->snd_wl1 = tp->snd_wl1;
2166 	otp->snd_wl2 = tp->snd_wl2;
2167 	otp->iss = tp->iss;
2168 	otp->irs = tp->irs;
2169 	otp->rcv_nxt = tp->rcv_nxt;
2170 	otp->rcv_adv = tp->rcv_adv;
2171 	otp->rcv_wnd = tp->rcv_wnd;
2172 	otp->rcv_up = tp->rcv_up;
2173 	otp->snd_wnd = tp->snd_wnd;
2174 	otp->snd_cwnd = tp->snd_cwnd;
2175 	otp->snd_ssthresh = tp->snd_ssthresh;
2176 	otp->t_maxopd = tp->t_maxopd;
2177 	otp->t_rcvtime = tp->t_rcvtime;
2178 	otp->t_starttime = tp->t_starttime;
2179 	otp->t_rtttime = tp->t_rtttime;
2180 	otp->t_rtseq = tp->t_rtseq;
2181 	otp->t_rxtcur = tp->t_rxtcur;
2182 	otp->t_maxseg = tp->t_maxseg;
2183 	otp->t_srtt = tp->t_srtt;
2184 	otp->t_rttvar = tp->t_rttvar;
2185 	otp->t_rxtshift = tp->t_rxtshift;
2186 	otp->t_rttmin = tp->t_rttmin;
2187 	otp->t_rttupdated = tp->t_rttupdated;
2188 	otp->max_sndwnd = tp->max_sndwnd;
2189 	otp->t_softerror = tp->t_softerror;
2190 	otp->t_oobflags = tp->t_oobflags;
2191 	otp->t_iobc = tp->t_iobc;
2192 	otp->snd_scale = tp->snd_scale;
2193 	otp->rcv_scale = tp->rcv_scale;
2194 	otp->request_r_scale = tp->request_r_scale;
2195 	otp->requested_s_scale = tp->requested_s_scale;
2196 	otp->ts_recent = tp->ts_recent;
2197 	otp->ts_recent_age = tp->ts_recent_age;
2198 	otp->last_ack_sent = tp->last_ack_sent;
2199 	otp->cc_send = 0;
2200 	otp->cc_recv = 0;
2201 	otp->snd_recover = tp->snd_recover;
2202 	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2203 	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2204 	otp->t_badrxtwin = 0;
2205 }
2206 
2207 static int
2208 tcp_pcblist SYSCTL_HANDLER_ARGS
2209 {
2210 #pragma unused(oidp, arg1, arg2)
2211 	int error, i = 0, n, sz;
2212 	struct inpcb **inp_list;
2213 	inp_gen_t gencnt;
2214 	struct xinpgen xig;
2215 
2216 	/*
2217 	 * The process of preparing the TCB list is too time-consuming and
2218 	 * resource-intensive to repeat twice on every request.
2219 	 */
2220 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
2221 	if (req->oldptr == USER_ADDR_NULL) {
2222 		n = tcbinfo.ipi_count;
2223 		req->oldidx = 2 * (sizeof(xig))
2224 		    + (n + n / 8) * sizeof(struct xtcpcb);
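		/*
		 * The n/8 term pads the estimate by ~12.5% so that PCBs
		 * created between this probe and the actual fetch still fit.
		 */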
2225 		lck_rw_done(&tcbinfo.ipi_lock);
2226 		return 0;
2227 	}
2228 
2229 	if (req->newptr != USER_ADDR_NULL) {
2230 		lck_rw_done(&tcbinfo.ipi_lock);
2231 		return EPERM;
2232 	}
2233 
2234 	/*
2235 	 * OK, now we're committed to doing something.
2236 	 */
2237 	gencnt = tcbinfo.ipi_gencnt;
2238 	sz = n = tcbinfo.ipi_count;
2239 
2240 	bzero(&xig, sizeof(xig));
2241 	xig.xig_len = sizeof(xig);
2242 	xig.xig_count = n;
2243 	xig.xig_gen = gencnt;
2244 	xig.xig_sogen = so_gencnt;
2245 	error = SYSCTL_OUT(req, &xig, sizeof(xig));
2246 	if (error) {
2247 		lck_rw_done(&tcbinfo.ipi_lock);
2248 		return error;
2249 	}
2250 	/*
2251 	 * We are done if there is no pcb
2252 	 */
2253 	if (n == 0) {
2254 		lck_rw_done(&tcbinfo.ipi_lock);
2255 		return 0;
2256 	}
2257 
2258 	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2259 	if (inp_list == NULL) {
2260 		lck_rw_done(&tcbinfo.ipi_lock);
2261 		return ENOMEM;
2262 	}
2263 
2264 	n = get_tcp_inp_list(inp_list, n, gencnt);
2265 
2266 	error = 0;
2267 	for (i = 0; i < n; i++) {
2268 		struct xtcpcb xt;
2269 		caddr_t inp_ppcb __single;
2270 		struct inpcb *inp;
2271 
2272 		inp = inp_list[i];
2273 
2274 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2275 			continue;
2276 		}
2277 		socket_lock(inp->inp_socket, 1);
2278 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2279 			socket_unlock(inp->inp_socket, 1);
2280 			continue;
2281 		}
2282 		if (inp->inp_gencnt > gencnt) {
2283 			socket_unlock(inp->inp_socket, 1);
2284 			continue;
2285 		}
2286 
2287 		bzero(&xt, sizeof(xt));
2288 		xt.xt_len = sizeof(xt);
2289 		/* XXX should avoid extra copy */
2290 		inpcb_to_compat(inp, &xt.xt_inp);
2291 		inp_ppcb = inp->inp_ppcb;
2292 		if (inp_ppcb != NULL) {
2293 			tcpcb_to_otcpcb((struct tcpcb *)(void *)inp_ppcb,
2294 			    &xt.xt_tp);
2295 		} else {
2296 			bzero((char *) &xt.xt_tp, sizeof(xt.xt_tp));
2297 		}
2298 		if (inp->inp_socket) {
2299 			sotoxsocket(inp->inp_socket, &xt.xt_socket);
2300 		}
2301 
2302 		socket_unlock(inp->inp_socket, 1);
2303 
2304 		error = SYSCTL_OUT(req, &xt, sizeof(xt));
2305 	}
2306 	if (!error) {
2307 		/*
2308 		 * Give the user an updated idea of our state.
2309 		 * If the generation differs from what we told
2310 		 * her before, she knows that something happened
2311 		 * while we were processing this request, and it
2312 		 * might be necessary to retry.
2313 		 */
2314 		bzero(&xig, sizeof(xig));
2315 		xig.xig_len = sizeof(xig);
2316 		xig.xig_gen = tcbinfo.ipi_gencnt;
2317 		xig.xig_sogen = so_gencnt;
2318 		xig.xig_count = tcbinfo.ipi_count;
2319 		error = SYSCTL_OUT(req, &xig, sizeof(xig));
2320 	}
2321 
2322 	lck_rw_done(&tcbinfo.ipi_lock);
2323 	kfree_type(struct inpcb *, sz, inp_list);
2324 	return error;
2325 }
2326 
2327 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2328     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2329     tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
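
/*
 * User space typically consumes this sysctl with the standard two-step
 * probe-then-fetch pattern; a rough sketch (error handling omitted):
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0);
 *	void *buf = malloc(len);
 *	sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0);
 *
 * The buffer then holds a leading struct xinpgen, the xtcpcb records,
 * and a trailing xinpgen whose xig_gen can be compared against the
 * first to detect changes made while the list was being generated.
 */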
2330 
2331 #if XNU_TARGET_OS_OSX
2332 
2333 static void
2334 tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
2335 {
2336 	otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
2337 	otp->t_dupacks = tp->t_dupacks;
2338 	otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2339 	otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2340 	otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2341 	otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2342 	otp->t_state = tp->t_state;
2343 	otp->t_flags = tp->t_flags;
2344 	otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2345 	otp->snd_una = tp->snd_una;
2346 	otp->snd_max = tp->snd_max;
2347 	otp->snd_nxt = tp->snd_nxt;
2348 	otp->snd_up = tp->snd_up;
2349 	otp->snd_wl1 = tp->snd_wl1;
2350 	otp->snd_wl2 = tp->snd_wl2;
2351 	otp->iss = tp->iss;
2352 	otp->irs = tp->irs;
2353 	otp->rcv_nxt = tp->rcv_nxt;
2354 	otp->rcv_adv = tp->rcv_adv;
2355 	otp->rcv_wnd = tp->rcv_wnd;
2356 	otp->rcv_up = tp->rcv_up;
2357 	otp->snd_wnd = tp->snd_wnd;
2358 	otp->snd_cwnd = tp->snd_cwnd;
2359 	otp->snd_ssthresh = tp->snd_ssthresh;
2360 	otp->t_maxopd = tp->t_maxopd;
2361 	otp->t_rcvtime = tp->t_rcvtime;
2362 	otp->t_starttime = tp->t_starttime;
2363 	otp->t_rtttime = tp->t_rtttime;
2364 	otp->t_rtseq = tp->t_rtseq;
2365 	otp->t_rxtcur = tp->t_rxtcur;
2366 	otp->t_maxseg = tp->t_maxseg;
2367 	otp->t_srtt = tp->t_srtt;
2368 	otp->t_rttvar = tp->t_rttvar;
2369 	otp->t_rxtshift = tp->t_rxtshift;
2370 	otp->t_rttmin = tp->t_rttmin;
2371 	otp->t_rttupdated = tp->t_rttupdated;
2372 	otp->max_sndwnd = tp->max_sndwnd;
2373 	otp->t_softerror = tp->t_softerror;
2374 	otp->t_oobflags = tp->t_oobflags;
2375 	otp->t_iobc = tp->t_iobc;
2376 	otp->snd_scale = tp->snd_scale;
2377 	otp->rcv_scale = tp->rcv_scale;
2378 	otp->request_r_scale = tp->request_r_scale;
2379 	otp->requested_s_scale = tp->requested_s_scale;
2380 	otp->ts_recent = tp->ts_recent;
2381 	otp->ts_recent_age = tp->ts_recent_age;
2382 	otp->last_ack_sent = tp->last_ack_sent;
2383 	otp->cc_send = 0;
2384 	otp->cc_recv = 0;
2385 	otp->snd_recover = tp->snd_recover;
2386 	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2387 	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2388 	otp->t_badrxtwin = 0;
2389 }
2390 
2391 
2392 static int
2393 tcp_pcblist64 SYSCTL_HANDLER_ARGS
2394 {
2395 #pragma unused(oidp, arg1, arg2)
2396 	int error, i = 0, n, sz;
2397 	struct inpcb **inp_list;
2398 	inp_gen_t gencnt;
2399 	struct xinpgen xig;
2400 
2401 	/*
2402 	 * The process of preparing the TCB list is too time-consuming and
2403 	 * resource-intensive to repeat twice on every request.
2404 	 */
2405 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
2406 	if (req->oldptr == USER_ADDR_NULL) {
2407 		n = tcbinfo.ipi_count;
2408 		req->oldidx = 2 * (sizeof(xig))
2409 		    + (n + n / 8) * sizeof(struct xtcpcb64);
2410 		lck_rw_done(&tcbinfo.ipi_lock);
2411 		return 0;
2412 	}
2413 
2414 	if (req->newptr != USER_ADDR_NULL) {
2415 		lck_rw_done(&tcbinfo.ipi_lock);
2416 		return EPERM;
2417 	}
2418 
2419 	/*
2420 	 * OK, now we're committed to doing something.
2421 	 */
2422 	gencnt = tcbinfo.ipi_gencnt;
2423 	sz = n = tcbinfo.ipi_count;
2424 
2425 	bzero(&xig, sizeof(xig));
2426 	xig.xig_len = sizeof(xig);
2427 	xig.xig_count = n;
2428 	xig.xig_gen = gencnt;
2429 	xig.xig_sogen = so_gencnt;
2430 	error = SYSCTL_OUT(req, &xig, sizeof(xig));
2431 	if (error) {
2432 		lck_rw_done(&tcbinfo.ipi_lock);
2433 		return error;
2434 	}
2435 	/*
2436 	 * We are done if there is no pcb
2437 	 */
2438 	if (n == 0) {
2439 		lck_rw_done(&tcbinfo.ipi_lock);
2440 		return 0;
2441 	}
2442 
2443 	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2444 	if (inp_list == NULL) {
2445 		lck_rw_done(&tcbinfo.ipi_lock);
2446 		return ENOMEM;
2447 	}
2448 
2449 	n = get_tcp_inp_list(inp_list, n, gencnt);
2450 
2451 	error = 0;
2452 	for (i = 0; i < n; i++) {
2453 		struct xtcpcb64 xt;
2454 		struct inpcb *inp;
2455 
2456 		inp = inp_list[i];
2457 
2458 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2459 			continue;
2460 		}
2461 		socket_lock(inp->inp_socket, 1);
2462 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2463 			socket_unlock(inp->inp_socket, 1);
2464 			continue;
2465 		}
2466 		if (inp->inp_gencnt > gencnt) {
2467 			socket_unlock(inp->inp_socket, 1);
2468 			continue;
2469 		}
2470 
2471 		bzero(&xt, sizeof(xt));
2472 		xt.xt_len = sizeof(xt);
2473 		inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
2474 		xt.xt_inpcb.inp_ppcb =
2475 		    (uint64_t)VM_KERNEL_ADDRHASH(inp->inp_ppcb);
2476 		if (inp->inp_ppcb != NULL) {
2477 			tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb,
2478 			    &xt);
2479 		}
2480 		if (inp->inp_socket) {
2481 			sotoxsocket64(inp->inp_socket,
2482 			    &xt.xt_inpcb.xi_socket);
2483 		}
2484 
2485 		socket_unlock(inp->inp_socket, 1);
2486 
2487 		error = SYSCTL_OUT(req, &xt, sizeof(xt));
2488 	}
2489 	if (!error) {
2490 		/*
2491 		 * Give the user an updated idea of our state.
2492 		 * If the generation differs from what we told
2493 		 * her before, she knows that something happened
2494 		 * while we were processing this request, and it
2495 		 * might be necessary to retry.
2496 		 */
2497 		bzero(&xig, sizeof(xig));
2498 		xig.xig_len = sizeof(xig);
2499 		xig.xig_gen = tcbinfo.ipi_gencnt;
2500 		xig.xig_sogen = so_gencnt;
2501 		xig.xig_count = tcbinfo.ipi_count;
2502 		error = SYSCTL_OUT(req, &xig, sizeof(xig));
2503 	}
2504 
2505 	lck_rw_done(&tcbinfo.ipi_lock);
2506 	kfree_type(struct inpcb *, sz, inp_list);
2507 	return error;
2508 }
2509 
2510 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64,
2511     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2512     tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
2513 
2514 #endif /* XNU_TARGET_OS_OSX */
2515 
2516 static int
2517 tcp_pcblist_n SYSCTL_HANDLER_ARGS
2518 {
2519 #pragma unused(oidp, arg1, arg2)
2520 	int error = 0;
2521 
2522 	error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo);
2523 
2524 	return error;
2525 }
2526 
2527 
2528 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n,
2529     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2530     tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
2531 
2532 static int
2533 tcp_progress_probe_enable SYSCTL_HANDLER_ARGS
2534 {
2535 #pragma unused(oidp, arg1, arg2)
2536 
2537 	return ntstat_tcp_progress_enable(req);
2538 }
2539 
2540 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, progress_enable,
2541     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
2542     tcp_progress_probe_enable, "S", "Enable/disable TCP keepalive probing on the specified link(s)");
2543 
2544 
2545 __private_extern__ void
2546 tcp_get_ports_used(ifnet_t ifp, int protocol, uint32_t flags,
2547     bitstr_t *__counted_by(bitstr_size(IP_PORTRANGE_SIZE)) bitfield)
2548 {
2549 	inpcb_get_ports_used(ifp, protocol, flags, bitfield,
2550 	    &tcbinfo);
2551 }
2552 
2553 __private_extern__ uint32_t
2554 tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
2555 {
2556 	return inpcb_count_opportunistic(ifindex, &tcbinfo, flags);
2557 }
2558 
2559 __private_extern__ uint32_t
2560 tcp_find_anypcb_byaddr(struct ifaddr *ifa)
2561 {
2562 #if SKYWALK
2563 	if (netns_is_enabled()) {
2564 		return netns_find_anyres_byaddr(ifa, IPPROTO_TCP);
2565 	} else
2566 #endif /* SKYWALK */
2567 	return inpcb_find_anypcb_byaddr(ifa, &tcbinfo);
2568 }
2569 
2570 static void
2571 tcp_handle_msgsize(struct ip *ip, struct inpcb *inp)
2572 {
2573 	struct rtentry *rt = NULL;
2574 	u_short ifscope = IFSCOPE_NONE;
2575 	int mtu;
2576 	struct sockaddr_in icmpsrc = {
2577 		.sin_len = sizeof(struct sockaddr_in),
2578 		.sin_family = AF_INET, .sin_port = 0, .sin_addr = { .s_addr = 0 },
2579 		.sin_zero = { 0, 0, 0, 0, 0, 0, 0, 0 }
2580 	};
2581 	struct icmp *icp = NULL;
2582 
2583 	icp = __container_of(ip, struct icmp, icmp_ip);
2584 	icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
2585 
2586 	/*
2587 	 * MTU discovery:
2588 	 * If we got a needfrag and there is a host route to the
2589 	 * original destination, and the MTU is not locked, then
2590 	 * set the MTU in the route to the suggested new value
2591 	 * (if given) and then notify as usual.  The ULPs will
2592 	 * notice that the MTU has changed and adapt accordingly.
2593 	 * If no new MTU was suggested, then we guess a new one
2594 	 * less than the current value.  If the new MTU is
2595 	 * unreasonably small (defined by sysctl tcp_minmss), then
2596 	 * we reset the MTU to the interface value and enable the
2597 	 * lock bit, indicating that we are no longer doing MTU
2598 	 * discovery.
2599 	 */
2600 	if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) {
2601 		rt = inp->inp_route.ro_rt;
2602 	}
2603 
2604 	/*
2605 	 * icmp6_mtudisc_update scopes the routing lookup
2606 	 * to the incoming interface (delivered in the mbuf
2607 	 * packet header).
2608 	 * That is mostly OK, but for asymmetric networks
2609 	 * it may be an issue.
2610 	 * Frag needed OR Packet too big really communicates
2611 	 * the MTU for the outbound data path.
2612 	 * Take the interface scope from the cached route or
2613 	 * from the inp's last outgoing interface.
2614 	 */
2615 	if (rt != NULL) {
2616 		ifscope = (rt->rt_ifp != NULL) ?
2617 		    rt->rt_ifp->if_index : IFSCOPE_NONE;
2618 	} else {
2619 		ifscope = (inp->inp_last_outifp != NULL) ?
2620 		    inp->inp_last_outifp->if_index : IFSCOPE_NONE;
2621 	}
2622 
2623 	if ((rt == NULL) ||
2624 	    !(rt->rt_flags & RTF_HOST) ||
2625 	    (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING))) {
2626 		rt = rtalloc1_scoped(SA(&icmpsrc), 0, RTF_CLONING | RTF_PRCLONING, ifscope);
2627 	} else if (rt) {
2628 		RT_LOCK(rt);
2629 		rtref(rt);
2630 		RT_UNLOCK(rt);
2631 	}
2632 
2633 	if (rt != NULL) {
2634 		RT_LOCK(rt);
2635 		if ((rt->rt_flags & RTF_HOST) &&
2636 		    !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
2637 			mtu = ntohs(icp->icmp_nextmtu);
2638 			/*
2639 			 * XXX Stock BSD has changed the following
2640 			 * to compare with icp->icmp_ip.ip_len
2641 			 * to converge faster when sent packet
2642 			 * < route's MTU. We may want to adopt
2643 			 * that change.
2644 			 */
2645 			if (mtu == 0) {
2646 				mtu = ip_next_mtu(rt->rt_rmx.
2647 				    rmx_mtu, 1);
2648 			}
2649 #if DEBUG_MTUDISC
2650 			printf("MTU for %s reduced to %d\n",
2651 			    inet_ntop(AF_INET,
2652 			    &icmpsrc.sin_addr, ipv4str,
2653 			    sizeof(ipv4str)), mtu);
2654 #endif
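			/*
			 * Enforce a floor on the accepted MTU: 296 bytes is
			 * the classic path-MTU-discovery minimum (an RFC 1191
			 * plateau).  An offer below the floor locks the route
			 * MTU, turning discovery off for this destination.
			 */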
2655 			if (mtu < max(296, (tcp_minmss +
2656 			    sizeof(struct tcpiphdr)))) {
2657 				rt->rt_rmx.rmx_locks |= RTV_MTU;
2658 			} else if (rt->rt_rmx.rmx_mtu > mtu) {
2659 				rt->rt_rmx.rmx_mtu = mtu;
2660 			}
2661 		}
2662 		RT_UNLOCK(rt);
2663 		rtfree(rt);
2664 	}
2665 }
2666 
2667 void
2668 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp)
2669 {
2670 	tcp_seq icmp_tcp_seq;
2671 	struct ipctlparam *ctl_param __single = vip;
2672 	struct ip *ip = NULL;
2673 	struct mbuf *m = NULL;
2674 	struct in_addr faddr;
2675 	struct inpcb *inp;
2676 	struct tcpcb *tp;
2677 	struct tcphdr *th;
2678 	struct icmp *icp;
2679 	size_t off;
2680 #if SKYWALK
2681 	union sockaddr_in_4_6 sock_laddr;
2682 	struct protoctl_ev_val prctl_ev_val;
2683 #endif /* SKYWALK */
2684 	void (*notify)(struct inpcb *, int) = tcp_notify;
2685 
2686 	if (ctl_param != NULL) {
2687 		ip = ctl_param->ipc_icmp_ip;
2688 		icp = ctl_param->ipc_icmp;
2689 		m = ctl_param->ipc_m;
2690 		off = ctl_param->ipc_off;
2691 	} else {
2692 		ip = NULL;
2693 		icp = NULL;
2694 		m = NULL;
2695 		off = 0;
2696 	}
2697 
2698 	faddr = SIN(sa)->sin_addr;
2699 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
2700 		return;
2701 	}
2702 
2703 	if ((unsigned)cmd >= PRC_NCMDS) {
2704 		return;
2705 	}
2706 
2707 	/* Source quench is deprecated */
2708 	if (cmd == PRC_QUENCH) {
2709 		return;
2710 	}
2711 
2712 	if (cmd == PRC_MSGSIZE) {
2713 		notify = tcp_mtudisc;
2714 	} else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2715 	    cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2716 	    cmd == PRC_TIMXCEED_INTRANS) && ip) {
2717 		notify = tcp_drop_syn_sent;
2718 	}
2719 	/*
2720 	 * Hostdead is ugly because it goes linearly through all PCBs.
2721 	 * XXX: We never get this from ICMP, otherwise it makes an
2722 	 * excellent DoS attack on machines with many connections.
2723 	 */
2724 	else if (cmd == PRC_HOSTDEAD) {
2725 		ip = NULL;
2726 	} else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2727 		return;
2728 	}
2729 
2730 #if SKYWALK
2731 	bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2732 	bzero(&sock_laddr, sizeof(sock_laddr));
2733 #endif /* SKYWALK */
2734 
2735 	if (ip == NULL) {
2736 		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
2737 #if SKYWALK
2738 		protoctl_event_enqueue_nwk_wq_entry(ifp, NULL,
2739 		    sa, 0, 0, IPPROTO_TCP, cmd, NULL);
2740 #endif /* SKYWALK */
2741 		return;
2742 	}
2743 
2744 	/* Check if we can safely get the sport, dport and the sequence number from the tcp header. */
2745 	if (m == NULL ||
2746 	    (m->m_len < off + (sizeof(unsigned short) + sizeof(unsigned short) + sizeof(tcp_seq)))) {
2747 		/* Insufficient length */
2748 		return;
2749 	}
2750 
2751 	th = (struct tcphdr*)(void*)(mtod(m, uint8_t*) + off);
2752 	icmp_tcp_seq = ntohl(th->th_seq);
2753 
2754 	inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
2755 	    ip->ip_src, th->th_sport, 0, NULL);
2756 
2757 	if (inp == NULL ||
2758 	    inp->inp_socket == NULL) {
2759 #if SKYWALK
2760 		if (cmd == PRC_MSGSIZE) {
2761 			prctl_ev_val.val = ntohs(icp->icmp_nextmtu);
2762 		}
2763 		prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2764 
2765 		sock_laddr.sin.sin_family = AF_INET;
2766 		sock_laddr.sin.sin_len = sizeof(sock_laddr.sin);
2767 		sock_laddr.sin.sin_addr = ip->ip_src;
2768 
2769 		protoctl_event_enqueue_nwk_wq_entry(ifp,
2770 		    SA(&sock_laddr), sa,
2771 		    th->th_sport, th->th_dport, IPPROTO_TCP,
2772 		    cmd, &prctl_ev_val);
2773 #endif /* SKYWALK */
2774 		return;
2775 	}
2776 
2777 	socket_lock(inp->inp_socket, 1);
2778 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2779 	    WNT_STOPUSING) {
2780 		socket_unlock(inp->inp_socket, 1);
2781 		return;
2782 	}
2783 
2784 	if (PRC_IS_REDIRECT(cmd)) {
2785 		/* signal EHOSTDOWN, as it flushes the cached route */
2786 		(*notify)(inp, EHOSTDOWN);
2787 	} else {
2788 		tp = intotcpcb(inp);
2789 		if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2790 		    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2791 			if (cmd == PRC_MSGSIZE) {
2792 				tcp_handle_msgsize(ip, inp);
2793 			}
2794 
2795 			(*notify)(inp, inetctlerrmap[cmd]);
2796 		}
2797 	}
2798 	socket_unlock(inp->inp_socket, 1);
2799 }
2800 
2801 void
2802 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
2803 {
2804 	tcp_seq icmp_tcp_seq;
2805 	struct in6_addr *dst;
2806 	void (*notify)(struct inpcb *, int) = tcp_notify;
2807 	struct ip6_hdr *ip6;
2808 	struct mbuf *m;
2809 	struct inpcb *inp;
2810 	struct tcpcb *tp;
2811 	struct icmp6_hdr *icmp6;
2812 	struct ip6ctlparam *ip6cp = NULL;
2813 	const struct sockaddr_in6 *sa6_src = NULL;
2814 	unsigned int mtu;
2815 	unsigned int off;
2816 
2817 	struct tcp_ports {
2818 		uint16_t th_sport;
2819 		uint16_t th_dport;
2820 	} t_ports;
2821 #if SKYWALK
2822 	union sockaddr_in_4_6 sock_laddr;
2823 	struct protoctl_ev_val prctl_ev_val;
2824 #endif /* SKYWALK */
2825 
2826 	if (sa->sa_family != AF_INET6 ||
2827 	    sa->sa_len != sizeof(struct sockaddr_in6)) {
2828 		return;
2829 	}
2830 
2831 	/* Source quench is deprecated */
2832 	if (cmd == PRC_QUENCH) {
2833 		return;
2834 	}
2835 
2836 	if ((unsigned)cmd >= PRC_NCMDS) {
2837 		return;
2838 	}
2839 
2840 	/* if the parameter is from icmp6, decode it. */
2841 	if (d != NULL) {
2842 		ip6cp = (struct ip6ctlparam *)d;
2843 		icmp6 = ip6cp->ip6c_icmp6;
2844 		m = ip6cp->ip6c_m;
2845 		ip6 = ip6cp->ip6c_ip6;
2846 		off = ip6cp->ip6c_off;
2847 		sa6_src = ip6cp->ip6c_src;
2848 		dst = ip6cp->ip6c_finaldst;
2849 	} else {
2850 		m = NULL;
2851 		ip6 = NULL;
2852 		off = 0;        /* fool gcc */
2853 		sa6_src = &sa6_any;
2854 		dst = NULL;
2855 	}
2856 
2857 	if (cmd == PRC_MSGSIZE) {
2858 		notify = tcp_mtudisc;
2859 	} else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2860 	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) &&
2861 	    ip6 != NULL) {
2862 		notify = tcp_drop_syn_sent;
2863 	}
2864 	/*
2865 	 * Hostdead is ugly because it goes linearly through all PCBs.
2866 	 * XXX: We never get this from ICMP, otherwise it makes an
2867 	 * excellent DoS attack on machines with many connections.
2868 	 */
2869 	else if (cmd == PRC_HOSTDEAD) {
2870 		ip6 = NULL;
2871 	} else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2872 		return;
2873 	}
2874 
2875 #if SKYWALK
2876 	bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2877 	bzero(&sock_laddr, sizeof(sock_laddr));
2878 #endif /* SKYWALK */
2879 
2880 	if (ip6 == NULL) {
2881 		in6_pcbnotify(&tcbinfo, sa, 0, SA(sa6_src), 0, cmd, NULL, notify);
2882 #if SKYWALK
2883 		protoctl_event_enqueue_nwk_wq_entry(ifp, NULL, sa,
2884 		    0, 0, IPPROTO_TCP, cmd, NULL);
2885 #endif /* SKYWALK */
2886 		return;
2887 	}
2888 
2889 	/* Check if we can safely get the ports from the tcp hdr */
2890 	if (m == NULL ||
2891 	    (m->m_pkthdr.len <
2892 	    (int32_t) (off + sizeof(struct tcp_ports)))) {
2893 		return;
2894 	}
2895 	bzero(&t_ports, sizeof(struct tcp_ports));
2896 	m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
2897 
2898 	off += sizeof(struct tcp_ports);
2899 	if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
2900 		return;
2901 	}
2902 	m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
2903 	icmp_tcp_seq = ntohl(icmp_tcp_seq);
2904 
2905 	if (cmd == PRC_MSGSIZE) {
2906 		mtu = ntohl(icmp6->icmp6_mtu);
2907 		/*
2908 		 * If no alternative MTU was proposed, or the proposed
2909 		 * MTU was too small, set it to the minimum.
2910 		 */
2911 		if (mtu < IPV6_MMTU) {
2912 			mtu = IPV6_MMTU - 8;
2913 		}
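		/*
		 * The 8 bytes subtracted from IPV6_MMTU above match the size
		 * of an IPv6 fragment header, so packets built against this
		 * value still fit within the 1280-byte minimum link MTU.
		 */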
2914 	}
2915 
2916 	inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, t_ports.th_dport, ip6_input_getdstifscope(m),
2917 	    &ip6->ip6_src, t_ports.th_sport, ip6_input_getsrcifscope(m), 0, NULL);
2918 
2919 	if (inp == NULL ||
2920 	    inp->inp_socket == NULL) {
2921 #if SKYWALK
2922 		if (cmd == PRC_MSGSIZE) {
2923 			prctl_ev_val.val = mtu;
2924 		}
2925 		prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2926 
2927 		sock_laddr.sin6.sin6_family = AF_INET6;
2928 		sock_laddr.sin6.sin6_len = sizeof(sock_laddr.sin6);
2929 		sock_laddr.sin6.sin6_addr = ip6->ip6_src;
2930 
2931 		protoctl_event_enqueue_nwk_wq_entry(ifp,
2932 		    SA(&sock_laddr), sa,
2933 		    t_ports.th_sport, t_ports.th_dport, IPPROTO_TCP,
2934 		    cmd, &prctl_ev_val);
2935 #endif /* SKYWALK */
2936 		return;
2937 	}
2938 
2939 	socket_lock(inp->inp_socket, 1);
2940 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2941 	    WNT_STOPUSING) {
2942 		socket_unlock(inp->inp_socket, 1);
2943 		return;
2944 	}
2945 
2946 	if (PRC_IS_REDIRECT(cmd)) {
2947 		/* signal EHOSTDOWN, as it flushes the cached route */
2948 		(*notify)(inp, EHOSTDOWN);
2949 	} else {
2950 		tp = intotcpcb(inp);
2951 		if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2952 		    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2953 			if (cmd == PRC_MSGSIZE) {
2954 				/*
2955 				 * Only process the offered MTU if it
2956 				 * is smaller than the current one.
2957 				 */
2958 				if (mtu < tp->t_maxseg +
2959 				    (sizeof(struct tcphdr) + sizeof(struct ip6_hdr))) {
2960 					(*notify)(inp, inetctlerrmap[cmd]);
2961 				}
2962 			} else {
2963 				(*notify)(inp, inetctlerrmap[cmd]);
2964 			}
2965 		}
2966 	}
2967 	socket_unlock(inp->inp_socket, 1);
2968 }
2969 
2970 
2971 /*
2972  * Following is where TCP initial sequence number generation occurs.
2973  *
2974  * There are two places where we must use initial sequence numbers:
2975  * 1.  In SYN-ACK packets.
2976  * 2.  In SYN packets.
2977  *
2978  * The ISNs in SYN-ACK packets have no monotonicity requirement,
2979  * and should be as unpredictable as possible to avoid the possibility
2980  * of spoofing and/or connection hijacking.  To satisfy this
2981  * requirement, SYN-ACK ISNs are generated via the arc4random()
2982  * function.  If exact RFC 1948 compliance is requested via sysctl,
2983  * these ISNs will be generated just like those in SYN packets.
2984  *
2985  * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
2986  * depends on this property.  In addition, these ISNs should be
2987  * unguessable so as to prevent connection hijacking.  To satisfy
2988  * the requirements of this situation, the algorithm outlined in
2989  * RFC 9293 is used to generate sequence numbers.
2990  *
2991  * For more information on the theory of operation, please see
2992  * RFC 9293.
2993  *
2994  * Implementation details:
2995  *
2996  * Time is based off the system timer, and is corrected so that it
2997  * increases by one megabyte per second.  This allows for proper
2998  * recycling on high speed LANs while still leaving over an hour
2999  * before rollover.
3000  *
3001  */
3002 
3003 #define ISN_BYTES_PER_SECOND 1048576
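
/*
 * In symbols, tcp_new_isn() below computes (in the RFC 1948/6528 style):
 *
 *	ISN = M(t) + F(laddr, lport, faddr, fport, secret)
 *
 * where F is MD5 over the connection 4-tuple plus the boot-time
 * isn_secret, and M(t) is a monotonic clock ticking once per 128ns.
 */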
3004 
3005 tcp_seq
3006 tcp_new_isn(struct tcpcb *tp)
3007 {
3008 	uint32_t md5_buffer[4];
3009 	tcp_seq new_isn;
3010 	struct timespec timenow;
3011 	MD5_CTX isn_ctx;
3012 
3013 	nanouptime(&timenow);
3014 
3015 	/* Compute the md5 hash and return the ISN. */
3016 	MD5Init(&isn_ctx);
3017 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport,
3018 	    sizeof(u_short));
3019 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport,
3020 	    sizeof(u_short));
3021 	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
3022 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
3023 		    sizeof(struct in6_addr));
3024 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
3025 		    sizeof(struct in6_addr));
3026 	} else {
3027 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
3028 		    sizeof(struct in_addr));
3029 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
3030 		    sizeof(struct in_addr));
3031 	}
3032 	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
3033 	MD5Final((u_char *) &md5_buffer, &isn_ctx);
3034 
3035 	new_isn = (tcp_seq) md5_buffer[0];
3036 
3037 	/*
3038 	 * We use a 128ns clock, which is equivalent to 600 Mbps and wraps at
3039 	 * 549 seconds, thus safe for 2 MSL lifetime of TIME-WAIT-state.
3040 	 */
3041 	new_isn += (timenow.tv_sec * NSEC_PER_SEC + timenow.tv_nsec) >> 7;
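	/* ">> 7" divides nanoseconds by 128, i.e. one ISN tick every 128ns. */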
3042 
3043 	if (__probable(tcp_randomize_timestamps)) {
3044 		tp->t_ts_offset = md5_buffer[1];
3045 	}
3046 	tp->t_latest_tx = tcp_now;
3047 
3048 	return new_isn;
3049 }
3050 
3051 
3052 /*
3053  * When a specific ICMP unreachable message is received and the
3054  * connection state is SYN-SENT, drop the connection.  This behavior
3055  * is controlled by the icmp_may_rst sysctl.
3056  */
3057 void
3058 tcp_drop_syn_sent(struct inpcb *inp, int errno)
3059 {
3060 	struct tcpcb *tp = intotcpcb(inp);
3061 
3062 	if (tp && tp->t_state == TCPS_SYN_SENT) {
3063 		tcp_drop(tp, errno);
3064 	}
3065 }
3066 
3067 /*
3068  * Get effective MTU for redirect virtual interface. Redirect
3069  * virtual interface switches between multiple delegated interfaces.
3070  * For cases where redirect forwards packets to an IPsec interface,
3071  * the MTU should be adjusted to account for ESP encapsulation overhead.
3072  */
3073 uint32_t
3074 tcp_get_effective_mtu(struct rtentry *rt, uint32_t current_mtu)
3075 {
3076 	ifnet_t ifp = NULL;
3077 	ifnet_t delegated_ifp = NULL;
3078 	ifnet_t outgoing_ifp = NULL;
3079 	uint32_t min_mtu = 0;
3080 	uint32_t outgoing_mtu = 0;
3081 	uint32_t tunnel_overhead = 0;
3082 
3083 	if (rt == NULL || rt->rt_ifp == NULL) {
3084 		return current_mtu;
3085 	}
3086 
3087 	ifp = rt->rt_ifp;
3088 	if (ifp->if_subfamily != IFNET_SUBFAMILY_REDIRECT) {
3089 		return current_mtu;
3090 	}
3091 
3092 	delegated_ifp = ifp->if_delegated.ifp;
3093 	if (delegated_ifp == NULL || delegated_ifp->if_family != IFNET_FAMILY_IPSEC) {
3094 		return current_mtu;
3095 	}
3096 
3097 	min_mtu = MIN(delegated_ifp->if_mtu, current_mtu);
3098 
3099 	outgoing_ifp = delegated_ifp->if_delegated.ifp;
3100 	if (outgoing_ifp == NULL) {
3101 		return min_mtu;
3102 	}
3103 
3104 	outgoing_mtu = outgoing_ifp->if_mtu;
3105 	if (outgoing_mtu > 0) {
3106 		tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
3107 		if (outgoing_mtu > tunnel_overhead) {
3108 			outgoing_mtu -= tunnel_overhead;
3109 		}
3110 		if (outgoing_mtu < min_mtu) {
3111 			return outgoing_mtu;
3112 		}
3113 	}
3114 
3115 	return min_mtu;
3116 }
3117 
3118 /*
3119  * When `need fragmentation' ICMP is received, update our idea of the MSS
3120  * based on the new value in the route.  Also nudge TCP to send something,
3121  * since we know the packet we just sent was dropped.
3122  * This duplicates some code in the tcp_mss() function in tcp_input.c.
3123  */
3124 void
3125 tcp_mtudisc(struct inpcb *inp, __unused int errno)
3126 {
3127 	struct tcpcb *tp = intotcpcb(inp);
3128 	struct rtentry *rt;
3129 	struct socket *so = inp->inp_socket;
3130 	int mss;
3131 	u_int32_t mtu;
3132 	u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr);
3133 	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
3134 
3135 	/*
3136 	 * Nothing left to send after the socket is defunct or TCP is in the closed state
3137 	 */
3138 	if ((so->so_state & SS_DEFUNCT) || (tp != NULL && tp->t_state == TCPS_CLOSED)) {
3139 		return;
3140 	}
3141 
3142 	if (isipv6) {
3143 		protoHdrOverhead = sizeof(struct ip6_hdr) +
3144 		    sizeof(struct tcphdr);
3145 	}
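	/* That is 40 + 20 = 60 bytes for IPv6, vs. the 40-byte tcpiphdr for IPv4. */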
3146 
3147 	if (tp != NULL) {
3148 		if (isipv6) {
3149 			rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
3150 		} else {
3151 			rt = tcp_rtlookup(inp, IFSCOPE_NONE);
3152 		}
3153 		if (!rt || !rt->rt_rmx.rmx_mtu) {
3154 			tp->t_maxopd = tp->t_maxseg =
3155 			    isipv6 ? tcp_v6mssdflt :
3156 			    tcp_mssdflt;
3157 
3158 			/* Route locked during lookup above */
3159 			if (rt != NULL) {
3160 				RT_UNLOCK(rt);
3161 			}
3162 			return;
3163 		}
3164 		mtu = rt->rt_rmx.rmx_mtu;
3165 
3166 		mtu = tcp_get_effective_mtu(rt, mtu);
3167 
3168 		/* Route locked during lookup above */
3169 		RT_UNLOCK(rt);
3170 
3171 #if NECP
3172 		// Adjust MTU if necessary.
3173 		mtu = necp_socket_get_effective_mtu(inp, mtu);
3174 #endif /* NECP */
3175 		mss = mtu - protoHdrOverhead;
3176 
3177 		if (tp->t_maxopd) {
3178 			mss = min(mss, tp->t_maxopd);
3179 		}
3180 		/*
3181 		 * XXX - The above conditional probably violates the TCP
3182 		 * spec.  The problem is that, since we don't know the
3183 		 * other end's MSS, we are supposed to use a conservative
3184 		 * default.  But, if we do that, then MTU discovery will
3185 		 * never actually take place, because the conservative
3186 		 * default is much less than the MTUs typically seen
3187 		 * on the Internet today.  For the moment, we'll sweep
3188 		 * this under the carpet.
3189 		 *
3190 		 * The conservative default might not actually be a problem
3191 		 * if the only case this occurs is when sending an initial
3192 		 * SYN with options and data to a host we've never talked
3193 		 * to before.  Then, they will reply with an MSS value which
3194 		 * will get recorded and the new parameters should get
3195 		 * recomputed.  For Further Study.
3196 		 */
3197 		if (tp->t_maxopd <= mss) {
3198 			return;
3199 		}
3200 		tp->t_maxopd = mss;
3201 
3202 		if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
3203 		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) {
3204 			mss -= TCPOLEN_TSTAMP_APPA;
3205 		}
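		/* RFC 1323 timestamps occupy TCPOLEN_TSTAMP_APPA (12) option bytes per segment. */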
3206 
3207 #if MPTCP
3208 		mss -= mptcp_adj_mss(tp, TRUE);
3209 #endif
3210 		if (so->so_snd.sb_hiwat < mss) {
3211 			mss = so->so_snd.sb_hiwat;
3212 		}
3213 
3214 		tp->t_maxseg = mss;
3215 
3216 		ASSERT(tp->t_maxseg);
3217 
3218 		/*
3219 		 * Reset the slow-start flight size, as it may depend on the
3220 		 * new MSS.
3221 		 */
3222 		if (CC_ALGO(tp)->cwnd_init != NULL) {
3223 			CC_ALGO(tp)->cwnd_init(tp);
3224 		}
3225 
3226 		if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.rwnd_init != NULL) {
3227 			tcp_cc_rledbat.rwnd_init(tp);
3228 		}
3229 
3230 		tcpstat.tcps_mturesent++;
3231 		tp->t_rtttime = 0;
3232 		tp->snd_nxt = tp->snd_una;
3233 		tcp_output(tp);
3234 	}
3235 }
3236 
3237 /*
3238  * Look-up the routing entry to the peer of this inpcb.  If no route
3239  * is found and one cannot be allocated, return NULL.  This routine
3240  * is called by TCP routines that access the rmx structure and by tcp_mss
3241  * to get the interface MTU.  If a route is found, this routine will
3242  * hold the rtentry lock; the caller is responsible for unlocking.
3243  */
3244 struct rtentry *
3245 tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope)
3246 {
3247 	struct route *ro;
3248 	struct rtentry *rt;
3249 	struct tcpcb *tp;
3250 
3251 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3252 
3253 	ro = &inp->inp_route;
3254 	if ((rt = ro->ro_rt) != NULL) {
3255 		RT_LOCK(rt);
3256 	}
3257 
3258 	if (ROUTE_UNUSABLE(ro)) {
3259 		if (rt != NULL) {
3260 			RT_UNLOCK(rt);
3261 			rt = NULL;
3262 		}
3263 		ROUTE_RELEASE(ro);
3264 		/* No route yet, so try to acquire one */
3265 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
3266 			unsigned int ifscope;
3267 
3268 			ro->ro_dst.sa_family = AF_INET;
3269 			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
3270 			SIN(&ro->ro_dst)->sin_addr = inp->inp_faddr;
3271 
3272 			/*
3273 			 * If the socket was bound to an interface, then
3274 			 * the bound-to-interface takes precedence over
3275 			 * the inbound interface passed in by the caller
3276 			 * (if we get here as part of the output path then
3277 			 * input_ifscope is IFSCOPE_NONE).
3278 			 */
3279 			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3280 			    inp->inp_boundifp->if_index : input_ifscope;
3281 
3282 			rtalloc_scoped(ro, ifscope);
3283 			if ((rt = ro->ro_rt) != NULL) {
3284 				RT_LOCK(rt);
3285 			}
3286 		}
3287 	}
3288 	if (rt != NULL) {
3289 		RT_LOCK_ASSERT_HELD(rt);
3290 	}
3291 
3292 	/*
3293 	 * Update MTU discovery determination. Don't do it if:
3294 	 *	1) it is disabled via the sysctl
3295 	 *	2) the route isn't up
3296 	 *	3) the MTU is locked (if it is, then discovery has been
3297 	 *	   disabled)
3298 	 */
3299 
3300 	tp = intotcpcb(inp);
3301 
3302 	if (!path_mtu_discovery || ((rt != NULL) &&
3303 	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3304 		tp->t_flags &= ~TF_PMTUD;
3305 	} else {
3306 		tp->t_flags |= TF_PMTUD;
3307 	}
3308 
3309 	if (rt != NULL && rt->rt_ifp != NULL) {
3310 		somultipages(inp->inp_socket,
3311 		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3312 		tcp_set_tso(tp, rt->rt_ifp);
3313 		soif2kcl(inp->inp_socket,
3314 		    (rt->rt_ifp->if_eflags & IFEF_2KCL));
3315 		/* Don't do ECN and L4S for Loopback & Cellular (if L4S is default) */
3316 		if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0 &&
3317 		    !(IFNET_IS_CELLULAR(rt->rt_ifp) && rt->rt_ifp->if_l4s_mode == IFRTYPE_L4S_DEFAULT)) {
3318 			tcp_set_ecn(tp);
3319 			tcp_set_l4s(tp, rt->rt_ifp);
3320 		}
3321 		if (inp->inp_last_outifp == NULL) {
3322 			inp->inp_last_outifp = rt->rt_ifp;
3323 #if SKYWALK
3324 			if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3325 				netns_set_ifnet(&inp->inp_netns_token,
3326 				    inp->inp_last_outifp);
3327 			}
3328 #endif /* SKYWALK */
3329 		}
3330 	}
3331 
3332 	/* Note if the peer is local */
3333 	if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3334 	    (rt->rt_gateway->sa_family == AF_LINK ||
3335 	    rt->rt_ifp->if_flags & IFF_LOOPBACK ||
3336 	    in_localaddr(inp->inp_faddr))) {
3337 		tp->t_flags |= TF_LOCAL;
3338 	}
3339 
3340 	/*
3341 	 * Caller needs to call RT_UNLOCK(rt).
3342 	 */
3343 	return rt;
3344 }
3345 
3346 struct rtentry *
3347 tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope)
3348 {
3349 	struct route_in6 *ro6;
3350 	struct rtentry *rt;
3351 	struct tcpcb *tp;
3352 
3353 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3354 
3355 	ro6 = &inp->in6p_route;
3356 	if ((rt = ro6->ro_rt) != NULL) {
3357 		RT_LOCK(rt);
3358 	}
3359 
3360 	if (ROUTE_UNUSABLE(ro6)) {
3361 		if (rt != NULL) {
3362 			RT_UNLOCK(rt);
3363 			rt = NULL;
3364 		}
3365 		ROUTE_RELEASE(ro6);
3366 		/* No route yet, so try to acquire one */
3367 		if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
3368 			struct sockaddr_in6 *dst6;
3369 			unsigned int ifscope;
3370 
3371 			dst6 = SIN6(&ro6->ro_dst);
3372 			dst6->sin6_family = AF_INET6;
3373 			dst6->sin6_len = sizeof(*dst6);
3374 			dst6->sin6_addr = inp->in6p_faddr;
3375 
3376 			/*
3377 			 * If the socket was bound to an interface, then
3378 			 * the bound-to-interface takes precedence over
3379 			 * the inbound interface passed in by the caller
3380 			 * (if we get here as part of the output path then
3381 			 * input_ifscope is IFSCOPE_NONE).
3382 			 */
3383 			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3384 			    inp->inp_boundifp->if_index : input_ifscope;
3385 
3386 			rtalloc_scoped((struct route *)ro6, ifscope);
3387 			if ((rt = ro6->ro_rt) != NULL) {
3388 				RT_LOCK(rt);
3389 			}
3390 		}
3391 	}
3392 	if (rt != NULL) {
3393 		RT_LOCK_ASSERT_HELD(rt);
3394 	}
3395 
3404 
3405 	tp = intotcpcb(inp);
3406 
3407 	/*
3408 	 * Update MTU discovery determination. Don't do it if:
3409 	 *	1) it is disabled via the sysctl
3410 	 *	2) the route isn't up
3411 	 *	3) the MTU is locked (if it is, then discovery has been
3412 	 *	   disabled)
3413 	 */
3414 
3415 	if (!path_mtu_discovery || ((rt != NULL) &&
3416 	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3417 		tp->t_flags &= ~TF_PMTUD;
3418 	} else {
3419 		tp->t_flags |= TF_PMTUD;
3420 	}
3421 
3422 	if (rt != NULL && rt->rt_ifp != NULL) {
3423 		somultipages(inp->inp_socket,
3424 		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3425 		tcp_set_tso(tp, rt->rt_ifp);
3426 		soif2kcl(inp->inp_socket,
3427 		    (rt->rt_ifp->if_eflags & IFEF_2KCL));
3428 		/* Don't do ECN and L4S for Loopback & Cellular (if L4S is default) */
3429 		if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0 &&
3430 		    !(IFNET_IS_CELLULAR(rt->rt_ifp) && rt->rt_ifp->if_l4s_mode == IFRTYPE_L4S_DEFAULT)) {
3431 			tcp_set_ecn(tp);
3432 			tcp_set_l4s(tp, rt->rt_ifp);
3433 		}
3434 		if (inp->inp_last_outifp == NULL) {
3435 			inp->inp_last_outifp = rt->rt_ifp;
3436 #if SKYWALK
3437 			if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3438 				netns_set_ifnet(&inp->inp_netns_token,
3439 				    inp->inp_last_outifp);
3440 			}
3441 #endif /* SKYWALK */
3442 		}
3443 
3444 		/* Note if the peer is local */
3445 		if (!(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3446 		    (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
3447 		    IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
3448 		    rt->rt_gateway->sa_family == AF_LINK ||
3449 		    in6_localaddr(&inp->in6p_faddr))) {
3450 			tp->t_flags |= TF_LOCAL;
3451 		}
3452 	}
3453 
3454 	/*
3455 	 * Caller needs to call RT_UNLOCK(rt).
3456 	 */
3457 	return rt;
3458 }
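
/*
 * Illustrative sketch (not from the original source): both lookup routines
 * return the route locked, so a caller is expected to drop the lock once it
 * is done with the rtentry. The helper name is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void
example_rtlookup6_usage(struct inpcb *inp)	/* hypothetical */
{
	struct rtentry *rt = tcp_rtlookup6(inp, IFSCOPE_NONE);

	if (rt != NULL) {
		/* ... consult rt->rt_ifp, rt->rt_rmx, etc. ... */
		RT_UNLOCK(rt);
	}
}
#endif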
3459 
3460 #if IPSEC
3461 /* compute ESP/AH header size for TCP, including outer IP header. */
3462 size_t
3463 ipsec_hdrsiz_tcp(struct tcpcb *tp)
3464 {
3465 	struct inpcb *inp;
3466 	struct mbuf *m;
3467 	size_t hdrsiz;
3468 	struct ip *ip;
3469 	struct ip6_hdr *ip6 = NULL;
3470 	struct tcphdr *th;
3471 
3472 	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) {
3473 		return 0;
3474 	}
3475 	MGETHDR(m, M_DONTWAIT, MT_DATA);        /* MAC-OK */
3476 	if (!m) {
3477 		return 0;
3478 	}
3479 
3480 	if ((inp->inp_vflag & INP_IPV6) != 0) {
3481 		ip6 = mtod(m, struct ip6_hdr *);
3482 		th = (struct tcphdr *)(void *)(ip6 + 1);
3483 		m->m_pkthdr.len = m->m_len =
3484 		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3485 		tcp_fillheaders(m, tp, ip6, th, NULL, NULL);
3486 		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3487 	} else {
3488 		ip = mtod(m, struct ip *);
3489 		th = (struct tcphdr *)(ip + 1);
3490 		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
3491 		tcp_fillheaders(m, tp, ip, th, NULL, NULL);
3492 		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3493 	}
3494 	m_free(m);
3495 	return hdrsiz;
3496 }
3497 #endif /* IPSEC */
3498 
3499 int
3500 tcp_lock(struct socket *so, int refcount, void *lr)
3501 {
3502 	lr_ref_t lr_saved = TCP_INIT_LR_SAVED(lr);
3503 
3504 retry:
3505 	if (so->so_pcb != NULL) {
3506 		if (so->so_flags & SOF_MP_SUBFLOW) {
3507 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3508 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3509 
3510 			socket_lock(mp_so, refcount);
3511 
3512 			/*
3513 			 * Check if we became non-MPTCP while waiting for the lock.
3514 			 * If yes, we have to retry to grab the right lock.
3515 			 */
3516 			if (!(so->so_flags & SOF_MP_SUBFLOW)) {
3517 				socket_unlock(mp_so, refcount);
3518 				goto retry;
3519 			}
3520 		} else {
3521 			lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3522 
3523 			if (so->so_flags & SOF_MP_SUBFLOW) {
3524 				/*
3525 				 * While waiting for the lock, we might have
3526 				 * become MPTCP-enabled (see mptcp_subflow_socreate).
3527 				 */
3528 				lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3529 				goto retry;
3530 			}
3531 		}
3532 	} else {
3533 		panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s",
3534 		    so, lr_saved, solockhistory_nr(so));
3535 		/* NOTREACHED */
3536 	}
3537 
3538 	if (so->so_usecount < 0) {
3539 		panic("tcp_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s",
3540 		    so, so->so_pcb, lr_saved, so->so_usecount,
3541 		    solockhistory_nr(so));
3542 		/* NOTREACHED */
3543 	}
3544 	if (refcount) {
3545 		so->so_usecount++;
3546 	}
3547 	so->lock_lr[so->next_lock_lr] = lr_saved;
3548 	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
3549 	return 0;
3550 }
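
/*
 * Illustrative sketch (not from the original source): the retries above are
 * instances of the generic "lock, revalidate, retry" pattern for objects
 * whose locking identity can change while the thread blocks. The helper
 * name is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void
example_lock_revalidate(struct socket *so)	/* hypothetical */
{
	for (;;) {
		lck_mtx_t *mtx = so->so_proto->pr_getlock(so, 0);

		lck_mtx_lock(mtx);
		if (mtx == so->so_proto->pr_getlock(so, 0)) {
			break;	/* identity unchanged; safe to proceed */
		}
		lck_mtx_unlock(mtx);	/* changed while we slept; retry */
	}
}
#endif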
3551 
3552 int
3553 tcp_unlock(struct socket *so, int refcount, void *lr)
3554 {
3555 	lr_ref_t lr_saved = TCP_INIT_LR_SAVED(lr);
3556 
3557 
3558 #ifdef MORE_TCPLOCK_DEBUG
3559 	printf("tcp_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x "
3560 	    "lr=0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so),
3561 	    (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
3562 	    (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)),
3563 	    so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
3564 #endif
3565 	if (refcount) {
3566 		so->so_usecount--;
3567 	}
3568 
3569 	if (so->so_usecount < 0) {
3570 		panic("tcp_unlock: so=%p usecount=%x lrh= %s",
3571 		    so, so->so_usecount, solockhistory_nr(so));
3572 		/* NOTREACHED */
3573 	}
3574 	if (so->so_pcb == NULL) {
3575 		panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s",
3576 		    so, so->so_usecount, lr_saved, solockhistory_nr(so));
3577 		/* NOTREACHED */
3578 	} else {
3579 		so->unlock_lr[so->next_unlock_lr] = lr_saved;
3580 		so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
3581 
3582 		if (so->so_flags & SOF_MP_SUBFLOW) {
3583 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3584 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3585 
3586 			socket_lock_assert_owned(mp_so);
3587 
3588 			socket_unlock(mp_so, refcount);
3589 		} else {
3590 			LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
3591 			    LCK_MTX_ASSERT_OWNED);
3592 			lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3593 		}
3594 	}
3595 	return 0;
3596 }
3597 
3598 lck_mtx_t *
3599 tcp_getlock(struct socket *so, int flags)
3600 {
3601 	struct inpcb *inp = sotoinpcb(so);
3602 
3603 	if (so->so_pcb) {
3604 		if (so->so_usecount < 0) {
3605 			panic("tcp_getlock: so=%p usecount=%x lrh= %s",
3606 			    so, so->so_usecount, solockhistory_nr(so));
3607 		}
3608 
3609 		if (so->so_flags & SOF_MP_SUBFLOW) {
3610 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3611 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3612 
3613 			return mp_so->so_proto->pr_getlock(mp_so, flags);
3614 		} else {
3615 			return &inp->inpcb_mtx;
3616 		}
3617 	} else {
3618 		panic("tcp_getlock: so=%p NULL so_pcb %s",
3619 		    so, solockhistory_nr(so));
3620 		return so->so_proto->pr_domain->dom_mtx;
3621 	}
3622 }
3623 
3624 /*
3625  * Determine if we can grow the receive socket buffer to avoid sending
3626  * a zero window update to the peer. We allow even socket buffers that
3627  * have fixed size (set by the application) to grow if the resource
3628  * constraints are met. They will also be trimmed after the application
3629  * reads data.
3630  */
3631 static void
3632 tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb)
3633 {
3634 	u_int32_t rcvbufinc = tp->t_maxseg << 4;
3635 	u_int32_t rcvbuf = sb->sb_hiwat;
3636 	struct socket *so = tp->t_inpcb->inp_socket;
3637 
3638 	if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
3639 		return;
3640 	}
3641 
3642 	if (tcp_do_autorcvbuf == 1 &&
3643 	    (tp->t_flags & TF_SLOWLINK) == 0 &&
3644 	    (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
3645 	    (rcvbuf - sb->sb_cc) < rcvbufinc &&
3646 	    rcvbuf < tcp_autorcvbuf_max &&
3647 	    (sb->sb_idealsize > 0 &&
3648 	    sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
3649 		sbreserve(sb,
3650 		    min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max));
3651 	}
3652 }
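
/*
 * Illustrative worked example (assumed values, not from the original
 * source): with t_maxseg = 1448 the growth step is rcvbufinc =
 * 1448 << 4 = 23168 bytes, i.e. the buffer grows 16 MSS at a time, and
 * only while less than one step of free space remains
 * (rcvbuf - sb_cc < rcvbufinc), capped at tcp_autorcvbuf_max.
 */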
3653 
3654 int32_t
3655 tcp_sbspace(struct tcpcb *tp)
3656 {
3657 	struct socket *so = tp->t_inpcb->inp_socket;
3658 	struct sockbuf *sb = &so->so_rcv;
3659 	u_int32_t rcvbuf;
3660 	int32_t space;
3661 	int32_t pending = 0;
3662 
3663 	if (so->so_flags & SOF_MP_SUBFLOW) {
3664 		/* We still need to grow TCP's buffer to have a BDP-estimate */
3665 		tcp_sbrcv_grow_rwin(tp, sb);
3666 
3667 		return mptcp_sbspace(tptomptp(tp));
3668 	}
3669 
3670 	tcp_sbrcv_grow_rwin(tp, sb);
3671 
3672 	/* hiwat might have changed */
3673 	rcvbuf = sb->sb_hiwat;
3674 
3675 	space =  ((int32_t) imin((rcvbuf - sb->sb_cc),
3676 	    (sb->sb_mbmax - sb->sb_mbcnt)));
3677 	if (space < 0) {
3678 		space = 0;
3679 	}
3680 
3681 #if CONTENT_FILTER
3682 	/* Compensate for data being processed by content filters */
3683 	pending = cfil_sock_data_space(sb);
3684 #endif /* CONTENT_FILTER */
3685 	if (pending > space) {
3686 		space = 0;
3687 	} else {
3688 		space -= pending;
3689 	}
3690 
3691 	/*
3692 	 * Avoid increasing window size if the current window
3693 	 * is already very low; we could be in "persist" mode and
3694 	 * might break some apps (see rdar://5409343).
3695 	 */
3696 
3697 	if (space < tp->t_maxseg) {
3698 		return space;
3699 	}
3700 
3701 	/* Clip window size for slower link */
3702 
3703 	if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) {
3704 		return imin(space, slowlink_wsize);
3705 	}
3706 
3707 	return space;
3708 }
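
/*
 * Illustrative worked example (assumed values, not from the original
 * source): with rcvbuf = 131072, sb_cc = 8192, sb_mbmax - sb_mbcnt =
 * 262144 and pending = 4096 held by a content filter, space =
 * imin(131072 - 8192, 262144) - 4096 = 118784 bytes. A result smaller
 * than t_maxseg is returned as-is so a tiny window is never inflated.
 */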
3709 /*
3710  * Checks the TCP Segmentation Offload (TSO) capability for a given
3711  * connection and interface pair.
3712  */
3713 void
3714 tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
3715 {
3716 	struct inpcb *inp;
3717 	int isipv6;
3718 	struct ifnet *tunnel_ifp = NULL;
3719 #define IFNET_TSO_MASK (IFNET_TSO_IPV6 | IFNET_TSO_IPV4)
3720 
3721 	tp->t_flags &= ~TF_TSO;
3722 
3723 	/*
3724 	 * Bail if there's a non-TSO-capable filter on the interface.
3725 	 */
3726 	if (ifp == NULL || ifp->if_flt_no_tso_count > 0) {
3727 		return;
3728 	}
3729 
3730 	inp = tp->t_inpcb;
3731 	isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
3732 
3733 #if MPTCP
3734 	/*
3735 	 * We can't use TSO if this tcpcb belongs to an MPTCP session.
3736 	 */
3737 	if (inp->inp_socket->so_flags & SOF_MP_SUBFLOW) {
3738 		return;
3739 	}
3740 #endif
3741 	/*
3742 	 * We can't use TSO if the TSO capability of the tunnel interface does
3743 	 * not match the capability of another interface known by TCP
3744 	 */
3745 	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
3746 		u_int tunnel_if_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
3747 
3748 		if (tunnel_if_index != 0) {
3749 			ifnet_head_lock_shared();
3750 			tunnel_ifp = ifindex2ifnet[tunnel_if_index];
3751 			ifnet_head_done();
3752 		}
3753 
3754 		if (tunnel_ifp == NULL) {
3755 			return;
3756 		}
3757 
3758 		if ((ifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3759 			if (tso_debug > 0) {
3760 				os_log(OS_LOG_DEFAULT,
3761 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with ifp %s",
3762 				    __func__,
3763 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3764 				    tunnel_ifp->if_xname, ifp->if_xname);
3765 			}
3766 			return;
3767 		}
3768 		if (inp->inp_last_outifp != NULL &&
3769 		    (inp->inp_last_outifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3770 			if (tso_debug > 0) {
3771 				os_log(OS_LOG_DEFAULT,
3772 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_last_outifp %s",
3773 				    __func__,
3774 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3775 				    tunnel_ifp->if_xname, inp->inp_last_outifp->if_xname);
3776 			}
3777 			return;
3778 		}
3779 		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp != NULL &&
3780 		    (inp->inp_boundifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3781 			if (tso_debug > 0) {
3782 				os_log(OS_LOG_DEFAULT,
3783 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_boundifp %s",
3784 				    __func__,
3785 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3786 				    tunnel_ifp->if_xname, inp->inp_boundifp->if_xname);
3787 			}
3788 			return;
3789 		}
3790 	}
3791 
3792 	if (isipv6) {
3793 		if (ifp->if_hwassist & IFNET_TSO_IPV6) {
3794 			tp->t_flags |= TF_TSO;
3795 			if (ifp->if_tso_v6_mtu != 0) {
3796 				tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
3797 			} else {
3798 				tp->tso_max_segment_size = TCP_MAXWIN;
3799 			}
3800 		}
3801 	} else {
3802 		if (ifp->if_hwassist & IFNET_TSO_IPV4) {
3803 			tp->t_flags |= TF_TSO;
3804 			if (ifp->if_tso_v4_mtu != 0) {
3805 				tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
3806 			} else {
3807 				tp->tso_max_segment_size = TCP_MAXWIN;
3808 			}
3809 			if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
3810 				tp->tso_max_segment_size -=
3811 				    CLAT46_HDR_EXPANSION_OVERHD;
3812 			}
3813 		}
3814 	}
3815 
3816 	if (tso_debug > 1) {
3817 		os_log(OS_LOG_DEFAULT, "%s: %u > %u TSO %d ifp %s",
3818 		    __func__,
3819 		    ntohs(tp->t_inpcb->inp_lport),
3820 		    ntohs(tp->t_inpcb->inp_fport),
3821 		    (tp->t_flags & TF_TSO) != 0,
3822 		    ifp != NULL ? ifp->if_xname : "<NULL>");
3823 	}
3824 }
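
/*
 * Illustrative worked example (assumed values, not from the original
 * source): an IPv4 flow over an interface advertising if_tso_v4_mtu =
 * 16384 on a CLAT46 path would get tso_max_segment_size = 16384 -
 * CLAT46_HDR_EXPANSION_OVERHD, leaving headroom for the IPv4 -> IPv6
 * header expansion performed by the translator.
 */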
3825 
3826 /*
3827  * Function to calculate the tcp clock. The tcp clock will get updated
3828  * at the boundaries of the tcp layer. This is done at 3 places:
3829  * 1. Right before processing an input tcp packet
3830  * 2. Whenever a connection wants to access the network using tcp_usrreqs
3831  * 3. When a tcp timer fires or before tcp slow timeout
3832  *
3833  */
3834 void
3835 calculate_tcp_clock(void)
3836 {
3837 	uint32_t current_tcp_now;
3838 	struct timeval now;
3839 	uint32_t tmp;
3840 
3841 	microuptime(&now);
3842 
3843 	/*
3844 	 * Update coarse-grained networking timestamp (in sec.); the idea
3845 	 * is to update the counter returnable via net_uptime() when
3846 	 * we read time.
3847 	 */
3848 	net_update_uptime_with_time(&now);
3849 
3850 	current_tcp_now = (uint32_t)now.tv_sec * 1000 + now.tv_usec / TCP_RETRANSHZ_TO_USEC;
3851 
3852 	tmp = os_atomic_load(&tcp_now, relaxed);
3853 	if (tmp < current_tcp_now) {
3854 		os_atomic_cmpxchg(&tcp_now, tmp, current_tcp_now, relaxed);
3855 
3856 		/*
3857 		 * No cmpxchg loop needed here. If someone else updated quicker,
3858 		 * we can take that value. The only requirement is that
3859 		 * tcp_now never decreases.
3860 		 */
3861 	}
3862 }
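
/*
 * Illustrative worked example (not from the original source, assuming the
 * usual TCP_RETRANSHZ_TO_USEC = 1000): the clock is kept in milliseconds,
 * so an uptime of tv_sec = 2, tv_usec = 345000 yields
 * current_tcp_now = 2 * 1000 + 345000 / 1000 = 2345.
 */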
3863 
3864 /*
3865  * Compute receive window scaling that we are going to request
3866  * for this connection based on sb_hiwat. Try to leave some
3867  * room to potentially increase the window size up to a maximum
3868  * defined by the constant tcp_autorcvbuf_max.
3869  */
3870 uint8_t
3871 tcp_get_max_rwinscale(struct tcpcb *tp, struct socket *so)
3872 {
3873 	uint8_t rcv_wscale;
3874 	uint32_t maxsockbufsize;
3875 
3876 	rcv_wscale = MAX((uint8_t)tcp_win_scale, tp->request_r_scale);
3877 	maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
3878 	    so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
3879 
3880 	/*
3881 	 * Window scale should not exceed what is needed
3882 	 * to send the max receive window size; adding 1 to TCP_MAXWIN
3883 	 * ensures that.
3884 	 */
3885 	while (rcv_wscale < TCP_MAX_WINSHIFT &&
3886 	    ((TCP_MAXWIN + 1) << rcv_wscale) < maxsockbufsize) {
3887 		rcv_wscale++;
3888 	}
3889 	rcv_wscale = MIN(rcv_wscale, TCP_MAX_WINSHIFT);
3890 
3891 	return rcv_wscale;
3892 }
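
/*
 * Illustrative worked example (assumed values, not from the original
 * source): for maxsockbufsize = 4 MiB, and a starting scale below 6, the
 * loop stops at rcv_wscale = 6 because (65536 << 6) = 4 MiB is the first
 * shift that is no longer smaller than the buffer size.
 */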
3893 
3894 int
3895 tcp_notsent_lowat_check(struct socket *so)
3896 {
3897 	struct inpcb *inp = sotoinpcb(so);
3898 	struct tcpcb *tp = NULL;
3899 	int notsent = 0;
3900 
3901 	if (inp != NULL) {
3902 		tp = intotcpcb(inp);
3903 	}
3904 
3905 	if (tp == NULL) {
3906 		return 0;
3907 	}
3908 
3909 	notsent = so->so_snd.sb_cc -
3910 	    (tp->snd_nxt - tp->snd_una);
3911 
3912 	/*
3913 	 * When we send a FIN or SYN, notsent can be negative.
3914 	 * In that case also we need to send a write event to the
3915 	 * process if it is waiting. In the FIN case, it will
3916 	 * get an error from send because cantsendmore will be set.
3917 	 */
3918 	if (notsent <= tp->t_notsent_lowat) {
3919 		return 1;
3920 	}
3921 
3922 	/*
3923 	 * When Nagle's algorithm is not disabled, it is better
3924 	 * to wake up the client when there is at least one
3925 	 * maxseg of data to write.
3926 	 */
3927 	if ((tp->t_flags & TF_NODELAY) == 0 &&
3928 	    notsent > 0 && notsent < tp->t_maxseg) {
3929 		return 1;
3930 	}
3931 	return 0;
3932 }
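
/*
 * Illustrative worked example (assumed values, not from the original
 * source): with sb_cc = 12000 bytes queued, snd_nxt - snd_una = 4000
 * bytes in flight and t_notsent_lowat = 8192, notsent = 8000 <= 8192,
 * so the function returns 1 and the waiting writer is woken up.
 */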
3933 
3934 void
3935 tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3936 {
3937 	struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
3938 	uint16_t rxcount = 0;
3939 
3940 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3941 		tp->t_dsack_lastuna = tp->snd_una;
3942 	}
3943 	/*
3944 	 * First check if there is a segment already existing for this
3945 	 * sequence space.
3946 	 */
3947 
3948 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3949 		if (SEQ_GT(rxseg->rx_start, start)) {
3950 			break;
3951 		}
3952 		prev = rxseg;
3953 	}
3954 	next = rxseg;
3955 
3956 	/* check if prev seg is for this sequence */
3957 	if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
3958 	    SEQ_GEQ(prev->rx_end, end)) {
3959 		prev->rx_count++;
3960 		return;
3961 	}
3962 
3963 	/*
3964 	 * There are a couple of possibilities at this point.
3965 	 * 1. prev overlaps with the beginning of this sequence
3966 	 * 2. next overlaps with the end of this sequence
3967 	 * 3. there is no overlap.
3968 	 */
3969 
3970 	if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
3971 		if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
3972 			start = prev->rx_end + 1;
3973 			prev->rx_count++;
3974 		} else {
3975 			prev->rx_end = (start - 1);
3976 			rxcount = prev->rx_count;
3977 		}
3978 	}
3979 
3980 	if (next != NULL && SEQ_LT(next->rx_start, end)) {
3981 		if (SEQ_LEQ(next->rx_end, end)) {
3982 			end = next->rx_start - 1;
3983 			next->rx_count++;
3984 		} else {
3985 			next->rx_start = end + 1;
3986 			rxcount = next->rx_count;
3987 		}
3988 	}
3989 	if (!SEQ_LT(start, end)) {
3990 		return;
3991 	}
3992 
3993 	if (tcp_rxt_seg_max > 0 && tp->t_rxt_seg_count >= tcp_rxt_seg_max) {
3994 		rxseg = SLIST_FIRST(&tp->t_rxt_segments);
3995 		if (prev == rxseg) {
3996 			prev = NULL;
3997 		}
3998 		SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3999 		    tcp_rxt_seg, rx_link);
4000 
4001 		tcp_rxt_seg_drop++;
4002 		tp->t_rxt_seg_drop++;
4003 		zfree(tcp_rxt_seg_zone, rxseg);
4004 		tcp_memacct_sub(kalloc_type_size(tcp_rxt_seg_zone));
4005 
4006 		tp->t_rxt_seg_count -= 1;
4007 	}
4008 
4009 	rxseg = zalloc_flags(tcp_rxt_seg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4010 	tcp_memacct_add(kalloc_type_size(tcp_rxt_seg_zone));
4011 	rxseg->rx_start = start;
4012 	rxseg->rx_end = end;
4013 	rxseg->rx_count = rxcount + 1;
4014 
4015 	if (prev != NULL) {
4016 		SLIST_INSERT_AFTER(prev, rxseg, rx_link);
4017 	} else {
4018 		SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
4019 	}
4020 	tp->t_rxt_seg_count += 1;
4021 }
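
/*
 * Illustrative worked example (assumed sequence numbers, not from the
 * original source): with an existing record [100, 199] (rx_count = 1),
 * inserting [150, 250] fails the full-containment check, so the overlap
 * logic trims the old record to [100, 149] and appends a new record
 * [150, 250] with rx_count = 2.
 */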
4022 
4023 struct tcp_rxt_seg *
4024 tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
4025 {
4026 	struct tcp_rxt_seg *rxseg;
4027 
4028 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
4029 		return NULL;
4030 	}
4031 
4032 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4033 		if (SEQ_LEQ(rxseg->rx_start, start) &&
4034 		    SEQ_GEQ(rxseg->rx_end, end)) {
4035 			return rxseg;
4036 		}
4037 		if (SEQ_GT(rxseg->rx_start, start)) {
4038 			break;
4039 		}
4040 	}
4041 	return NULL;
4042 }
4043 
4044 void
4045 tcp_rxtseg_set_spurious(struct tcpcb *tp, tcp_seq start, tcp_seq end)
4046 {
4047 	struct tcp_rxt_seg *rxseg;
4048 
4049 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
4050 		return;
4051 	}
4052 
4053 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4054 		if (SEQ_GEQ(rxseg->rx_start, start) &&
4055 		    SEQ_LEQ(rxseg->rx_end, end)) {
4056 			/*
4057 			 * If the segment was retransmitted only once, mark it as
4058 			 * spurious.
4059 			 */
4060 			if (rxseg->rx_count == 1) {
4061 				rxseg->rx_flags |= TCP_RXT_SPURIOUS;
4062 			}
4063 		}
4064 
4065 		if (SEQ_GEQ(rxseg->rx_start, end)) {
4066 			break;
4067 		}
4068 	}
4069 	return;
4070 }
4071 
4072 void
4073 tcp_rxtseg_clean(struct tcpcb *tp)
4074 {
4075 	struct tcp_rxt_seg *rxseg, *next;
4076 
4077 	SLIST_FOREACH_SAFE(rxseg, &tp->t_rxt_segments, rx_link, next) {
4078 		SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
4079 		    tcp_rxt_seg, rx_link);
4080 		zfree(tcp_rxt_seg_zone, rxseg);
4081 		tcp_memacct_sub(kalloc_type_size(tcp_rxt_seg_zone));
4082 	}
4083 	tp->t_rxt_seg_count = 0;
4084 	tp->t_dsack_lastuna = tp->snd_max;
4085 }
4086 
4087 boolean_t
4088 tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack)
4089 {
4090 	boolean_t bad_rexmt;
4091 	struct tcp_rxt_seg *rxseg;
4092 
4093 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
4094 		return FALSE;
4095 	}
4096 
4097 	/*
4098 	 * If all of the segments in this window are not cumulatively
4099 	 * Unless all of the segments in this window have been cumulatively
4100 	 * acknowledged, there can still be undetected packet loss.
4101 	 */
4102 	if (SEQ_LT(th_ack, tp->snd_recover)) {
4103 		return FALSE;
4104 	}
4105 
4106 	bad_rexmt = TRUE;
4107 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4108 		if (!(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
4109 			bad_rexmt = FALSE;
4110 			break;
4111 		}
4112 	}
4113 	return bad_rexmt;
4114 }
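
/*
 * Editorial note (not from the original source): combined with
 * tcp_rxtseg_set_spurious() above, a retransmission episode is judged
 * spurious only once the ACK covers snd_recover and every recorded
 * segment was retransmitted exactly once and marked TCP_RXT_SPURIOUS.
 */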
4115 
4116 u_int32_t
4117 tcp_rxtseg_total_size(struct tcpcb *tp)
4118 {
4119 	struct tcp_rxt_seg *rxseg;
4120 	u_int32_t total_size = 0;
4121 
4122 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4123 		total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
4124 	}
4125 	return total_size;
4126 }
4127 
4128 static void tcp_rack_free_and_disable(struct tcpcb *tp);
4129 
4130 int
4131 tcp_seg_cmp(const struct tcp_seg_sent *seg1, const struct tcp_seg_sent *seg2)
4132 {
4133 	return (int)(seg1->end_seq - seg2->end_seq);
4134 }
4135 
4136 RB_GENERATE(tcp_seg_sent_tree_head, tcp_seg_sent, seg_link, tcp_seg_cmp)
4137 
4138 uint32_t
4139 tcp_seg_len(struct tcp_seg_sent *seg)
4140 {
4141 	if (SEQ_LT(seg->end_seq, seg->start_seq)) {
4142 		os_log_error(OS_LOG_DEFAULT, "segment end(%u) can't be smaller "
4143 		    "than segment start(%u)", seg->end_seq, seg->start_seq);
4144 	}
4145 
4146 	return seg->end_seq - seg->start_seq;
4147 }
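
/*
 * Illustrative sketch (not from the original source): because tcp_seg_cmp
 * keys the tree on end_seq alone, lookups throughout this file use a
 * stack-local probe with only end_seq filled in. The helper name is
 * hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static struct tcp_seg_sent *
example_find_by_end(struct tcpcb *tp, tcp_seq end_seq)	/* hypothetical */
{
	struct tcp_seg_sent probe = {};

	probe.end_seq = end_seq;
	/* NULL unless some tracked segment ends exactly at end_seq */
	return RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &probe);
}
#endif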
4148 
4149 static struct tcp_seg_sent *
4150 tcp_seg_alloc_init(struct tcpcb *tp)
4151 {
4152 	struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->seg_pool.free_segs);
4153 	if (seg != NULL) {
4154 		TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4155 		tp->seg_pool.free_segs_count--;
4156 
4157 		bzero(seg, sizeof(*seg));
4158 	} else {
4159 		if (tcp_memacct_hardlimit()) {
4160 			return NULL;
4161 		}
4162 
4163 		seg = zalloc_flags(tcp_seg_sent_zone, Z_NOPAGEWAIT | Z_ZERO);
4164 		if (seg == NULL) {
4165 			return NULL;
4166 		}
4167 		tcp_memacct_add(kalloc_type_size(tcp_seg_sent_zone));
4168 	}
4169 
4170 	return seg;
4171 }
4172 
4173 static void
4174 tcp_update_seg_after_rto(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4175     uint32_t xmit_ts, uint8_t flags)
4176 {
4177 	tcp_rack_transmit_seg(tp, found_seg, found_seg->start_seq, found_seg->end_seq,
4178 	    xmit_ts, flags);
4179 	struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->t_segs_sent);
4180 	if (found_seg == seg) {
4181 		// Move this segment to the end of time-ordered list.
4182 		TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4183 		TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4184 	}
4185 }
4186 
4187 static void
4188 tcp_process_rxmt_segs_after_rto(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start,
4189     uint32_t xmit_ts, uint8_t flags)
4190 {
4191 	struct tcp_seg_sent segment = {};
4192 
4193 	while (seg != NULL) {
4194 		if (SEQ_LEQ(seg->start_seq, start)) {
4195 			tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4196 			break;
4197 		} else {
4198 			/* The segment is a part of the total RTO retransmission */
4199 			tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4200 
4201 			/* Find the next segment ending at the start of current segment */
4202 			segment.end_seq = seg->start_seq;
4203 			seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4204 		}
4205 	}
4206 }
4207 
4208 static struct tcp_seg_sent *
4209 tcp_seg_sent_insert_before(struct tcpcb *tp, struct tcp_seg_sent *before, tcp_seq start, tcp_seq end,
4210     uint32_t xmit_ts, uint8_t flags)
4211 {
4212 	struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4213 	if (seg == NULL) {
4214 		tcp_rack_free_and_disable(tp);
4215 		return NULL;
4216 	}
4217 	tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4218 	struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4219 	if (not_inserted) {
4220 		TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4221 		    not_inserted->start_seq, not_inserted->end_seq);
4222 	}
4223 	TAILQ_INSERT_BEFORE(before, seg, tx_link);
4224 
4225 	return seg;
4226 }
4227 
4228 static struct tcp_seg_sent *
4229 tcp_seg_rto_insert_end(struct tcpcb *tp, tcp_seq start, tcp_seq end,
4230     uint32_t xmit_ts, uint8_t flags)
4231 {
4232 	struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4233 	if (seg == NULL) {
4234 		tcp_rack_free_and_disable(tp);
4235 		return NULL;
4236 	}
4237 	/* segment MUST be allocated, there is no other fail-safe here */
4238 	tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4239 	struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4240 	if (not_inserted) {
4241 		TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4242 		    not_inserted->start_seq, not_inserted->end_seq);
4243 	}
4244 	TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4245 
4246 	return seg;
4247 }
4248 
4249 void
4250 tcp_seg_sent_insert(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start, tcp_seq end,
4251     uint32_t xmit_ts, uint8_t flags)
4252 {
4253 	if (seg != NULL) {
4254 		uint8_t seg_flags = seg->flags | flags;
4255 		if (seg->end_seq == end) {
4256 			/* Entire seg retransmitted in RACK recovery, start and end sequence doesn't change */
4257 			if (seg->start_seq != start) {
4258 				os_log_error(OS_LOG_DEFAULT, "Segment start (%u) is not same as retransmitted "
4259 				    "start sequence number (%u)", seg->start_seq, start);
4260 			}
4261 			tcp_rack_transmit_seg(tp, seg, seg->start_seq, seg->end_seq, xmit_ts, seg_flags);
4262 			TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4263 			TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4264 		} else {
4265 			/*
4266 			 * The original segment was partially retransmitted: advance start_seq by
4267 			 * the retransmitted length and create a new segment for the retransmitted part
4268 			 */
4269 			struct tcp_seg_sent *partial_seg = tcp_seg_alloc_init(tp);
4270 			if (partial_seg == NULL) {
4271 				tcp_rack_free_and_disable(tp);
4272 				return;
4273 			}
4274 			seg->start_seq += (end - start);
4275 			tcp_rack_transmit_seg(tp, partial_seg, start, end, xmit_ts, seg_flags);
4276 			struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head,
4277 			    &tp->t_segs_sent_tree, partial_seg);
4278 			if (not_inserted) {
4279 				TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4280 				    not_inserted->start_seq, not_inserted->end_seq);
4281 			}
4282 			TAILQ_INSERT_TAIL(&tp->t_segs_sent, partial_seg, tx_link);
4283 		}
4284 
4285 		return;
4286 	}
4287 
4288 	if ((flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE) == 0) {
4289 		/* This is a new segment */
4290 		seg = tcp_seg_alloc_init(tp);
4291 		if (seg == NULL) {
4292 			tcp_rack_free_and_disable(tp);
4293 			return;
4294 		}
4295 
4296 		tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4297 		struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4298 		if (not_inserted) {
4299 			TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4300 			    not_inserted->start_seq, not_inserted->end_seq);
4301 		}
4302 		TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4303 
4304 		return;
4305 	}
4306 	/*
4307 	 * The segment was retransmitted after either an RTO or a PTO.
4308 	 * After an RTO, the time-ordered list may lose its order.
4309 	 * If retransmitted after an RTO, check whether the segment
4310 	 * already exists in the RB tree and update its xmit_ts. Also,
4311 	 * if this segment is at the head of the ordered list, move it
4312 	 * to the end.
4313 	 */
4314 	struct tcp_seg_sent segment = {};
4315 	struct tcp_seg_sent *found_seg = NULL, *rxmt_seg = NULL;
4316 
4317 	/* Set the end sequence to search for existing segment */
4318 	segment.end_seq = end;
4319 	found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4320 	if (found_seg != NULL) {
4321 		/* Found an exact match for retransmitted end sequence */
4322 		tcp_process_rxmt_segs_after_rto(tp, found_seg, start, xmit_ts, flags);
4323 		return;
4324 	}
4325 	/*
4326 	 * We come here when we don't find an exact match and the end of the
4327 	 * segment retransmitted after the RTO lies within an existing segment.
4328 	 */
4329 	RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4330 		if (SEQ_LT(end, found_seg->end_seq) && SEQ_GT(end, found_seg->start_seq)) {
4331 			/*
4332 			 * This segment is partially retransmitted. We split this segment at the boundary of end
4333 			 * sequence. First insert the part being retransmitted at the end of time-ordered list.
4334 			 */
4335 			struct tcp_seg_sent *inserted_seg = tcp_seg_rto_insert_end(tp, found_seg->start_seq, end, xmit_ts,
4336 			    found_seg->flags | flags);
4337 			/* If segment is not allocated, RACK is already disabled and cleaned up */
4338 			if (inserted_seg == NULL) {
4339 				return;
4340 			}
4341 
4342 			if (SEQ_LEQ(found_seg->start_seq, start)) {
4343 				/*
4344 				 * We are done with the retransmitted part.
4345 				 * Move the start of existing segment
4346 				 */
4347 				found_seg->start_seq = end;
4348 			} else {
4349 				/*
4350 				 * This retransmitted sequence covers more than one segment
4351 				 * Look for segments covered by this retransmission below this segment
4352 				 */
4353 				segment.end_seq = found_seg->start_seq;
4354 				rxmt_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4355 
4356 				if (rxmt_seg != NULL) {
4357 					/* rxmt_seg is just before the current segment */
4358 					tcp_process_rxmt_segs_after_rto(tp, rxmt_seg, start, xmit_ts, flags);
4359 				}
4360 
4361 				/* Move the start of existing segment */
4362 				found_seg->start_seq = end;
4363 			}
4364 			return;
4365 		}
4366 	}
4367 }
4368 
4369 static void
4370 tcp_seg_collect_acked_subtree(struct tcpcb *tp, struct tcp_seg_sent *seg,
4371     uint32_t acked_xmit_ts, uint32_t tsecr)
4372 {
4373 	if (seg != NULL) {
4374 		tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4375 		tcp_seg_collect_acked_subtree(tp, RB_RIGHT(seg, seg_link), acked_xmit_ts, tsecr);
4376 		TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4377 	}
4378 }
4379 
4380 /* Call this function with root of the rb tree */
4381 static void
4382 tcp_seg_collect_acked(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq th_ack,
4383     uint32_t acked_xmit_ts, uint32_t tsecr)
4384 {
4385 	if (seg == NULL) {
4386 		return;
4387 	}
4388 
4389 	if (SEQ_GEQ(th_ack, seg->end_seq)) {
4390 		/* Delete the entire left sub-tree */
4391 		tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4392 		/* Evaluate the right sub-tree */
4393 		tcp_seg_collect_acked(tp, RB_RIGHT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4394 		TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4395 	} else {
4396 		/*
4397 		 * This ACK doesn't acknowledge the current root and its right sub-tree.
4398 		 * Evaluate the left sub-tree
4399 		 */
4400 		tcp_seg_collect_acked(tp, RB_LEFT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4401 	}
4402 }
4403 
4404 static void
4405 tcp_seg_delete_acked(struct tcpcb *tp, uint32_t acked_xmit_ts, uint32_t tsecr)
4406 {
4407 	struct tcp_seg_sent *acked_seg = NULL, *next = NULL;
4408 
4409 	TAILQ_FOREACH_SAFE(acked_seg, &tp->t_segs_acked, ack_link, next) {
4410 		/* Advance RACK state if applicable */
4411 		if (acked_seg->xmit_ts > acked_xmit_ts) {
4412 			tcp_rack_update_segment_acked(tp, tsecr, acked_seg->xmit_ts, acked_seg->end_seq,
4413 			    !!(acked_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4414 		}
4415 		/* Check for reordering */
4416 		tcp_rack_detect_reordering_acked(tp, acked_seg);
4417 
4418 		const uint32_t seg_len = tcp_seg_len(acked_seg);
4419 		if (acked_seg->flags & TCP_SEGMENT_LOST) {
4420 			if (tp->bytes_lost < seg_len) {
4421 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller than already "
4422 				    "lost segment length (%u)", tp->bytes_lost, seg_len);
4423 			}
4424 			tp->bytes_lost -= seg_len;
4425 		}
4426 		if (acked_seg->flags & TCP_RACK_RETRANSMITTED) {
4427 			if (tp->bytes_retransmitted < seg_len) {
4428 				os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be smaller "
4429 				    "than already retransmitted segment length (%u)",
4430 				    tp->bytes_retransmitted, seg_len);
4431 			}
4432 			tp->bytes_retransmitted -= seg_len;
4433 		}
4434 		if (acked_seg->flags & TCP_SEGMENT_SACKED) {
4435 			if (tp->bytes_sacked < seg_len) {
4436 				os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller than already "
4437 				    "SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4438 			}
4439 			tp->bytes_sacked -= seg_len;
4440 		}
4441 		TAILQ_REMOVE(&tp->t_segs_acked, acked_seg, ack_link);
4442 		TAILQ_REMOVE(&tp->t_segs_sent, acked_seg, tx_link);
4443 		RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, acked_seg);
4444 		tcp_seg_delete(tp, acked_seg);
4445 	}
4446 }
4447 
4448 void
4449 tcp_segs_doack(struct tcpcb *tp, tcp_seq th_ack, struct tcpopt *to)
4450 {
4451 	uint32_t tsecr = 0, acked_xmit_ts = 0;
4452 	tcp_seq acked_seq = th_ack;
4453 	bool was_retransmitted = false;
4454 
4455 	if (TAILQ_EMPTY(&tp->t_segs_sent)) {
4456 		return;
4457 	}
4458 
4459 	if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0)) {
4460 		tsecr = to->to_tsecr;
4461 	}
4462 
4463 	struct tcp_seg_sent seg = {};
4464 	struct tcp_seg_sent *found_seg = NULL, *next = NULL;
4465 
4466 	found_seg = TAILQ_LAST(&tp->t_segs_sent, tcp_seg_sent_head);
4467 
4468 	if (tp->rack.segs_retransmitted == false) {
4469 		if (SEQ_GEQ(th_ack, found_seg->end_seq)) {
4470 			/*
4471 			 * ACK acknowledges the last sent segment completely (snd_max),
4472 			 * we can remove all segments from time ordered list.
4473 			 */
4474 			acked_seq = found_seg->end_seq;
4475 			acked_xmit_ts = found_seg->xmit_ts;
4476 			was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4477 			tcp_segs_sent_clean(tp, false);
4478 
4479 			/* Advance RACK state */
4480 			tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4481 			return;
4482 		}
4483 	}
4484 	/*
4485 	 * If either not all segments are ACKed OR the time-ordered list contains retransmitted
4486 	 * segments, do an RB tree search for the largest (completely) ACKed segment and remove the ACKed
4487 	 * segment and all segments left of it from both RB tree and time-ordered list.
4488 	 *
4489 	 * Set the end sequence to search for ACKed segment.
4490 	 */
4491 	seg.end_seq = th_ack;
4492 
4493 	if ((found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg)) != NULL) {
4494 		acked_seq = found_seg->end_seq;
4495 		acked_xmit_ts = found_seg->xmit_ts;
4496 		was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4497 
4498 		/*
4499 		 * Remove all segments that are ACKed by this ACK.
4500 		 * We defer self-balancing of RB tree to the end
4501 		 * by calling RB_REMOVE after collecting all ACKed segments.
4502 		 */
4503 		tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4504 		tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4505 
4506 		/* Advance RACK state */
4507 		tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4508 
4509 		return;
4510 	}
4511 	/*
4512 	 * When TSO is enabled, it is possible that th_ack is less
4513 	 * than segment->end, hence we search the tree
4514 	 * until we find the largest (partially) ACKed segment.
4515 	 */
4516 	RB_FOREACH_SAFE(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, next) {
4517 		if (SEQ_LT(th_ack, found_seg->end_seq) && SEQ_GT(th_ack, found_seg->start_seq)) {
4518 			acked_seq = th_ack;
4519 			acked_xmit_ts = found_seg->xmit_ts;
4520 			was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4521 
4522 			/* Remove all segments completely ACKed by this ack */
4523 			tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4524 			tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4525 			found_seg->start_seq = th_ack;
4526 
4527 			/* Advance RACK state */
4528 			tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4529 			break;
4530 		}
4531 	}
4532 }
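
/*
 * Editorial summary (not from the original source): the three paths above
 * are (1) nothing was retransmitted and th_ack covers the newest segment,
 * so the whole time-ordered list is dropped; (2) th_ack exactly matches a
 * segment's end_seq, so that segment and everything ACKed before it are
 * pruned; (3) th_ack lands inside a (typically TSO-sized) segment, so the
 * fully ACKed part is pruned and the straddling segment's start_seq is
 * advanced to th_ack.
 */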
4533 
4534 static bool
4535 tcp_seg_mark_sacked(struct tcpcb *tp, struct tcp_seg_sent *seg, uint32_t *newbytes_sacked)
4536 {
4537 	if (seg->flags & TCP_SEGMENT_SACKED) {
4538 		return false;
4539 	}
4540 
4541 	const uint32_t seg_len = tcp_seg_len(seg);
4542 
4543 	/* Check for reordering */
4544 	tcp_rack_detect_reordering_acked(tp, seg);
4545 
4546 	if (seg->flags & TCP_RACK_RETRANSMITTED) {
4547 		if (seg->flags & TCP_SEGMENT_LOST) {
4548 			/*
4549 			 * If the segment is not considered lost, we don't clear
4550 			 * If the segment is not considered lost, we don't clear the
4551 			 * retransmitted flag as it might still be in flight. The ONLY time
4552 			 * this can happen is when an RTO occurs and the segment is
4553 			 * retransmitted and SACKed before RACK detects that it was lost.
4554 			seg->flags &= ~(TCP_SEGMENT_LOST | TCP_RACK_RETRANSMITTED);
4555 			if (tp->bytes_lost < seg_len || tp->bytes_retransmitted < seg_len) {
4556 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) and/or bytes_retransmitted (%u) "
4557 				    "can't be smaller than already lost/retransmitted segment length (%u)", tp->bytes_lost,
4558 				    tp->bytes_retransmitted, seg_len);
4559 			}
4560 			tp->bytes_lost -= seg_len;
4561 			tp->bytes_retransmitted -= seg_len;
4562 		}
4563 	} else {
4564 		if (seg->flags & TCP_SEGMENT_LOST) {
4565 			seg->flags &= ~(TCP_SEGMENT_LOST);
4566 			if (tp->bytes_lost < seg_len) {
4567 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller "
4568 				    "than already lost segment length (%u)", tp->bytes_lost, seg_len);
4569 			}
4570 			tp->bytes_lost -= seg_len;
4571 		}
4572 	}
4573 	*newbytes_sacked += seg_len;
4574 	seg->flags |= TCP_SEGMENT_SACKED;
4575 	tp->bytes_sacked += seg_len;
4576 
4577 	return true;
4578 }
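
/*
 * Editorial note (not from the original source): the early return above
 * makes this idempotent; a newly SACKed segment leaves the lost (and,
 * when applicable, retransmitted) byte counters and enters bytes_sacked
 * exactly once.
 */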
4579 
4580 static void
4581 tcp_segs_dosack_matched(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4582     tcp_seq sblk_start, uint32_t tsecr,
4583     uint32_t *newbytes_sacked)
4584 {
4585 	struct tcp_seg_sent seg = {};
4586 
4587 	while (found_seg != NULL) {
4588 		if (sblk_start == found_seg->start_seq) {
4589 			/*
4590 			 * Covered the entire SACK block.
4591 			 * Record segment flags before they get erased.
4592 			 */
4593 			uint8_t seg_flags = found_seg->flags;
4594 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4595 			if (newly_marked) {
4596 				/* Advance RACK state */
4597 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4598 				    found_seg->end_seq,
4599 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4600 			}
4601 			break;
4602 		} else if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4603 			if ((found_seg->flags & TCP_SEGMENT_SACKED) != 0) {
4604 				/* No need to process an already SACKED segment */
4605 				break;
4606 			}
4607 			/*
4608 			 * This segment is partially ACKed by SACK block
4609 			 * as sblk_start > segment start. Since it is
4610 			 * partially SACKed, we should split the unSACKed and
4611 			 * SACKed parts.
4612 			 */
4613 			/* First create a new segment for unSACKed part */
4614 			struct tcp_seg_sent *inserted_seg = tcp_seg_sent_insert_before(tp, found_seg, found_seg->start_seq, sblk_start,
4615 			    found_seg->xmit_ts, found_seg->flags);
4616 			/* If segment is not allocated, RACK is already disabled and cleaned up */
4617 			if (inserted_seg == NULL) {
4618 				return;
4619 			}
4620 			/* Now, update the SACKed part */
4621 			found_seg->start_seq = sblk_start;
4622 			/* Record seg flags before they get erased. */
4623 			uint8_t seg_flags = found_seg->flags;
4624 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4625 			if (newly_marked) {
4626 				/* Advance RACK state */
4627 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4628 				    found_seg->end_seq,
4629 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4630 			}
4631 			break;
4632 		} else {
4633 			/*
4634 			 * This segment lies within the SACK block
4635 			 * Record segment flags before they get erased.
4636 			 */
4637 			uint8_t seg_flags = found_seg->flags;
4638 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4639 			if (newly_marked) {
4640 				/* Advance RACK state */
4641 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4642 				    found_seg->end_seq,
4643 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4644 			}
4645 			/* Find the next segment ending at the start of current segment */
4646 			seg.end_seq = found_seg->start_seq;
4647 			found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4648 		}
4649 	}
4650 }
4651 
4652 void
4653 tcp_segs_dosack(struct tcpcb *tp, tcp_seq sblk_start, tcp_seq sblk_end,
4654     uint32_t tsecr, uint32_t *newbytes_sacked)
4655 {
4656 	/*
4657 	 * When we receive SACK, min RTT is computed after SACK processing which
4658 	 * means we are using min RTT from the previous ACK to advance RACK state
4659 	 * This is ok as we track a windowed min-filtered estimate over a period.
4660 	 */
4661 	struct tcp_seg_sent seg = {};
4662 	struct tcp_seg_sent *found_seg = NULL, *sacked_seg = NULL;
4663 
4664 	/* Set the end sequence to search for SACKed segment */
4665 	seg.end_seq = sblk_end;
4666 	found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4667 
4668 	if (found_seg != NULL) {
4669 		/* We found an exact match for sblk_end */
4670 		tcp_segs_dosack_matched(tp, found_seg, sblk_start, tsecr, newbytes_sacked);
4671 		return;
4672 	}
4673 	/*
4674 	 * We come here when we don't find an exact match and sblk_end
4675 	 * lies within a segment. This would happen only when TSO is used.
4676 	 */
4677 	RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4678 		if (SEQ_LT(sblk_end, found_seg->end_seq) && SEQ_GT(sblk_end, found_seg->start_seq)) {
4679 			/*
4680 			 * This segment is partially SACKed. We split this segment at the boundary
4681 			 * of SACK block. First insert the newly SACKed part
4682 			 */
4683 			tcp_seq start = SEQ_LEQ(sblk_start, found_seg->start_seq) ? found_seg->start_seq : sblk_start;
4684 			struct tcp_seg_sent *newly_sacked = tcp_seg_sent_insert_before(tp, found_seg, start,
4685 			    sblk_end, found_seg->xmit_ts, found_seg->flags);
4686 			/* If segment is not allocated, RACK is already disabled and cleaned up */
4687 			if (newly_sacked == NULL) {
4688 				return;
4689 			}
4690 			/* Record seg flags before they get erased. */
4691 			uint8_t seg_flags = newly_sacked->flags;
4692 			/* Mark the SACKed segment */
4693 			tcp_seg_mark_sacked(tp, newly_sacked, newbytes_sacked);
4694 
4695 			/* Advance RACK state */
4696 			tcp_rack_update_segment_acked(tp, tsecr, newly_sacked->xmit_ts,
4697 			    newly_sacked->end_seq, !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4698 
4699 			if (sblk_start == found_seg->start_seq) {
4700 				/*
4701 				 * We are done with this SACK block.
4702 				 * Move the start of existing segment
4703 				 */
4704 				found_seg->start_seq = sblk_end;
4705 				break;
4706 			}
4707 
4708 			if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4709 				/* Insert the remaining unSACKed part before the SACKED segment inserted above */
4710 				struct tcp_seg_sent *unsacked = tcp_seg_sent_insert_before(tp, newly_sacked, found_seg->start_seq,
4711 				    sblk_start, found_seg->xmit_ts, found_seg->flags);
4712 				/* If segment is not allocated, RACK is already disabled and cleaned up */
4713 				if (unsacked == NULL) {
4714 					return;
4715 				}
4716 				/* Move the start of existing segment */
4717 				found_seg->start_seq = sblk_end;
4718 				break;
4719 			} else {
4720 				/*
4721 				 * This SACK block covers more than one segment
4722 				 * Look for segments SACKed below this segment
4723 				 */
4724 				seg.end_seq = found_seg->start_seq;
4725 				sacked_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4726 
4727 				if (sacked_seg != NULL) {
4728 					/* We found an exact match for sblk_end */
4729 					tcp_segs_dosack_matched(tp, sacked_seg, sblk_start, tsecr, newbytes_sacked);
4730 				}
4731 
4732 				/*
4733 				 * RACK might have been disabled (if a segment allocation failed) and all associated
4734 				 * state freed. If RACK hasn't been disabled, move the start of existing segment.
4735 				 */
4736 				if (TCP_RACK_ENABLED(tp)) {
4737 					found_seg->start_seq = sblk_end;
4738 				}
4739 			}
4740 			break;
4741 		}
4742 	}
4743 }
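
/*
 * Editorial summary (not from the original source): the RB_FIND fast path
 * handles SACK blocks whose end matches a tracked segment exactly; the
 * RB_FOREACH path handles a sblk_end that lands inside a (typically
 * TSO-sized) segment by splitting it at the SACK boundary before marking
 * the covered part.
 */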
4744 
4745 void
4746 tcp_segs_clear_sacked(struct tcpcb *tp)
4747 {
4748 	struct tcp_seg_sent *seg = NULL;
4749 
4750 	TAILQ_FOREACH(seg, &tp->t_segs_sent, tx_link)
4751 	{
4752 		const uint32_t seg_len = tcp_seg_len(seg);
4753 
4754 		if (seg->flags & TCP_SEGMENT_SACKED) {
4755 			seg->flags &= ~(TCP_SEGMENT_SACKED);
4756 			if (tp->bytes_sacked < seg_len) {
4757 				os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller "
4758 				    "than already SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4759 			}
4760 			tp->bytes_sacked -= seg_len;
4761 		}
4762 	}
4763 }
4764 
4765 void
4766 tcp_mark_seg_lost(struct tcpcb *tp, struct tcp_seg_sent *seg)
4767 {
4768 	const uint32_t seg_len = tcp_seg_len(seg);
4769 
4770 	if (seg->flags & TCP_SEGMENT_LOST) {
4771 		if (seg->flags & TCP_RACK_RETRANSMITTED) {
4772 			/* Retransmission was lost */
4773 			seg->flags &= ~TCP_RACK_RETRANSMITTED;
4774 			if (tp->bytes_retransmitted < seg_len) {
4775 				os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be "
4776 				    "smaller than retransmitted segment length (%u)",
4777 				    tp->bytes_retransmitted, seg_len);
4778 				return;
4779 			}
4780 			tp->bytes_retransmitted -= seg_len;
4781 		}
4782 	} else {
4783 		seg->flags |= TCP_SEGMENT_LOST;
4784 		tp->bytes_lost += seg_len;
4785 	}
4786 }
4787 
4788 void
4789 tcp_seg_delete(struct tcpcb *tp, struct tcp_seg_sent *seg)
4790 {
4791 	if (tp->seg_pool.free_segs_count >= TCP_SEG_POOL_MAX_ITEM_COUNT) {
4792 		zfree(tcp_seg_sent_zone, seg);
4793 		tcp_memacct_sub(kalloc_type_size(tcp_seg_sent_zone));
4794 	} else {
4795 		bzero(seg, sizeof(*seg));
4796 		TAILQ_INSERT_TAIL(&tp->seg_pool.free_segs, seg, free_link);
4797 		tp->seg_pool.free_segs_count++;
4798 	}
4799 }
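
/*
 * Editorial note (not from the original source): tcp_seg_alloc_init() and
 * tcp_seg_delete() form a small per-connection free list in front of the
 * zone allocator; a freed segment is recycled unless the pool already
 * holds TCP_SEG_POOL_MAX_ITEM_COUNT entries, in which case it goes back
 * to the zone.
 */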
4800 
4801 void
4802 tcp_segs_sent_clean(struct tcpcb *tp, bool free_segs)
4803 {
4804 	struct tcp_seg_sent *seg = NULL, *next = NULL;
4805 
4806 	TAILQ_FOREACH_SAFE(seg, &tp->t_segs_sent, tx_link, next) {
4807 		/* Check for reordering */
4808 		tcp_rack_detect_reordering_acked(tp, seg);
4809 
4810 		TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4811 		RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4812 		tcp_seg_delete(tp, seg);
4813 	}
4814 	if (__improbable(!RB_EMPTY(&tp->t_segs_sent_tree))) {
4815 		os_log_error(OS_LOG_DEFAULT, "RB tree still contains segments while "
4816 		    "time ordered list is already empty");
4817 	}
4818 	if (__improbable(!TAILQ_EMPTY(&tp->t_segs_acked))) {
4819 		os_log_error(OS_LOG_DEFAULT, "Segment ACKed list shouldn't contain "
4820 		    "any segments as they are removed immediately after being ACKed");
4821 	}
4822 	/* Reset seg_retransmitted as we emptied the list */
4823 	tcp_rack_reset_segs_retransmitted(tp);
4824 	tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
4825 
4826 	/* Empty the free segments pool */
4827 	if (free_segs) {
4828 		TAILQ_FOREACH_SAFE(seg, &tp->seg_pool.free_segs, free_link, next) {
4829 			TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4830 			zfree(tcp_seg_sent_zone, seg);
4831 			tcp_memacct_sub(kalloc_type_size(tcp_seg_sent_zone));
4832 		}
4833 		tp->seg_pool.free_segs_count = 0;
4834 	}
4835 }
4836 
4837 void
4838 tcp_rack_free_and_disable(struct tcpcb *tp)
4839 {
4840 	TCP_LOG(tp, "not enough memory to allocate segment, disabling RACK");
4841 	tcp_segs_sent_clean(tp, true);
4842 	tp->t_flagsext &= ~TF_RACK_ENABLED;
4843 }
4844 
4845 void
4846 tcp_get_connectivity_status(struct tcpcb *tp,
4847     struct tcp_conn_status *connstatus)
4848 {
4849 	if (tp == NULL || connstatus == NULL) {
4850 		return;
4851 	}
4852 	bzero(connstatus, sizeof(*connstatus));
4853 	if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
4854 		if (TCPS_HAVEESTABLISHED(tp->t_state)) {
4855 			connstatus->write_probe_failed = 1;
4856 		} else {
4857 			connstatus->conn_probe_failed = 1;
4858 		}
4859 	}
4860 	if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) {
4861 		connstatus->read_probe_failed = 1;
4862 	}
4863 	if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL &&
4864 	    (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) {
4865 		connstatus->probe_activated = 1;
4866 	}
4867 }
4868 
4869 void
4870 tcp_disable_tfo(struct tcpcb *tp)
4871 {
4872 	tp->t_flagsext &= ~TF_FASTOPEN;
4873 }
4874 
4875 static struct mbuf *
4876 tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp,
4877     boolean_t is_probe)
4878 {
4879 	struct inpcb *inp = tp->t_inpcb;
4880 	struct tcphdr *th;
4881 	caddr_t data;
4882 	int win = 0;
4883 	struct mbuf *m;
4884 
4885 	/*
4886 	 * The code assumes the IP + TCP headers fit in an mbuf packet header
4887 	 */
4888 	static_assert(sizeof(struct ip) + sizeof(struct tcphdr) <= _MHLEN);
4889 	static_assert(sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= _MHLEN);
4890 
4891 	MGETHDR(m, M_WAIT, MT_HEADER);
4892 	if (m == NULL) {
4893 		return NULL;
4894 	}
4895 	m->m_pkthdr.pkt_proto = IPPROTO_TCP;
4896 
4897 	data = m_mtod_lower_bound(m);
4898 
4899 	if (inp->inp_vflag & INP_IPV4) {
4900 		bzero(data, sizeof(struct ip) + sizeof(struct tcphdr));
4901 		th = (struct tcphdr *)(void *) (data + sizeof(struct ip));
4902 		m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
4903 		m->m_pkthdr.len = m->m_len;
4904 	} else {
4905 		VERIFY(inp->inp_vflag & INP_IPV6);
4906 
4907 		bzero(data, sizeof(struct ip6_hdr)
4908 		    + sizeof(struct tcphdr));
4909 		th = (struct tcphdr *)(void *)(data + sizeof(struct ip6_hdr));
4910 		m->m_len = sizeof(struct ip6_hdr) +
4911 		    sizeof(struct tcphdr);
4912 		m->m_pkthdr.len = m->m_len;
4913 	}
4914 
4915 	tcp_fillheaders(m, tp, data, th, NULL, NULL);
4916 
4917 	if (inp->inp_vflag & INP_IPV4) {
4918 		struct ip *ip;
4919 
4920 		ip = (__typeof__(ip))(void *)data;
4921 
4922 		ip->ip_id = rfc6864 ? 0 : ip_randomid((uint64_t)m);
4923 		ip->ip_off = htons(IP_DF);
4924 		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
4925 		ip->ip_ttl = inp->inp_ip_ttl;
4926 		ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
4927 		ip->ip_sum = in_cksum_hdr(ip);
4928 	} else {
4929 		struct ip6_hdr *ip6;
4930 
4931 		ip6 = (__typeof__(ip6))(void *)data;
4932 
4933 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
4934 		ip6->ip6_hlim = in6_selecthlim(inp, ifp);
4935 		ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
4936 
4937 		if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
4938 			ip6->ip6_src.s6_addr16[1] = 0;
4939 		}
4940 		if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
4941 			ip6->ip6_dst.s6_addr16[1] = 0;
4942 		}
4943 	}
4944 	th->th_flags = TH_ACK;
4945 
4946 	win = tcp_sbspace(tp);
4947 	if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) {
4948 		win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
4949 	}
4950 	th->th_win = htons((u_short) (win >> tp->rcv_scale));
4951 
4952 	if (is_probe) {
4953 		th->th_seq = htonl(tp->snd_una - 1);
4954 	} else {
4955 		th->th_seq = htonl(tp->snd_una);
4956 	}
4957 	th->th_ack = htonl(tp->rcv_nxt);
4958 
4959 	/* Force recompute TCP checksum to be the final value */
4960 	th->th_sum = 0;
4961 	if (inp->inp_vflag & INP_IPV4) {
4962 		th->th_sum = inet_cksum(m, IPPROTO_TCP,
4963 		    sizeof(struct ip), sizeof(struct tcphdr));
4964 	} else {
4965 		th->th_sum = inet6_cksum(m, IPPROTO_TCP,
4966 		    sizeof(struct ip6_hdr), sizeof(struct tcphdr));
4967 	}
4968 
4969 	return m;
4970 }
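
/*
 * Editorial note (not from the original source): the frame is a bare ACK;
 * with is_probe set, the sequence number snd_una - 1 deliberately sits one
 * byte to the left of the expected window so the peer answers with an ACK,
 * the same trick classic TCP keepalive probes use.
 */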
4971 
4972 void
4973 tcp_fill_keepalive_offload_frames(ifnet_t ifp,
4974     struct ifnet_keepalive_offload_frame *frames_array __counted_by(frames_array_count),
4975     u_int32_t frames_array_count, size_t frame_data_offset,
4976     u_int32_t *used_frames_count)
4977 {
4978 	struct inpcb *inp;
4979 	inp_gen_t gencnt;
4980 	u_int32_t frame_index = *used_frames_count;
4981 
4982 	/* Validation of the parameters */
4983 	if (ifp == NULL || frames_array == NULL ||
4984 	    frames_array_count == 0 ||
4985 	    frame_index >= frames_array_count ||
4986 	    frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4987 		return;
4988 	}
4989 
4990 	/* Fast exit when no process is using the socket option TCP_KEEPALIVE_OFFLOAD */
4991 	if (ifp->if_tcp_kao_cnt == 0) {
4992 		return;
4993 	}
4994 
4995 	/*
4996 	 * This function is called outside the regular TCP processing
4997 	 * so we need to update the TCP clock.
4998 	 */
4999 	calculate_tcp_clock();
5000 
5001 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
5002 	gencnt = tcbinfo.ipi_gencnt;
5003 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
5004 		struct socket *so;
5005 		struct ifnet_keepalive_offload_frame *frame;
5006 		struct mbuf *m = NULL;
5007 		struct tcpcb *tp = intotcpcb(inp);
5008 
5009 		if (frame_index >= frames_array_count) {
5010 			break;
5011 		}
5012 
5013 		if (inp->inp_gencnt > gencnt ||
5014 		    inp->inp_state == INPCB_STATE_DEAD) {
5015 			continue;
5016 		}
5017 
5018 		if ((so = inp->inp_socket) == NULL ||
5019 		    (so->so_state & SS_DEFUNCT)) {
5020 			continue;
5021 		}
5022 		/*
5023 		 * Check for the keepalive offload flag without taking the
5024 		 * socket lock, to avoid a deadlock.
5025 		 */
5026 		if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
5027 			continue;
5028 		}
5029 
5030 		if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
5031 			continue;
5032 		}
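		/*
		 * Take a want-count reference (WNT_ACQUIRE) while only the
		 * pcb list lock is held, so the inpcb cannot be reclaimed
		 * before the socket lock is taken below; the reference is
		 * dropped again right after (WNT_RELEASE).
		 */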
5033 		if (inp->inp_ppcb == NULL ||
5034 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
5035 			continue;
5036 		}
5037 		socket_lock(so, 1);
5038 		/* Release the want count */
5039 		if (inp->inp_ppcb == NULL ||
5040 		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
5041 			socket_unlock(so, 1);
5042 			continue;
5043 		}
5044 		if ((inp->inp_vflag & INP_IPV4) &&
5045 		    (inp->inp_laddr.s_addr == INADDR_ANY ||
5046 		    inp->inp_faddr.s_addr == INADDR_ANY)) {
5047 			socket_unlock(so, 1);
5048 			continue;
5049 		}
5050 		if ((inp->inp_vflag & INP_IPV6) &&
5051 		    (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
5052 		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
5053 			socket_unlock(so, 1);
5054 			continue;
5055 		}
5056 		if (inp->inp_lport == 0 || inp->inp_fport == 0) {
5057 			socket_unlock(so, 1);
5058 			continue;
5059 		}
5060 		if (inp->inp_last_outifp == NULL ||
5061 		    inp->inp_last_outifp->if_index != ifp->if_index) {
5062 			socket_unlock(so, 1);
5063 			continue;
5064 		}
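		/*
		 * Both header templates must fit in the frame buffer
		 * together with frame_data_offset, the room the caller
		 * reserves in front of the IP header (typically for the
		 * link-layer header).
		 */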
5065 		if ((inp->inp_vflag & INP_IPV4) && frame_data_offset +
5066 		    sizeof(struct ip) + sizeof(struct tcphdr) >
5067 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
5068 			socket_unlock(so, 1);
5069 			continue;
5070 		} else if (!(inp->inp_vflag & INP_IPV4) && frame_data_offset +
5071 		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) >
5072 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
5073 			socket_unlock(so, 1);
5074 			continue;
5075 		}
5076 		/*
5077 		 * There is no point in waking up the device for connections
5078 		 * that are not established. Long-lived connections are meant
5079 		 * for processes that will send and receive data.
5080 		 */
5081 		if (tp->t_state != TCPS_ESTABLISHED) {
5082 			socket_unlock(so, 1);
5083 			continue;
5084 		}
5085 		/*
5086 		 * This inp has all the information that is needed to
5087 		 * generate an offload frame.
5088 		 */
5089 		frame = &frames_array[frame_index];
5090 		frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP;
5091 		frame->ether_type = (inp->inp_vflag & INP_IPV4) ?
5092 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 :
5093 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
5094 		frame->interval = (uint16_t)(tp->t_keepidle > 0 ? tp->t_keepidle :
5095 		    tcp_keepidle);
5096 		frame->keep_cnt = (uint8_t)TCP_CONN_KEEPCNT(tp);
5097 		frame->keep_retry = (uint16_t)TCP_CONN_KEEPINTVL(tp);
5098 		if (so->so_options & SO_NOWAKEFROMSLEEP) {
5099 			frame->flags |=
5100 			    IFNET_KEEPALIVE_OFFLOAD_FLAG_NOWAKEFROMSLEEP;
5101 		}
5102 		frame->local_port = ntohs(inp->inp_lport);
5103 		frame->remote_port = ntohs(inp->inp_fport);
5104 		frame->local_seq = tp->snd_nxt;
5105 		frame->remote_seq = tp->rcv_nxt;
5106 		if (inp->inp_vflag & INP_IPV4) {
5107 			ASSERT(frame_data_offset + sizeof(struct ip) + sizeof(struct tcphdr) <= UINT8_MAX);
5108 			frame->length = (uint8_t)(frame_data_offset +
5109 			    sizeof(struct ip) + sizeof(struct tcphdr));
5110 			frame->reply_length = frame->length;
5111 
5112 			frame->addr_length = sizeof(struct in_addr);
5113 			bcopy(&inp->inp_laddr, frame->local_addr,
5114 			    sizeof(struct in_addr));
5115 			bcopy(&inp->inp_faddr, frame->remote_addr,
5116 			    sizeof(struct in_addr));
5117 		} else {
5118 			struct in6_addr *ip6;
5119 
5120 			ASSERT(frame_data_offset + sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= UINT8_MAX);
5121 			frame->length = (uint8_t)(frame_data_offset +
5122 			    sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
5123 			frame->reply_length = frame->length;
5124 
5125 			frame->addr_length = sizeof(struct in6_addr);
5126 			ip6 = (struct in6_addr *)(void *)frame->local_addr;
5127 			bcopy(&inp->in6p_laddr, ip6, sizeof(struct in6_addr));
5128 			if (IN6_IS_SCOPE_EMBED(ip6)) {
5129 				ip6->s6_addr16[1] = 0;
5130 			}
5131 
5132 			ip6 = (struct in6_addr *)(void *)frame->remote_addr;
5133 			bcopy(&inp->in6p_faddr, ip6, sizeof(struct in6_addr));
5134 			if (IN6_IS_SCOPE_EMBED(ip6)) {
5135 				ip6->s6_addr16[1] = 0;
5136 			}
5137 		}
5138 
5139 		/*
5140 		 * First the probe
5141 		 */
5142 		m = tcp_make_keepalive_frame(tp, ifp, TRUE);
5143 		if (m == NULL) {
5144 			socket_unlock(so, 1);
5145 			continue;
5146 		}
5147 		bcopy(m_mtod_current(m), frame->data + frame_data_offset, m->m_len);
5148 		m_freem(m);
5149 
5150 		/*
5151 		 * Now the response packet to incoming probes
5152 		 */
5153 		m = tcp_make_keepalive_frame(tp, ifp, FALSE);
5154 		if (m == NULL) {
5155 			socket_unlock(so, 1);
5156 			continue;
5157 		}
5158 		bcopy(m_mtod_current(m), frame->reply_data + frame_data_offset,
5159 		    m->m_len);
5160 		m_freem(m);
5161 
5162 		frame_index++;
5163 		socket_unlock(so, 1);
5164 	}
5165 	lck_rw_done(&tcbinfo.ipi_lock);
5166 	*used_frames_count = frame_index;
5167 }
5168 
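/*
 * Match a keepalive offload frame back to its inpcb by interface, ports
 * and addresses.  Called with the socket locked and a want-count
 * reference held; the reference is released here.
 */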
5169 static bool
5170 inp_matches_kao_frame(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frame,
5171     struct inpcb *inp)
5172 {
5173 	if (inp->inp_ppcb == NULL) {
5174 		return false;
5175 	}
5176 	/* Release the want count */
5177 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
5178 		return false;
5179 	}
5180 	if (inp->inp_last_outifp == NULL ||
5181 	    inp->inp_last_outifp->if_index != ifp->if_index) {
5182 		return false;
5183 	}
5184 	if (frame->local_port != ntohs(inp->inp_lport) ||
5185 	    frame->remote_port != ntohs(inp->inp_fport)) {
5186 		return false;
5187 	}
5188 	if (inp->inp_vflag & INP_IPV4) {
5189 		if (memcmp(&inp->inp_laddr, frame->local_addr,
5190 		    sizeof(struct in_addr)) != 0 ||
5191 		    memcmp(&inp->inp_faddr, frame->remote_addr,
5192 		    sizeof(struct in_addr)) != 0) {
5193 			return false;
5194 		}
5195 	} else if (inp->inp_vflag & INP_IPV6) {
5196 		if (memcmp(&inp->in6p_laddr, frame->local_addr,
5197 		    sizeof(struct in6_addr)) != 0 ||
5198 		    memcmp(&inp->in6p_faddr, frame->remote_addr,
5199 		    sizeof(struct in6_addr)) != 0) {
5200 			return false;
5201 		}
5202 	} else {
5203 		return false;
5204 	}
5205 	return true;
5206 }
5207 
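/*
 * Called when an offloaded keepalive probe has gone unanswered: find the
 * matching connection and drop it with ETIMEDOUT, just as the in-kernel
 * keepalive timer would.
 */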
5208 int
5209 tcp_notify_kao_timeout(ifnet_t ifp,
5210     struct ifnet_keepalive_offload_frame *frame)
5211 {
5212 	struct inpcb *inp = NULL;
5213 	struct socket *so = NULL;
5214 	bool found = false;
5215 
5216 	/*
5217 	 * Unlock the pcb list before posting the event on the matching socket.
5218 	 */
5219 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
5220 
5221 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
5222 		if ((so = inp->inp_socket) == NULL ||
5223 		    (so->so_state & SS_DEFUNCT)) {
5224 			continue;
5225 		}
5226 		if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
5227 			continue;
5228 		}
5229 		if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
5230 			continue;
5231 		}
5232 		if (inp->inp_ppcb == NULL ||
5233 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
5234 			continue;
5235 		}
5236 		socket_lock(so, 1);
5237 		if (inp_matches_kao_frame(ifp, frame, inp)) {
5238 			/*
5239 			 * Keep the matching socket locked
5240 			 */
5241 			found = true;
5242 			break;
5243 		}
5244 		socket_unlock(so, 1);
5245 	}
5246 	lck_rw_done(&tcbinfo.ipi_lock);
5247 
5248 	if (found) {
5249 		ASSERT(inp != NULL);
5250 		ASSERT(so != NULL);
5251 		ASSERT(so == inp->inp_socket);
5252 		/*
5253 		 * Drop the TCP connection like tcptimers() does
5254 		 */
5255 		tcpcb_ref_t tp = inp->inp_ppcb;
5256 
5257 		tcpstat.tcps_keepdrops++;
5258 		soevent(so,
5259 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
5260 		tp = tcp_drop(tp, ETIMEDOUT);
5261 
5262 		tcpstat.tcps_ka_offload_drops++;
5263 		os_log_info(OS_LOG_DEFAULT, "%s: dropped lport %u fport %u\n",
5264 		    __func__, frame->local_port, frame->remote_port);
5265 
5266 		socket_unlock(so, 1);
5267 	}
5268 
5269 	return 0;
5270 }
5271 
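/*
 * Notify-ack markers: each marker records the sequence number snd_una
 * must reach (current snd_una plus everything buffered in so_snd) for
 * all data written so far to be acknowledged.  When the head marker
 * completes, a SO_FILT_HINT_NOTIFY_ACK event is posted and the IDs of
 * completed markers can be collected.  A hedged userland sketch via the
 * private TCP_NOTIFY_ACKNOWLEDGEMENT socket option (the exact SPI shape
 * is an assumption, for illustration only):
 *
 *	uint32_t id = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NOTIFY_ACKNOWLEDGEMENT,
 *	    &id, sizeof(id));
 *	... wait for the notification event, then ...
 *	struct tcp_notify_ack_complete comp = {};
 *	socklen_t len = sizeof(comp);
 *	getsockopt(fd, IPPROTO_TCP, TCP_NOTIFY_ACKNOWLEDGEMENT,
 *	    &comp, &len);
 */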
5272 errno_t
5273 tcp_notify_ack_id_valid(struct tcpcb *tp, struct socket *so,
5274     u_int32_t notify_id)
5275 {
5276 	struct tcp_notify_ack_marker *elm;
5277 
5278 	if (so->so_snd.sb_cc == 0) {
5279 		return ENOBUFS;
5280 	}
5281 
5282 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5283 		/* Duplicate id is not allowed */
5284 		if (elm->notify_id == notify_id) {
5285 			return EINVAL;
5286 		}
5287 		/* Duplicate position is not allowed */
5288 		if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) {
5289 			return EINVAL;
5290 		}
5291 	}
5292 	return 0;
5293 }
5294 
5295 errno_t
5296 tcp_add_notify_ack_marker(struct tcpcb *tp, u_int32_t notify_id)
5297 {
5298 	struct tcp_notify_ack_marker *nm, *elm = NULL;
5299 	struct socket *so = tp->t_inpcb->inp_socket;
5300 
5301 	nm = kalloc_type(struct tcp_notify_ack_marker, Z_WAITOK | Z_ZERO);
5302 	if (nm == NULL) {
5303 		return ENOMEM;
5304 	}
5305 	nm->notify_id = notify_id;
5306 	nm->notify_snd_una = tp->snd_una + so->so_snd.sb_cc;
5307 
	/*
	 * notify_snd_una positions are strictly increasing (duplicate
	 * positions are rejected by tcp_notify_ack_id_valid()), so append
	 * the new marker at the tail to keep the list sorted oldest-first,
	 * the order in which the consumers below walk it.
	 */
5308 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5309 		if (SLIST_NEXT(elm, notify_next) == NULL) {
5310 			break;
5311 		}
5312 	}
5313 
5314 	if (elm == NULL) {
5315 		VERIFY(SLIST_EMPTY(&tp->t_notify_ack));
5316 		SLIST_INSERT_HEAD(&tp->t_notify_ack, nm, notify_next);
5317 	} else {
5318 		SLIST_INSERT_AFTER(elm, nm, notify_next);
5319 	}
5320 	tp->t_notify_ack_count++;
5321 	return 0;
5322 }
5323 
5324 void
5325 tcp_notify_ack_free(struct tcpcb *tp)
5326 {
5327 	struct tcp_notify_ack_marker *elm, *next;
5328 	if (SLIST_EMPTY(&tp->t_notify_ack)) {
5329 		return;
5330 	}
5331 
5332 	SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
5333 		SLIST_REMOVE(&tp->t_notify_ack, elm, tcp_notify_ack_marker,
5334 		    notify_next);
5335 		kfree_type(struct tcp_notify_ack_marker, elm);
5336 	}
5337 	SLIST_INIT(&tp->t_notify_ack);
5338 	tp->t_notify_ack_count = 0;
5339 }
5340 
5341 inline void
5342 tcp_notify_acknowledgement(struct tcpcb *tp, struct socket *so)
5343 {
5344 	struct tcp_notify_ack_marker *elm;
5345 
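	/*
	 * Callers must guarantee a non-empty marker list: SLIST_FIRST()
	 * is dereferenced below without a NULL check.
	 */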
5346 	elm = SLIST_FIRST(&tp->t_notify_ack);
5347 	if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5348 		soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOTIFY_ACK);
5349 	}
5350 }
5351 
5352 void
5353 tcp_get_notify_ack_count(struct tcpcb *tp,
5354     struct tcp_notify_ack_complete *retid)
5355 {
5356 	struct tcp_notify_ack_marker *elm;
5357 	uint32_t complete = 0;
5358 
5359 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5360 		if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5361 			ASSERT(complete < UINT32_MAX);
5362 			complete++;
5363 		} else {
5364 			break;
5365 		}
5366 	}
5367 	retid->notify_pending = tp->t_notify_ack_count - complete;
5368 	retid->notify_complete_count = min(TCP_MAX_NOTIFY_ACK, complete);
5369 }
5370 
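/*
 * Copy the IDs of completed markers, up to the notify_complete_count
 * previously computed by tcp_get_notify_ack_count(), into retid and
 * unlink those markers from the list.
 */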
5371 void
5372 tcp_get_notify_ack_ids(struct tcpcb *tp,
5373     struct tcp_notify_ack_complete *retid)
5374 {
5375 	size_t i = 0;
5376 	struct tcp_notify_ack_marker *elm, *next;
5377 
5378 	SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
5379 		if (i >= retid->notify_complete_count) {
5380 			break;
5381 		}
5382 		if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5383 			retid->notify_complete_id[i++] = elm->notify_id;
5384 			SLIST_REMOVE(&tp->t_notify_ack, elm,
5385 			    tcp_notify_ack_marker, notify_next);
5386 			kfree_type(struct tcp_notify_ack_marker, elm);
5387 			tp->t_notify_ack_count--;
5388 		} else {
5389 			break;
5390 		}
5391 	}
5392 }
5393 
5394 bool
5395 tcp_notify_ack_active(struct socket *so)
5396 {
5397 	if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
5398 	    SOCK_TYPE(so) == SOCK_STREAM) {
5399 		struct tcpcb *tp = intotcpcb(sotoinpcb(so));
5400 
5401 		if (!SLIST_EMPTY(&tp->t_notify_ack)) {
5402 			struct tcp_notify_ack_marker *elm;
5403 			elm = SLIST_FIRST(&tp->t_notify_ack);
5404 			if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5405 				return true;
5406 			}
5407 		}
5408 	}
5409 	return false;
5410 }
5411 
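/*
 * Bytes in so_snd that are still unsent when th_ack is taken as the
 * cumulative ACK.  TF_SENTFIN is compensated for because the FIN
 * consumes a sequence number without occupying socket-buffer space.
 */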
5412 inline int32_t
5413 inp_get_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
5414 {
5415 	struct inpcb *inp = sotoinpcb(so);
5416 	struct tcpcb *tp = intotcpcb(inp);
5417 
5418 	if ((so->so_snd.sb_flags & SB_SNDBYTE_CNT) &&
5419 	    so->so_snd.sb_cc > 0) {
5420 		int32_t unsent, sent;
5421 		sent = tp->snd_max - th_ack;
5422 		if (tp->t_flags & TF_SENTFIN) {
5423 			sent--;
5424 		}
5425 		unsent = so->so_snd.sb_cc - sent;
5426 		return unsent;
5427 	}
5428 	return 0;
5429 }
5430 
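/*
 * Extract the 3-bit ACE field defined by Accurate ECN: AE (carried in
 * the former reserved bit above CWR, hence the th_x2 test) has weight 4,
 * CWR weight 2 and ECE weight 1.
 */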
5431 uint8_t
5432 tcp_get_ace(struct tcphdr *th)
5433 {
5434 	uint8_t ace = 0;
5435 	if (th->th_flags & TH_ECE) {
5436 		ace += 1;
5437 	}
5438 	if (th->th_flags & TH_CWR) {
5439 		ace += 2;
5440 	}
5441 	if (th->th_x2 & (TH_AE >> 8)) {
5442 		ace += 4;
5443 	}
5444 
5445 	return ace;
5446 }
5447 
5448 #define IFP_PER_FLOW_STAT(_ipv4_, _stat_) { \
5449 	if (_ipv4_) { \
5450 	        ifp->if_ipv4_stat->_stat_++; \
5451 	} else { \
5452 	        ifp->if_ipv6_stat->_stat_++; \
5453 	} \
5454 }
5455 
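/* True when ECN was successfully negotiated for the flow */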
5456 #define FLOW_ECN_ENABLED(_flags_) \
5457     (((_flags_) & TE_ECN_ON) == TE_ECN_ON)
5458 
5459 void
5460 tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
5461     struct ifnet *ifp)
5462 {
5463 	if (ifp == NULL || !ifnet_is_fully_attached(ifp)) {
5464 		return;
5465 	}
5466 
5467 	ifnet_lock_shared(ifp);
5468 	if (ifs->ecn_flags & TE_SETUPSENT) {
5469 		if (ifs->ecn_flags & TE_CLIENT_SETUP) {
5470 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_client_setup);
5471 			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5472 				IFP_PER_FLOW_STAT(ifs->ipv4,
5473 				    ecn_client_success);
5474 			} else if (ifs->ecn_flags & TE_LOST_SYN) {
5475 				IFP_PER_FLOW_STAT(ifs->ipv4,
5476 				    ecn_syn_lost);
5477 			} else {
5478 				IFP_PER_FLOW_STAT(ifs->ipv4,
5479 				    ecn_peer_nosupport);
5480 			}
5481 		} else {
5482 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_server_setup);
5483 			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5484 				IFP_PER_FLOW_STAT(ifs->ipv4,
5485 				    ecn_server_success);
5486 			} else if (ifs->ecn_flags & TE_LOST_SYN) {
5487 				IFP_PER_FLOW_STAT(ifs->ipv4,
5488 				    ecn_synack_lost);
5489 			} else {
5490 				IFP_PER_FLOW_STAT(ifs->ipv4,
5491 				    ecn_peer_nosupport);
5492 			}
5493 		}
5494 	} else {
5495 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off_conn);
5496 	}
5497 	if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5498 		if (ifs->ecn_flags & TE_RECV_ECN_CE) {
5499 			tcpstat.tcps_ecn_conn_recv_ce++;
5500 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ce);
5501 		}
5502 		if (ifs->ecn_flags & TE_RECV_ECN_ECE) {
5503 			tcpstat.tcps_ecn_conn_recv_ece++;
5504 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ece);
5505 		}
5506 		if (ifs->ecn_flags & (TE_RECV_ECN_CE | TE_RECV_ECN_ECE)) {
5507 			if (ifs->txretransmitbytes > 0 ||
5508 			    ifs->rxoutoforderbytes > 0) {
5509 				tcpstat.tcps_ecn_conn_pl_ce++;
5510 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plce);
5511 			} else {
5512 				tcpstat.tcps_ecn_conn_nopl_ce++;
5513 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_noplce);
5514 			}
5515 		} else {
5516 			if (ifs->txretransmitbytes > 0 ||
5517 			    ifs->rxoutoforderbytes > 0) {
5518 				tcpstat.tcps_ecn_conn_plnoce++;
5519 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plnoce);
5520 			}
5521 		}
5522 	}
5523 
5524 	/* Other stats are interesting for non-local connections only */
5525 	if (ifs->local) {
5526 		ifnet_lock_done(ifp);
5527 		return;
5528 	}
5529 
5530 	if (ifs->ipv4) {
5531 		ifp->if_ipv4_stat->timestamp = net_uptime();
5532 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5533 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_on);
5534 		} else {
5535 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_off);
5536 		}
5537 	} else {
5538 		ifp->if_ipv6_stat->timestamp = net_uptime();
5539 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5540 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_on);
5541 		} else {
5542 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_off);
5543 		}
5544 	}
5545 
5546 	if (ifs->rxmit_drop) {
5547 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5548 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_on.rxmit_drop);
5549 		} else {
5550 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop);
5551 		}
5552 	}
5553 	if (ifs->ecn_fallback_synloss) {
5554 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss);
5555 	}
5556 	if (ifs->ecn_fallback_droprst) {
5557 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst);
5558 	}
5559 	if (ifs->ecn_fallback_droprxmt) {
5560 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt);
5561 	}
5562 	if (ifs->ecn_fallback_ce) {
5563 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce);
5564 	}
5565 	if (ifs->ecn_fallback_reorder) {
5566 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder);
5567 	}
5568 	if (ifs->ecn_recv_ce > 0) {
5569 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce);
5570 	}
5571 	if (ifs->ecn_recv_ece > 0) {
5572 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece);
5573 	}
5574 
5575 	tcp_flow_lim_stats(ifs, &ifp->if_lim_stat);
5576 
5577 	/*
5578 	 * Link heuristics are updated here only for NECP client flows, when
5579 	 * they close; socket flows are updated live.
5580 	 */
5581 	os_atomic_add(&ifp->if_tcp_stat->linkheur_noackpri, ifs->linkheur_noackpri, relaxed);
5582 	os_atomic_add(&ifp->if_tcp_stat->linkheur_comprxmt, ifs->linkheur_comprxmt, relaxed);
5583 	os_atomic_add(&ifp->if_tcp_stat->linkheur_synrxmt, ifs->linkheur_synrxmt, relaxed);
5584 	os_atomic_add(&ifp->if_tcp_stat->linkheur_rxmtfloor, ifs->linkheur_rxmtfloor, relaxed);
5585 
5586 	ifnet_lock_done(ifp);
5587 }
5588 
5589 #if SKYWALK
5590 
5591 #include <skywalk/core/skywalk_var.h>
5592 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
5593 
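/*
 * Register the connection's five-tuple with the interface's flowswitch
 * so inbound segments can be steered and aggregated for this socket
 * flow; the flowswitch and flow UUIDs are stashed in the tcpcb so
 * tcp_del_fsw_flow() can undo the registration at teardown.
 */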
5594 void
5595 tcp_add_fsw_flow(struct tcpcb *tp, struct ifnet *ifp)
5596 {
5597 	struct inpcb *inp = tp->t_inpcb;
5598 	struct socket *so = inp->inp_socket;
5599 	uuid_t fsw_uuid;
5600 	struct nx_flow_req nfr;
5601 	int err;
5602 
5603 	if (!NX_FSW_TCP_RX_AGG_ENABLED()) {
5604 		return;
5605 	}
5606 
5607 	if (ifp == NULL || kern_nexus_get_flowswitch_instance(ifp, fsw_uuid)) {
5608 		TCP_LOG_FSW_FLOW(tp, "skip ifp no fsw");
5609 		return;
5610 	}
5611 
5612 	memset(&nfr, 0, sizeof(nfr));
5613 
5614 	if (inp->inp_vflag & INP_IPV4) {
5615 		ASSERT(!(inp->inp_laddr.s_addr == INADDR_ANY ||
5616 		    inp->inp_faddr.s_addr == INADDR_ANY ||
5617 		    IN_MULTICAST(ntohl(inp->inp_laddr.s_addr)) ||
5618 		    IN_MULTICAST(ntohl(inp->inp_faddr.s_addr))));
5619 		nfr.nfr_saddr.sin.sin_len = sizeof(struct sockaddr_in);
5620 		nfr.nfr_saddr.sin.sin_family = AF_INET;
5621 		nfr.nfr_saddr.sin.sin_port = inp->inp_lport;
5622 		memcpy(&nfr.nfr_saddr.sin.sin_addr, &inp->inp_laddr,
5623 		    sizeof(struct in_addr));
5624 		nfr.nfr_daddr.sin.sin_len = sizeof(struct sockaddr_in);
5625 		nfr.nfr_daddr.sin.sin_family = AF_INET;
5626 		nfr.nfr_daddr.sin.sin_port = inp->inp_fport;
5627 		memcpy(&nfr.nfr_daddr.sin.sin_addr, &inp->inp_faddr,
5628 		    sizeof(struct in_addr));
5629 	} else {
5630 		ASSERT(!(IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
5631 		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) ||
5632 		    IN6_IS_ADDR_MULTICAST(&inp->in6p_laddr) ||
5633 		    IN6_IS_ADDR_MULTICAST(&inp->in6p_faddr)));
5634 		nfr.nfr_saddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
5635 		nfr.nfr_saddr.sin6.sin6_family = AF_INET6;
5636 		nfr.nfr_saddr.sin6.sin6_port = inp->inp_lport;
5637 		memcpy(&nfr.nfr_saddr.sin6.sin6_addr, &inp->in6p_laddr,
5638 		    sizeof(struct in6_addr));
5639 		nfr.nfr_daddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
5640 		nfr.nfr_daddr.sin6.sin6_family = AF_INET6;
5641 		nfr.nfr_daddr.sin6.sin6_port = inp->inp_fport;
5642 		memcpy(&nfr.nfr_daddr.sin6.sin6_addr, &inp->in6p_faddr,
5643 		    sizeof(struct in6_addr));
5644 		/* clear embedded scope ID */
5645 		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_saddr.sin6.sin6_addr)) {
5646 			nfr.nfr_saddr.sin6.sin6_addr.s6_addr16[1] = 0;
5647 		}
5648 		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_daddr.sin6.sin6_addr)) {
5649 			nfr.nfr_daddr.sin6.sin6_addr.s6_addr16[1] = 0;
5650 		}
5651 	}
5652 
5653 	nfr.nfr_nx_port = 1;
5654 	nfr.nfr_ip_protocol = IPPROTO_TCP;
5655 	nfr.nfr_transport_protocol = IPPROTO_TCP;
5656 	nfr.nfr_flags = NXFLOWREQF_ASIS;
5657 	nfr.nfr_epid = (so != NULL ? so->last_pid : 0);
5658 	if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
5659 		nfr.nfr_port_reservation = inp->inp_netns_token;
5660 		nfr.nfr_flags |= NXFLOWREQF_EXT_PORT_RSV;
5661 	}
5662 	ASSERT(inp->inp_flowhash != 0);
5663 	nfr.nfr_inp_flowhash = inp->inp_flowhash;
5664 
5665 	uuid_generate_random(nfr.nfr_flow_uuid);
5666 	err = kern_nexus_flow_add(kern_nexus_shared_controller(), fsw_uuid,
5667 	    &nfr, sizeof(nfr));
5668 
5669 	if (err == 0) {
5670 		uuid_copy(tp->t_fsw_uuid, fsw_uuid);
5671 		uuid_copy(tp->t_flow_uuid, nfr.nfr_flow_uuid);
5672 	}
5673 
5674 	TCP_LOG_FSW_FLOW(tp, "add err %d\n", err);
5675 }
5676 
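/*
 * Remove the flowswitch flow added by tcp_add_fsw_flow(), if any.
 */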
5677 void
5678 tcp_del_fsw_flow(struct tcpcb *tp)
5679 {
5680 	if (uuid_is_null(tp->t_fsw_uuid) || uuid_is_null(tp->t_flow_uuid)) {
5681 		return;
5682 	}
5683 
5684 	struct nx_flow_req nfr;
5685 	uuid_copy(nfr.nfr_flow_uuid, tp->t_flow_uuid);
5686 
5687 	/* It's possible for this call to fail if the nexus has detached */
5688 	int err = kern_nexus_flow_del(kern_nexus_shared_controller(),
5689 	    tp->t_fsw_uuid, &nfr, sizeof(nfr));
5690 	VERIFY(err == 0 || err == ENOENT || err == ENXIO);
5691 
5692 	uuid_clear(tp->t_fsw_uuid);
5693 	uuid_clear(tp->t_flow_uuid);
5694 
5695 	TCP_LOG_FSW_FLOW(tp, "del err %d\n", err);
5696 }
5697 
5698 #endif /* SKYWALK */
5699