xref: /xnu-12377.41.6/bsd/netinet/tcp_subr.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
61  */
62 /*
63  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64  * support for mandatory and extensible security protections.  This notice
65  * is included in support of clause 2.2 (b) of the Apple Public License,
66  * Version 2.0.
67  */
68 
69 #include "tcp_includes.h"
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/domain.h>
78 #include <sys/proc.h>
79 #include <sys/kauth.h>
80 #include <sys/socket.h>
81 #include <sys/socketvar.h>
82 #include <sys/protosw.h>
83 #include <sys/random.h>
84 #include <sys/syslog.h>
85 #include <sys/mcache.h>
86 #include <kern/locks.h>
87 #include <kern/uipc_domain.h>
88 #include <kern/zalloc.h>
89 
90 #include <dev/random/randomdev.h>
91 
92 #include <net/route.h>
93 #include <net/if.h>
94 #include <net/content_filter.h>
95 #include <net/ntstat.h>
96 #include <net/multi_layer_pkt_log.h>
97 
98 #define tcp_minmssoverload fring
99 #define _IP_VHL
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/ip.h>
103 #include <netinet/ip_icmp.h>
104 #include <netinet/ip6.h>
105 #include <netinet/icmp6.h>
106 #include <netinet/in_pcb.h>
107 #include <netinet6/in6_pcb.h>
108 #include <netinet/in_var.h>
109 #include <netinet/ip_var.h>
110 #include <netinet/icmp_var.h>
111 #include <netinet6/ip6_var.h>
112 #include <netinet/mptcp_var.h>
113 #include <netinet/tcp.h>
114 #include <netinet/tcp_fsm.h>
115 #include <netinet/tcp_seq.h>
116 #include <netinet/tcp_syncookie.h>
117 #include <netinet/tcp_timer.h>
118 #include <netinet/tcp_var.h>
119 #include <netinet/tcp_cc.h>
120 #include <netinet/tcp_cache.h>
121 #include <kern/thread_call.h>
122 
123 #include <netinet6/tcp6_var.h>
124 #include <netinet/tcpip.h>
125 #include <netinet/tcp_log.h>
126 
127 #include <netinet6/ip6protosw.h>
128 #include <netinet6/esp.h>
129 
130 #if IPSEC
131 #include <netinet6/ipsec.h>
132 #include <netinet6/ipsec6.h>
133 #endif /* IPSEC */
134 
135 #if NECP
136 #include <net/necp.h>
137 #endif /* NECP */
138 
139 #undef tcp_minmssoverload
140 
141 #include <net/sockaddr_utils.h>
142 
143 #include <corecrypto/ccaes.h>
144 #include <libkern/crypto/aes.h>
145 #include <libkern/crypto/md5.h>
146 #include <sys/kdebug.h>
147 #include <mach/sdt.h>
148 #include <pexpert/pexpert.h>
149 #include <mach/mach_time.h>
150 #include <os/ptrtools.h>
151 
152 #define DBG_FNC_TCP_CLOSE       NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
153 
154 static tcp_cc tcp_ccgen;
155 
156 struct mem_acct *tcp_memacct;
157 
158 extern struct tcptimerlist tcp_timer_list;
159 extern struct tcptailq tcp_tw_tailq;
160 
161 extern int tcp_awdl_rtobase;
162 
163 SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED,
164     int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size");
165 
166 SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt,
167     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS,
168     "Default TCP Maximum Segment Size for IPv6");
169 
170 int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int,
171     struct sysctl_req *);
172 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key, CTLTYPE_STRING | CTLFLAG_WR,
173     0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
174 
175 /* Current count of half-open TFO connections */
176 int     tcp_tfo_halfcnt = 0;
177 
178 /* Maximum backlog of half-open TFO connections */
179 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen_backlog,
180     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10,
181     "Backlog queue for half-open TFO connections");
182 
183 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED,
184     int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER,
185     "Enable TCP Fastopen (RFC 7413)");
186 
187 /* ToDo - remove once uTCP stops using it */
188 SYSCTL_SKMEM_TCP_INT(OID_AUTO, now_init, CTLFLAG_RD | CTLFLAG_LOCKED,
189     uint32_t, tcp_now_init, 0, "Initial tcp now value");
190 
191 /* ToDo - remove once uTCP stops using it */
192 SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED,
193     uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in microseconds");
194 
195 /*
196  * Minimum MSS we accept and use. This prevents DoS attacks where
197  * we are forced to a ridiculously low MSS like 20 and send hundreds
198  * of packets instead of one. The effect scales with the available
199  * bandwidth and quickly saturates the CPU and network interface
200  * with packet generation and sending. Set to zero to disable MINMSS
201  * checking. This setting prevents us from sending too small packets.
202  */
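/*
 * Illustrative arithmetic (not from the original comment): at
 * 100 Mbit/s of goodput, an MSS of 1460 costs roughly 8,500 packets
 * per second, while a forced MSS of 20 costs over 600,000 packets
 * per second for the same amount of data.
 */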
203 SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
204     int, tcp_minmss, TCP_MINMSS, "Minimum TCP Maximum Segment Size");
205 
206 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
207     &tcbinfo.ipi_count, 0, "Number of active PCBs");
208 
209 SYSCTL_SKMEM_TCP_INT(OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED,
210     static int, icmp_may_rst, 1,
211     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
212 
213 int             tcp_do_timestamps = 1;
214 #if (DEVELOPMENT || DEBUG)
215 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_timestamps,
216     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_timestamps, 0, "enable TCP timestamps");
217 #endif /* (DEVELOPMENT || DEBUG) */
218 
219 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
220     int, tcp_TCPTV_MIN, 100, "min rtt value allowed");
221 
222 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rexmt_slop, CTLFLAG_RW,
223     int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout");
224 
225 SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
226     __private_extern__ int, tcp_use_randomport, 0,
227     "Randomize TCP port numbers");
228 
229 SYSCTL_SKMEM_TCP_INT(OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
230     __private_extern__ int, tcp_win_scale, 3, "Window scaling factor");
231 
232 #if (DEVELOPMENT || DEBUG)
233 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
234     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
235     "Initalize RTT from route cache");
236 #else
237 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
238     CTLFLAG_RD | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
239     "Initalize RTT from route cache");
240 #endif /* (DEVELOPMENT || DEBUG) */
241 
242 static int tso_debug = 0;
243 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
244     &tso_debug, 0, "TSO verbosity");
245 
246 static int tcp_rxt_seg_max = 1024;
247 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rxt_seg_max, CTLFLAG_RW | CTLFLAG_LOCKED,
248     &tcp_rxt_seg_max, 0, "");
249 
250 static unsigned long tcp_rxt_seg_drop = 0;
251 SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, rxt_seg_drop, CTLFLAG_RD | CTLFLAG_LOCKED,
252     &tcp_rxt_seg_drop, "");
253 
254 static void     tcp_notify(struct inpcb *, int);
255 
256 static KALLOC_TYPE_DEFINE(tcp_bwmeas_zone, struct bwmeas, NET_KT_DEFAULT);
257 KALLOC_TYPE_DEFINE(tcp_reass_zone, struct tseg_qent, NET_KT_DEFAULT);
258 KALLOC_TYPE_DEFINE(tcp_rxt_seg_zone, struct tcp_rxt_seg, NET_KT_DEFAULT);
259 KALLOC_TYPE_DEFINE(tcp_seg_sent_zone, struct tcp_seg_sent, NET_KT_DEFAULT);
260 
261 extern int slowlink_wsize;      /* window correction for slow links */
262 extern int path_mtu_discovery;
263 
264 static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb);
265 
266 #define TCP_BWMEAS_BURST_MINSIZE 6
267 #define TCP_BWMEAS_BURST_MAXSIZE 25
268 
269 /*
270  * Target size of TCP PCB hash tables. Must be a power of two.
271  *
272  * Note that this can be overridden by the kernel environment
273  * variable net.inet.tcp.tcbhashsize
274  */
275 #ifndef TCBHASHSIZE
276 #define TCBHASHSIZE     CONFIG_TCBHASHSIZE
277 #endif
278 
279 __private_extern__ int  tcp_tcbhashsize = TCBHASHSIZE;
280 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED,
281     &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
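/*
 * A configured size that is not a power of two is adjusted in
 * tcp_init() via scale_to_powerof2(), with a floor of 16; e.g. a
 * value of 600 would be scaled up to 1024.
 */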
282 
283 /*
284  * This is the actual shape of what we allocate using the zone
285  * allocator.  Doing it this way allows us to protect both structures
286  * using the same generation count, and also eliminates the overhead
287  * of allocating tcpcbs separately.  By hiding the structure here,
288  * we avoid changing most of the rest of the code (although it needs
289  * to be changed, eventually, for greater efficiency).
290  */
291 #define ALIGNMENT       32
292 struct  inp_tp {
293 	struct  inpcb   inp;
294 	struct  tcpcb   tcb __attribute__((aligned(ALIGNMENT)));
295 };
296 #undef ALIGNMENT
297 
298 static KALLOC_TYPE_DEFINE(tcpcbzone, struct inp_tp, NET_KT_DEFAULT);
299 
300 os_log_t tcp_mpkl_log_object = NULL;
301 
302 static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
303 
304 int tcp_notsent_lowat_check(struct socket *so);
305 static void tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
306     struct if_lim_perf_stat *stat);
307 static void tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
308     struct if_tcp_ecn_perf_stat *stat);
309 
310 static aes_encrypt_ctx tfo_ctx; /* Crypto-context for TFO */
311 
312 /* TCP RST duplicate suppression */
313 static LCK_ATTR_DECLARE(tcp_rst_rlc_attr, 0, 0);
314 static LCK_GRP_DECLARE(tcp_rst_rlc_mtx_grp, "rst_rlc");
315 static LCK_MTX_DECLARE_ATTR(tcp_rst_rlc_mtx_data, &tcp_rst_rlc_mtx_grp, &tcp_rst_rlc_attr);
316 static lck_mtx_t  * const tcp_rst_rlc_mtx = &tcp_rst_rlc_mtx_data;
317 
318 static struct in_endpoints      tcp_rst_rlc_state;
319 static uint32_t                 tcp_rst_rlc_ts;
320 static uint32_t                 tcp_rst_rlc_cnt = 0;
321 
322 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rst_rlc_enable,
323     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_rst_rlc_enable, 1,
324     "Enable RST run-length-compression");
325 
326 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rst_rlc_bucket_ms,
327     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_rst_rlc_bucket_ms, 200,
328     "Duration of RLC bucket in milliseconds for the RST run-length-compression");
329 
330 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rst_rlc_use_ts,
331     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_rst_rlc_use_ts, 1,
332     "Include timestamp in RST run-length-compression");
333 
334 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rst_rlc_verbose,
335     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_rst_rlc_verbose, 0,
336     "Verbose output: 0: no output; 1: log whenever the RST RLC buffer changes");
337 
338 
339 bool
340 tcp_rst_rlc_compress(void *ipgen __sized_by(ipgen_size), size_t ipgen_size __unused, struct tcphdr *th)
341 {
342 	struct ip *ip;
343 	struct ip6_hdr *ip6;
344 	bool isipv6;
345 	struct in_endpoints flow;
346 	bool should_throttle = false;
347 	uint32_t last_tcp_rst_rlc_cnt = 0;
348 	in_port_t last_sport = 0;
349 	in_port_t last_dport = 0;
350 
351 	if (tcp_rst_rlc_enable == 0 || (th->th_flags & TH_RST) == 0) {
352 		return false;
353 	}
354 	bzero(&flow, sizeof(flow));
355 
356 	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
357 
358 	ip6 = ipgen;
359 	ip = ipgen;
360 
361 	flow.ie_lport = th->th_sport;
362 	flow.ie_fport = th->th_dport;
363 
364 	if (isipv6) {
365 		bcopy(&ip6->ip6_src, &flow.ie6_laddr, sizeof(struct in6_addr));
366 		bcopy(&ip6->ip6_dst, &flow.ie6_faddr, sizeof(struct in6_addr));
367 	} else {
368 		bcopy(&ip->ip_src, &flow.ie_laddr, sizeof(struct in_addr));
369 		bcopy(&ip->ip_dst, &flow.ie_faddr, sizeof(struct in_addr));
370 	}
371 
372 	lck_mtx_lock(tcp_rst_rlc_mtx);
373 	if (__improbable((tcp_rst_rlc_use_ts == false || tcp_now - tcp_rst_rlc_ts < tcp_rst_rlc_bucket_ms) &&
374 	    bcmp(&flow, &tcp_rst_rlc_state, sizeof(struct in_endpoints)) == 0)) {
375 		/*
376 		 * The RST RLC state hasn't changed, so we should throttle.
377 		 */
378 		should_throttle = true;
379 		tcp_rst_rlc_cnt++;
380 		tcpstat.tcps_rst_dup_suppressed++;
381 	} else {
382 		should_throttle = false;
383 		last_tcp_rst_rlc_cnt = tcp_rst_rlc_cnt;
384 		last_sport = tcp_rst_rlc_state.ie_lport;
385 		last_dport = tcp_rst_rlc_state.ie_fport;
386 
387 		bcopy(&flow, &tcp_rst_rlc_state, sizeof(struct in_endpoints));
388 		tcp_rst_rlc_ts = tcp_now;
389 
390 		tcp_rst_rlc_cnt = 0;
391 		tcpstat.tcps_rst_not_suppressed++;
392 	}
393 	lck_mtx_unlock(tcp_rst_rlc_mtx);
394 
395 	if (tcp_rst_rlc_verbose) {
396 		if (last_tcp_rst_rlc_cnt != 0) {
397 			os_log(OS_LOG_DEFAULT, "RST RLC compression: compressed %u RST segments [%hu:%hu]",
398 			    last_tcp_rst_rlc_cnt, ntohs(last_sport), ntohs(last_dport));
399 		}
400 	}
401 
402 	return should_throttle;
403 }
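/*
 * Illustrative timeline, assuming the default 200 ms bucket and
 * tcp_rst_rlc_use_ts == 1: identical RSTs for one 4-tuple at
 * t = 0, 50, 120 and 180 ms send only the first; the other three are
 * suppressed and counted in tcps_rst_dup_suppressed.  An RST at
 * t = 250 ms, or for a different 4-tuple, refreshes the stored state
 * and is sent normally.
 */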
404 
405 void
406 tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out __sized_by(blk_size), size_t blk_size)
407 {
408 	u_char in[CCAES_BLOCK_SIZE];
409 	int isipv6 = inp->inp_vflag & INP_IPV6;
410 
411 	VERIFY(blk_size == CCAES_BLOCK_SIZE);
412 
413 	bzero(&in[0], CCAES_BLOCK_SIZE);
414 	bzero(&out[0], CCAES_BLOCK_SIZE);
415 
416 	if (isipv6) {
417 		memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr));
418 	} else {
419 		memcpy(in, &inp->inp_faddr, sizeof(struct in_addr));
420 	}
421 
422 	aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx);
423 }
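/*
 * Sketch of the scheme above: the peer address, zero-padded to one
 * AES block, is encrypted under the boot-time key in tfo_ctx, and the
 * ciphertext block is the TFO cookie.  A one-block CBC pass with a
 * NULL (zero) IV is effectively a single ECB block encryption here.
 */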
424 
425 __private_extern__ int
426 tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1,
427     __unused int arg2, struct sysctl_req *req)
428 {
429 	int error = 0;
430 	/*
431 	 * TFO-key is expressed as a string in hex format
432 	 *  +1 to account for the \0 char
433 	 *  +1 because sysctl_io_string() expects a string length but the sysctl command
434 	 *     now includes the terminating \0 in newlen -- see rdar://77205344
435 	 */
436 	char keystring[TCP_FASTOPEN_KEYLEN * 2 + 2];
437 	u_int32_t key[TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)];
438 	int i;
439 	size_t ks_len;
440 
441 	/*
442 	 * sysctl_io_string copies keystring into the oldptr of the sysctl_req.
443 	 * Make sure everything is zero, to avoid putting garbage in there or
444 	 * leaking the stack.
445 	 */
446 	bzero(keystring, sizeof(keystring));
447 
448 	error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL);
449 	if (error) {
450 		os_log(OS_LOG_DEFAULT,
451 		    "%s: sysctl_io_string() error %d, req->newlen %lu, sizeof(keystring) %lu",
452 		    __func__, error, req->newlen, sizeof(keystring));
453 		goto exit;
454 	}
455 	if (req->newptr == USER_ADDR_NULL) {
456 		goto exit;
457 	}
458 
459 	ks_len = strbuflen(keystring, sizeof(keystring));
460 	if (ks_len != TCP_FASTOPEN_KEYLEN * 2) {
461 		os_log(OS_LOG_DEFAULT,
462 		    "%s: strlen(keystring) %lu != TCP_FASTOPEN_KEYLEN * 2 %u, newlen %lu",
463 		    __func__, ks_len, TCP_FASTOPEN_KEYLEN * 2, req->newlen);
464 		error = EINVAL;
465 		goto exit;
466 	}
467 
468 	for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) {
469 		/*
470 		 * We jump over the keystring in 8-character (4 byte in hex)
471 		 * steps
472 		 */
473 		if (sscanf(__unsafe_null_terminated_from_indexable(&keystring[i * 8]), "%8x", &key[i]) != 1) {
474 			error = EINVAL;
475 			os_log(OS_LOG_DEFAULT,
476 			    "%s: sscanf() != 1, error EINVAL", __func__);
477 			goto exit;
478 		}
479 	}
480 
481 	aes_encrypt_key128((u_char *)key, &tfo_ctx);
482 
483 exit:
484 	return error;
485 }
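/*
 * Usage sketch (illustrative): the key must be written as exactly
 * 2 * TCP_FASTOPEN_KEYLEN hex characters, e.g. (assuming a 16-byte key)
 *     sysctl net.inet.tcp.fastopen_key=000102030405060708090a0b0c0d0e0f
 * Any other length, or a non-hex character, fails with EINVAL.
 */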
486 
487 static int scale_to_powerof2(int size);
488 
489 /*
490  * This helper routine returns one of the following scaled values of size:
491  * 1. Rounded down power of two value of size if the size value passed as
492  *    argument is not a power of two and the rounded up value overflows.
493  * OR
494  * 2. Rounded up power of two value of size if the size value passed as
495  *    argument is not a power of two and the rounded up value does not overflow
496  * OR
497  * 3. Same value as argument size if it is already a power of two.
498  */
499 static int
500 scale_to_powerof2(int size)
501 {
502 	/* Handle special case of size = 0 */
503 	int ret = size ? size : 1;
504 
505 	if (!powerof2(ret)) {
506 		while (!powerof2(size)) {
507 			/*
508 			 * Clear out least significant
509 			 * set bit till size is left with
510 			 * its highest set bit at which point
511 			 * it is rounded down power of two.
512 			 */
513 			size = size & (size - 1);
514 		}
515 
516 		/* Check for overflow when rounding up */
517 		if (0 == (size << 1)) {
518 			ret = size;
519 		} else {
520 			ret = size << 1;
521 		}
522 	}
523 
524 	return ret;
525 }
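/*
 * Illustrative values: scale_to_powerof2(512) == 512 (already a power
 * of two), scale_to_powerof2(600) == 1024 (rounded up), and
 * scale_to_powerof2(0) == 1 (special-cased above).
 */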
526 
527 /*
528  * Round the floating point to the next integer
529  * Eg. 1.3 will round up to 2.
530  */
531 uint32_t
532 tcp_ceil(double a)
533 {
534 	double res = (uint32_t) a;
535 	return (uint32_t)(res + (res < a));
536 }
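/* Illustrative: tcp_ceil(1.3) == 2, tcp_ceil(2.0) == 2, tcp_ceil(0.0) == 0. */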
537 
538 uint32_t
539 tcp_round_to(uint32_t val, uint32_t round)
540 {
541 	/*
542 	 * Round up or down, with ties rounding up. E.g., when rounding to a
543 	 * multiple of 10, 16 rounds to 20 and 14 rounds to 10.
544 	 */
545 	return ((val + (round / 2)) / round) * round;
546 }
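/* Illustrative: tcp_round_to(14, 10) == 10, tcp_round_to(15, 10) == 20 (ties round up). */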
547 
548 /*
549  * Round up to the next multiple of base.
550  * E.g. for a base of 64, 65 becomes 128
551  * and 2896 becomes 2944.
552  */
553 uint32_t
554 tcp_round_up(uint32_t val, uint32_t base)
555 {
556 	if (base == 1 || val % base == 0) {
557 		return val;
558 	}
559 
560 	return ((val + base) / base) * base;
561 }
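/* Illustrative: tcp_round_up(64, 64) == 64 (already a multiple), tcp_round_up(65, 64) == 128. */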
562 
563 uint32_t
564 ntoh24(u_char *p __sized_by(3))
565 {
566 	uint32_t v;
567 
568 	v  = (uint32_t)(p[0] << 16);
569 	v |= (uint32_t)(p[1] << 8);
570 	v |= (uint32_t)(p[2] << 0);
571 	return v;
572 }
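/* Illustrative: for p = {0x01, 0x02, 0x03}, ntoh24(p) == 0x010203. */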
573 
574 uint32_t
575 tcp_packets_this_ack(struct tcpcb *tp, uint32_t acked)
576 {
577 	return acked / tp->t_maxseg +
578 	       (((acked % tp->t_maxseg) != 0) ? 1 : 0);
579 }
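/*
 * Illustrative ceiling division: with t_maxseg == 1448, an ACK for
 * 4000 bytes counts as 3 packets (2 full segments plus a partial one).
 */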
580 
581 static void
582 tcp_tfo_init(void)
583 {
584 	u_char key[TCP_FASTOPEN_KEYLEN];
585 
586 	read_frandom(key, sizeof(key));
587 	aes_encrypt_key128(key, &tfo_ctx);
588 }
589 
590 static u_char isn_secret[32];
591 
592 /*
593  * Tcp initialization
594  */
595 void
596 tcp_init(struct protosw *pp, struct domain *dp)
597 {
598 #pragma unused(dp)
599 	static int tcp_initialized = 0;
600 	struct inpcbinfo *pcbinfo;
601 	struct timeval now;
602 
603 	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
604 
605 	if (tcp_memacct == NULL) {
606 		uint64_t hlimit = max_mem_actual >> 5;
607 		tcp_memacct = mem_acct_register("TCP", hlimit, 80);
608 		if (tcp_memacct == NULL) {
609 			panic("mem_acct_register returned NULL");
610 		}
611 	}
612 	pp->pr_mem_acct = tcp_memacct;
613 
614 	if (!os_atomic_cmpxchg(&tcp_initialized, 0, 1, relaxed)) {
615 		return;
616 	}
617 
618 #if DEBUG || DEVELOPMENT
619 	(void) PE_parse_boot_argn("tcp_rxt_seg_max", &tcp_rxt_seg_max,
620 	    sizeof(tcp_rxt_seg_max));
621 #endif /* DEBUG || DEVELOPMENT */
622 
623 	tcp_ccgen = 1;
624 	tcp_keepinit = TCPTV_KEEP_INIT;
625 	tcp_keepidle = TCPTV_KEEP_IDLE;
626 	tcp_keepintvl = TCPTV_KEEPINTVL;
627 	tcp_keepcnt = TCPTV_KEEPCNT;
628 	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
629 	tcp_msl = TCPTV_MSL;
630 
631 	microuptime(&now);
632 	tcp_now = (uint32_t)now.tv_sec * 1000 + now.tv_usec / TCP_RETRANSHZ_TO_USEC;
633 
634 	/* ToDo - remove once uTCP stops using it */
635 	tcp_now_init = tcp_now;
636 	tcp_microuptime_init = tcp_now;
637 	SYSCTL_SKMEM_UPDATE_FIELD(tcp.microuptime_init, tcp_microuptime_init);
638 	SYSCTL_SKMEM_UPDATE_FIELD(tcp.now_init, tcp_now_init);
639 
640 	tcp_tfo_init();
641 	tcp_syncookie_init();
642 
643 	LIST_INIT(&tcb);
644 	tcbinfo.ipi_listhead = &tcb;
645 
646 	pcbinfo = &tcbinfo;
647 
648 	/*
649 	 * allocate group, lock attributes and lock for tcp pcb mutexes
650 	 */
651 	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("tcppcb",
652 	    LCK_GRP_ATTR_NULL);
653 	lck_attr_setdefault(&pcbinfo->ipi_lock_attr);
654 	lck_rw_init(&pcbinfo->ipi_lock, pcbinfo->ipi_lock_grp,
655 	    &pcbinfo->ipi_lock_attr);
656 
657 	if (tcp_tcbhashsize == 0) {
658 		/* Set to default */
659 		tcp_tcbhashsize = 512;
660 	}
661 
662 	if (!powerof2(tcp_tcbhashsize)) {
663 		int old_hash_size = tcp_tcbhashsize;
664 		tcp_tcbhashsize = scale_to_powerof2(tcp_tcbhashsize);
665 		/* Lower limit of 16  */
666 		if (tcp_tcbhashsize < 16) {
667 			tcp_tcbhashsize = 16;
668 		}
669 		printf("WARNING: TCB hash size not a power of 2, "
670 		    "scaled from %d to %d.\n",
671 		    old_hash_size,
672 		    tcp_tcbhashsize);
673 	}
674 
675 	hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_hashbase,
676 	    tcbinfo.ipi_hashbase_count);
677 	tcbinfo.ipi_hashmask = tcbinfo.ipi_hashbase_count - 1;
678 	hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_porthashbase,
679 	    tcbinfo.ipi_porthashbase_count);
680 	tcbinfo.ipi_porthashmask = tcbinfo.ipi_porthashbase_count - 1;
681 	tcbinfo.ipi_zone = tcpcbzone;
682 
683 	tcbinfo.ipi_gc = tcp_gc;
684 	tcbinfo.ipi_timer = tcp_itimer;
685 	in_pcbinfo_attach(&tcbinfo);
686 
687 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
688 	if (max_protohdr < TCP_MINPROTOHDR) {
689 		max_protohdr = (int)P2ROUNDUP(TCP_MINPROTOHDR, sizeof(uint32_t));
690 	}
691 	if (max_linkhdr + max_protohdr > MCLBYTES) {
692 		panic("tcp_init");
693 	}
694 #undef TCP_MINPROTOHDR
695 
696 	/* Initialize time wait and timer lists */
697 	TAILQ_INIT(&tcp_tw_tailq);
698 
699 	bzero(&tcp_timer_list, sizeof(tcp_timer_list));
700 	LIST_INIT(&tcp_timer_list.lhead);
701 	/*
702 	 * allocate group and attribute for the tcp timer list
703 	 */
704 	tcp_timer_list.mtx_grp = lck_grp_alloc_init("tcptimerlist",
705 	    LCK_GRP_ATTR_NULL);
706 	lck_mtx_init(&tcp_timer_list.mtx, tcp_timer_list.mtx_grp,
707 	    LCK_ATTR_NULL);
708 
709 	tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL);
710 	if (tcp_timer_list.call == NULL) {
711 		panic("failed to allocate call entry 1 in tcp_init");
712 	}
713 
714 	/* Initialize TCP Cache */
715 	tcp_cache_init();
716 
717 	tcp_mpkl_log_object = MPKL_CREATE_LOGOBJECT("com.apple.xnu.tcp");
718 	if (tcp_mpkl_log_object == NULL) {
719 		panic("MPKL_CREATE_LOGOBJECT failed");
720 	}
721 
722 	if (PE_parse_boot_argn("tcp_log", &tcp_log_enable_flags, sizeof(tcp_log_enable_flags))) {
723 		os_log(OS_LOG_DEFAULT, "tcp_init: set tcp_log_enable_flags to 0x%x", tcp_log_enable_flags);
724 	}
725 
726 	if (PE_parse_boot_argn("tcp_link_heuristics", &tcp_link_heuristics_flags, sizeof(tcp_link_heuristics_flags))) {
727 		os_log(OS_LOG_DEFAULT, "tcp_init: set tcp_link_heuristics_flags to 0x%x", tcp_link_heuristics_flags);
728 	}
729 
730 	/*
731 	 * If more than 4GB of actual memory is available, increase the
732 	 * maximum allowed receive and send socket buffer size.
733 	 */
734 	if (mem_actual >= (1ULL << (GBSHIFT + 2))) {
735 		if (serverperfmode) {
736 			tcp_autorcvbuf_max = 8 * 1024 * 1024;
737 			tcp_autosndbuf_max = 8 * 1024 * 1024;
738 		} else {
739 			tcp_autorcvbuf_max = 4 * 1024 * 1024;
740 			tcp_autosndbuf_max = 4 * 1024 * 1024;
741 		}
742 
743 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.autorcvbufmax, tcp_autorcvbuf_max);
744 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.autosndbufmax, tcp_autosndbuf_max);
745 	}
746 
747 	/* Initialize the TCP CCA array */
748 	tcp_cc_init();
749 
750 	read_frandom(&isn_secret, sizeof(isn_secret));
751 
752 	bzero(&tcp_rst_rlc_state, sizeof(struct in_endpoints));
753 }
754 
755 /*
756  * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
757  * tcp_template used to store this data in mbufs, but we now recopy it out
758  * of the tcpcb each time to conserve mbufs.
759  */
760 void
761 tcp_fillheaders(struct mbuf *m, struct tcpcb *tp, void *ip_ptr, void *tcp_ptr,
762     struct sockaddr *local, struct sockaddr *remote)
763 {
764 	struct inpcb *inp = tp->t_inpcb;
765 	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
766 
767 	bool isipv6 = false;
768 
769 	if (local != NULL && remote != NULL) {
770 		isipv6 = (local->sa_family == AF_INET6);
771 	} else {
772 		isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
773 	}
774 
775 	if (isipv6) {
776 		struct ip6_hdr *ip6;
777 
778 		ip6 = (struct ip6_hdr *)ip_ptr;
779 		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
780 		    (inp->inp_flow & IPV6_FLOWINFO_MASK);
781 		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
782 		    (IPV6_VERSION & IPV6_VERSION_MASK);
783 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
784 		ip6->ip6_nxt = IPPROTO_TCP;
785 		ip6->ip6_hlim = 0;
786 		if (local != NULL) {
787 			ip6->ip6_src = SIN6(local)->sin6_addr;
788 		} else {
789 			ip6->ip6_src = inp->in6p_laddr;
790 		}
791 		if (remote != NULL) {
792 			ip6->ip6_dst = SIN6(remote)->sin6_addr;
793 		} else {
794 			ip6->ip6_dst = inp->in6p_faddr;
795 		}
796 
797 		if (m->m_flags & M_PKTHDR) {
798 			uint32_t lifscope = IFSCOPE_NONE, fifscope = IFSCOPE_NONE;
799 			if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
800 				lifscope = inp->inp_lifscope;
801 			} else if (SIN6(local)->sin6_scope_id != IFSCOPE_NONE) {
802 				lifscope = SIN6(local)->sin6_scope_id;
803 			}
804 			if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
805 				fifscope = inp->inp_fifscope;
806 			} else if (SIN6(remote)->sin6_scope_id != IFSCOPE_NONE) {
807 				fifscope = SIN6(remote)->sin6_scope_id;
808 			}
809 			ip6_output_setsrcifscope(m, lifscope, NULL);
810 			ip6_output_setdstifscope(m, fifscope, NULL);
811 		}
812 		tcp_hdr->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
813 		    htonl(sizeof(struct tcphdr) + IPPROTO_TCP));
814 	} else {
815 		struct ip *ip = (struct ip *) ip_ptr;
816 
817 		ip->ip_vhl = IP_VHL_BORING;
818 		ip->ip_tos = 0;
819 		ip->ip_len = 0;
820 		ip->ip_id = 0;
821 		ip->ip_off = 0;
822 		ip->ip_ttl = 0;
823 		ip->ip_sum = 0;
824 		ip->ip_p = IPPROTO_TCP;
825 		if (local != NULL) {
826 			ip->ip_src = SIN(local)->sin_addr;
827 		} else {
828 			ip->ip_src = inp->inp_laddr;
829 		}
830 		if (remote != NULL) {
831 			ip->ip_dst = SIN(remote)->sin_addr;
832 		} else {
833 			ip->ip_dst = inp->inp_faddr;
834 		}
835 		tcp_hdr->th_sum =
836 		    in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
837 		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
838 	}
839 	if (local != NULL) {
840 		tcp_hdr->th_sport = SIN(local)->sin_port;
841 	} else {
842 		tcp_hdr->th_sport = inp->inp_lport;
843 	}
844 	if (remote != NULL) {
845 		tcp_hdr->th_dport = SIN(remote)->sin_port;
846 	} else {
847 		tcp_hdr->th_dport = inp->inp_fport;
848 	}
849 	tcp_hdr->th_seq = 0;
850 	tcp_hdr->th_ack = 0;
851 	tcp_hdr->th_x2 = 0;
852 	tcp_hdr->th_off = 5;
853 	tcp_hdr->th_flags = 0;
854 	tcp_hdr->th_win = 0;
855 	tcp_hdr->th_urp = 0;
856 }
857 
858 static uint8_t
859 tcp_filloptions(struct tcpopt *peer_to, uint16_t thflags, uint16_t mss, uint8_t rcv_scale,
860     uint32_t ts_offset, u_char *__counted_by(TCP_MAXOLEN) optp)
861 {
862 	uint8_t optlen = 0;
863 	struct tcpopt to;
864 
865 	to.to_flags = 0;
866 
867 	if (thflags & TH_SYN) {
868 		to.to_mss = mss;
869 		to.to_flags = TOF_MSS;
870 		if (peer_to->to_flags & TOF_SCALE) {
871 			to.to_wscale = rcv_scale;
872 			to.to_flags |= TOF_SCALE;
873 		}
874 		if (peer_to->to_flags & TOF_SACKPERM) {
875 			to.to_flags |= TOF_SACKPERM;
876 		}
877 	}
878 	if ((peer_to->to_flags & TOF_TS)) {
879 		uint32_t tcp_now_local = os_access_once(tcp_now);
880 		to.to_tsval = ts_offset + tcp_now_local;
881 		to.to_tsecr = peer_to->to_tsval;
882 		to.to_flags |= TOF_TS;
883 	}
884 	optlen = tcp_addoptions(&to, optp, optp + TCP_MAXOLEN);
885 
886 	return optlen;
887 }
888 
889 /*
890  * Create template to be used to send tcp packets on a connection.
891  * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
892  * use for this function is in keepalives, which use tcp_respond.
893  */
894 struct tcptemp *
895 tcp_maketemplate(struct tcpcb *tp, struct mbuf **mp,
896     struct sockaddr *local, struct sockaddr *remote)
897 {
898 	struct mbuf *m;
899 	struct tcptemp *n;
900 
901 	*mp = m = m_get(M_DONTWAIT, MT_HEADER);
902 	if (m == NULL) {
903 		return NULL;
904 	}
905 	m->m_len = sizeof(struct tcptemp);
906 	n = mtod(m, struct tcptemp *);
907 
908 	tcp_fillheaders(m, tp, (void *)&n->tt_ipgen, (void *)&n->tt_t, local, remote);
909 	return n;
910 }
911 
912 /*
913  * Send a single message to the TCP at address specified by
914  * the given TCP/IP header.  If m == 0, then we make a copy
915  * of the tcpiphdr at ti and send directly to the addressed host.
916  * This is used to force keep alive messages out using the TCP
917  * template for a connection.  If flags are given then we send
918  * a message back to the TCP which originated the segment ti,
919  * and discard the mbuf containing it and any other attached mbufs.
920  *
921  * In any case the ack and sequence number of the transmitted
922  * segment are as specified by the parameters.
923  *
924  * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
925  */
926 void
927 tcp_respond(struct tcpcb *tp, void *ipgen __sized_by(ipgen_size), size_t ipgen_size __unused,
928     struct tcphdr *th, struct mbuf *m, tcp_seq ack, tcp_seq seq, uint32_t rcv_win, uint16_t flags,
929     struct tcpopt *peer_to, uint16_t mss, uint8_t rcv_scale, uint32_t ts_offset,
930     struct tcp_respond_args *tra, bool send_syncookie)
931 {
932 	uint16_t tlen;
933 	uint8_t optlen = 0;
934 	int win = 0;
935 	struct route *ro = 0;
936 	struct route sro;
937 	struct ip *ip;
938 	struct tcphdr *nth;
939 	struct route_in6 *ro6 = 0;
940 	struct route_in6 sro6;
941 	struct ip6_hdr *ip6;
942 	int isipv6;
943 	struct ifnet *outif;
944 	int sotc = SO_TC_UNSPEC;
945 	bool check_qos_marking_again = FALSE;
946 	uint32_t sifscope = IFSCOPE_NONE, fifscope = IFSCOPE_NONE;
947 
948 	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
949 	ip6 = ipgen;
950 	ip = ipgen;
951 
952 	if (tp) {
953 		check_qos_marking_again = tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE ? FALSE : TRUE;
954 		sifscope = tp->t_inpcb->inp_lifscope;
955 		fifscope = tp->t_inpcb->inp_fifscope;
956 		if (!(flags & TH_RST)) {
957 			win = tcp_sbspace(tp);
958 			if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) {
959 				win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
960 			}
961 		}
962 		if (isipv6) {
963 			ro6 = &tp->t_inpcb->in6p_route;
964 		} else {
965 			ro = &tp->t_inpcb->inp_route;
966 		}
967 	} else {
968 		if (isipv6) {
969 			ro6 = &sro6;
970 			bzero(ro6, sizeof(*ro6));
971 		} else {
972 			ro = &sro;
973 			bzero(ro, sizeof(*ro));
974 		}
975 		if (rcv_win != 0) {
976 			/* Set TCP receive window if provided */
977 			win = rcv_win;
978 		}
979 	}
980 	if (m == 0) {
981 		m = m_gethdr(M_DONTWAIT, MT_HEADER);    /* MAC-OK */
982 		if (m == NULL) {
983 			return;
984 		}
985 		tlen = 0;
986 		m->m_data += max_linkhdr;
987 		if (isipv6) {
988 			VERIFY((MHLEN - max_linkhdr) >=
989 			    (sizeof(*ip6) + sizeof(*nth)));
990 			bcopy((caddr_t)ip6, mtod(m, caddr_t),
991 			    sizeof(struct ip6_hdr));
992 			ip6 = mtod(m, struct ip6_hdr *);
993 			nth = (struct tcphdr *)(void *)(ip6 + 1);
994 		} else {
995 			VERIFY((MHLEN - max_linkhdr) >=
996 			    (sizeof(*ip) + sizeof(*nth)));
997 			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
998 			ip = mtod(m, struct ip *);
999 			nth = (struct tcphdr *)(void *)(ip + 1);
1000 		}
1001 		bcopy(th, nth, sizeof(struct tcphdr));
1002 #if MPTCP
1003 		if ((tp) && (tp->t_mpflags & TMPF_RESET)) {
1004 			flags = (TH_RST | TH_ACK);
1005 		} else if (!send_syncookie)
1006 #endif
1007 		flags = TH_ACK;
1008 	} else {
1009 		m_freem(m->m_next);
1010 		m->m_next = 0;
1011 		m->m_data = (uintptr_t)ipgen;
1012 		/* m_len is set later */
1013 		tlen = 0;
1014 #define xchg(a, b, type) { type t; t = a; a = b; b = t; }
1015 		if (isipv6) {
1016 			ip6_getsrcifaddr_info(m, &sifscope, NULL);
1017 			ip6_getdstifaddr_info(m, &fifscope, NULL);
1018 			if (!in6_embedded_scope) {
1019 				m->m_pkthdr.pkt_flags &= ~PKTF_IFAINFO;
1020 			}
1021 			/* Expect 32-bit aligned IP on strict-align platforms */
1022 			IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6);
1023 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1024 			nth = (struct tcphdr *)(void *)(ip6 + 1);
1025 		} else {
1026 			/* Expect 32-bit aligned IP on strict-align platforms */
1027 			IP_HDR_STRICT_ALIGNMENT_CHECK(ip);
1028 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
1029 			nth = (struct tcphdr *)(void *)(ip + 1);
1030 		}
1031 		if (th != nth) {
1032 			/*
1033 			 * this is usually a case when an extension header
1034 			 * exists between the IPv6 header and the
1035 			 * TCP header.
1036 			 */
1037 			nth->th_sport = th->th_sport;
1038 			nth->th_dport = th->th_dport;
1039 		}
1040 		xchg(nth->th_dport, nth->th_sport, n_short);
1041 #undef xchg
1042 	}
1043 
1044 	if (peer_to != NULL) {
1045 		u_char *optp = (u_char *)(nth + 1);
1046 		optlen = tcp_filloptions(peer_to, flags, mss, rcv_scale, ts_offset, optp);
1047 		tlen += optlen;
1048 	}
1049 
1050 	if (isipv6) {
1051 		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) +
1052 		    tlen));
1053 		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1054 		ip6_output_setsrcifscope(m, sifscope, NULL);
1055 		ip6_output_setdstifscope(m, fifscope, NULL);
1056 	} else {
1057 		tlen += sizeof(struct tcpiphdr);
1058 		ip->ip_len = tlen;
1059 		ip->ip_ttl = (uint8_t)ip_defttl;
1060 	}
1061 	m->m_len = tlen;
1062 	m->m_pkthdr.len = tlen;
1063 	m->m_pkthdr.rcvif = 0;
1064 	if (tra->keep_alive) {
1065 		m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
1066 	}
1067 
1068 	nth->th_seq = htonl(seq);
1069 	nth->th_ack = htonl(ack);
1070 	nth->th_x2 = 0;
1071 	nth->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1072 	tcp_set_flags(nth, flags);
1073 	if (tp) {
1074 		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
1075 	} else {
1076 		nth->th_win = htons((u_short)win);
1077 	}
1078 	nth->th_urp = 0;
1079 	if (isipv6) {
1080 		nth->th_sum = 0;
1081 		nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
1082 		    htonl((tlen - sizeof(struct ip6_hdr)) + IPPROTO_TCP));
1083 		m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
1084 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1085 		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
1086 		    ro6 && ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
1087 	} else {
1088 		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1089 		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
1090 		m->m_pkthdr.csum_flags = CSUM_TCP;
1091 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1092 	}
1093 
1094 	if (tcp_rst_rlc_compress(mtod(m, void *), m->m_len, nth) == true) {
1095 		m_freem(m);
1096 		return;
1097 	}
1098 
1099 #if NECP
1100 	necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0, 0);
1101 #endif /* NECP */
1102 
1103 #if IPSEC
1104 	if (tp != NULL && tp->t_inpcb->inp_sp != NULL &&
1105 	    ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
1106 		m_freem(m);
1107 		return;
1108 	}
1109 #endif
1110 
1111 	if (tp != NULL) {
1112 		u_int32_t svc_flags = 0;
1113 		if (isipv6) {
1114 			svc_flags |= PKT_SCF_IPV6;
1115 		}
1116 		sotc = tp->t_inpcb->inp_socket->so_traffic_class;
1117 		if ((flags & TH_RST) == 0) {
1118 			set_packet_service_class(m, tp->t_inpcb->inp_socket,
1119 			    sotc, svc_flags);
1120 		} else {
1121 			m_set_service_class(m, MBUF_SC_BK_SYS);
1122 		}
1123 
1124 		/* Embed flowhash and flow control flags */
1125 		m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
1126 		m->m_pkthdr.pkt_flowid = tp->t_inpcb->inp_flowhash;
1127 		m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
1128 		m->m_pkthdr.pkt_proto = IPPROTO_TCP;
1129 		m->m_pkthdr.tx_tcp_pid = tp->t_inpcb->inp_socket->last_pid;
1130 		m->m_pkthdr.tx_tcp_e_pid = tp->t_inpcb->inp_socket->e_pid;
1131 
1132 		if (flags & TH_RST) {
1133 			m->m_pkthdr.comp_gencnt = tp->t_comp_ack_gencnt;
1134 		}
1135 	} else {
1136 		if (flags & TH_RST) {
1137 			m->m_pkthdr.comp_gencnt = TCP_ACK_COMPRESSION_DUMMY;
1138 			m_set_service_class(m, MBUF_SC_BK_SYS);
1139 		}
1140 	}
1141 
1142 	if (isipv6) {
1143 		struct ip6_out_args ip6oa;
1144 		bzero(&ip6oa, sizeof(ip6oa));
1145 		ip6oa.ip6oa_boundif = tra->ifscope;
1146 		ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
1147 		ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
1148 		ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1149 
1150 		if (tra->ifscope != IFSCOPE_NONE) {
1151 			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
1152 		}
1153 		if (tra->nocell) {
1154 			ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
1155 		}
1156 		if (tra->noexpensive) {
1157 			ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
1158 		}
1159 		if (tra->noconstrained) {
1160 			ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
1161 		}
1162 		if (tra->awdl_unrestricted) {
1163 			ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;
1164 		}
1165 		if (tra->intcoproc_allowed) {
1166 			ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED;
1167 		}
1168 		if (tra->management_allowed) {
1169 			ip6oa.ip6oa_flags |= IP6OAF_MANAGEMENT_ALLOWED;
1170 		}
1171 		if (tra->ultra_constrained_allowed) {
1172 			ip6oa.ip6oa_flags |= IP6OAF_ULTRA_CONSTRAINED_ALLOWED;
1173 		}
1174 		ip6oa.ip6oa_sotc = sotc;
1175 		if (tp != NULL) {
1176 			if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1177 				ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
1178 			}
1179 			ip6oa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
1180 			if (check_qos_marking_again) {
1181 				ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY;
1182 			}
1183 			ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1184 		}
1185 		(void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
1186 		    NULL, &ip6oa);
1187 
1188 		if (check_qos_marking_again) {
1189 			struct inpcb *inp = tp->t_inpcb;
1190 			inp->inp_policyresult.results.qos_marking_gencount = ip6oa.qos_marking_gencount;
1191 			if (ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) {
1192 				inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1193 			} else {
1194 				inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1195 			}
1196 		}
1197 
1198 		if (tp != NULL && ro6 != NULL && ro6->ro_rt != NULL &&
1199 		    (outif = ro6->ro_rt->rt_ifp) !=
1200 		    tp->t_inpcb->in6p_last_outifp) {
1201 			tp->t_inpcb->in6p_last_outifp = outif;
1202 #if SKYWALK
1203 			if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1204 				netns_set_ifnet(&tp->t_inpcb->inp_netns_token,
1205 				    tp->t_inpcb->in6p_last_outifp);
1206 			}
1207 #endif /* SKYWALK */
1208 		}
1209 
1210 		if (ro6 == &sro6) {
1211 			ROUTE_RELEASE(ro6);
1212 		}
1213 	} else {
1214 		struct ip_out_args ipoa;
1215 		bzero(&ipoa, sizeof(ipoa));
1216 		ipoa.ipoa_boundif = tra->ifscope;
1217 		ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
1218 		ipoa.ipoa_sotc = SO_TC_UNSPEC;
1219 		ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1220 
1221 		if (tra->ifscope != IFSCOPE_NONE) {
1222 			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1223 		}
1224 		if (tra->nocell) {
1225 			ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
1226 		}
1227 		if (tra->noexpensive) {
1228 			ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
1229 		}
1230 		if (tra->noconstrained) {
1231 			ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
1232 		}
1233 		if (tra->awdl_unrestricted) {
1234 			ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
1235 		}
1236 		if (tra->management_allowed) {
1237 			ipoa.ipoa_flags |= IPOAF_MANAGEMENT_ALLOWED;
1238 		}
1239 		ipoa.ipoa_sotc = sotc;
1240 		if (tp != NULL) {
1241 			if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1242 				ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1243 			}
1244 			if (!(tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
1245 				ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
1246 			}
1247 			ipoa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
1248 			ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1249 		}
1250 		if (ro != &sro) {
1251 			/* Copy the cached route and take an extra reference */
1252 			inp_route_copyout(tp->t_inpcb, &sro);
1253 		}
1254 		/*
1255 		 * For consistency, pass a local route copy.
1256 		 */
1257 		(void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);
1258 
1259 		if (check_qos_marking_again) {
1260 			struct inpcb *inp = tp->t_inpcb;
1261 			inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount;
1262 			if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) {
1263 				inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1264 			} else {
1265 				inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1266 			}
1267 		}
1268 		if (tp != NULL && sro.ro_rt != NULL &&
1269 		    (outif = sro.ro_rt->rt_ifp) !=
1270 		    tp->t_inpcb->inp_last_outifp) {
1271 			tp->t_inpcb->inp_last_outifp = outif;
1272 #if SKYWALK
1273 			if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1274 				netns_set_ifnet(&tp->t_inpcb->inp_netns_token, outif);
1275 			}
1276 #endif /* SKYWALK */
1277 		}
1278 		if (ro != &sro) {
1279 			/* Synchronize cached PCB route */
1280 			inp_route_copyin(tp->t_inpcb, &sro);
1281 		} else {
1282 			ROUTE_RELEASE(&sro);
1283 		}
1284 	}
1285 }
1286 
1287 /*
1288  * Create a new TCP control block, making an
1289  * empty reassembly queue and hooking it to the argument
1290  * protocol control block.  The `inp' parameter must have
1291  * come from the zone allocator set up in tcp_init().
1292  */
1293 struct tcpcb *
1294 tcp_newtcpcb(struct inpcb *inp)
1295 {
1296 	struct inp_tp *it;
1297 	struct tcpcb *tp;
1298 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1299 	uint32_t random_32;
1300 
1301 	calculate_tcp_clock();
1302 
1303 	it = (struct inp_tp *)(void *)inp;
1304 	tp = &it->tcb;
1305 
1306 	bzero((char *) tp, sizeof(struct tcpcb));
1307 	LIST_INIT(&tp->t_segq);
1308 	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
1309 
1310 	tp->t_flags = TF_REQ_SCALE | (tcp_do_timestamps ? TF_REQ_TSTMP : 0);
1311 	tp->t_flagsext |= TF_SACK_ENABLE;
1312 
1313 	if (tcp_rack) {
1314 		tp->t_flagsext |= TF_RACK_ENABLED;
1315 	}
1316 
1317 	if (tcp_syncookie == 1) {
1318 		tp->t_flagsext |= TF_SYN_COOKIE_ENABLED;
1319 	} else if (tcp_syncookie == 2) {
1320 		tp->t_flagsext |= TF_SYN_COOKIE_FORCE_ENABLED;
1321 	}
1322 
1323 	TAILQ_INIT(&tp->snd_holes);
1324 	SLIST_INIT(&tp->t_rxt_segments);
1325 	TAILQ_INIT(&tp->t_segs_sent);
1326 	RB_INIT(&tp->t_segs_sent_tree);
1327 	TAILQ_INIT(&tp->t_segs_acked);
1328 	TAILQ_INIT(&tp->seg_pool.free_segs);
1329 	SLIST_INIT(&tp->t_notify_ack);
1330 	tp->t_inpcb = inp;
1331 	/*
1332 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1333 	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives a
1334 	 * reasonable initial retransmit time.
1335 	 */
1336 	tp->t_srtt = TCPTV_SRTTBASE;
1337 	tp->t_rttvar =
1338 	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1339 	tp->t_rttmin = tcp_TCPTV_MIN;
1340 	tp->t_rxtcur = TCPTV_RTOBASE;
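	/*
	 * Illustrative: with TCPTV_SRTTBASE == 0, the scaled variance set
	 * above makes srtt + 4 * rttvar come out at about TCPTV_RTOBASE,
	 * matching the explicit t_rxtcur seed on the previous line.
	 */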
1341 
1342 	if (tcp_use_newreno) {
1343 		/* use newreno by default */
1344 		tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
1345 #if (DEVELOPMENT || DEBUG)
1346 	} else if (tcp_use_ledbat) {
1347 		/* use ledbat for testing */
1348 		tp->tcp_cc_index = TCP_CC_ALGO_BACKGROUND_INDEX;
1349 #endif
1350 	} else {
1351 		/* Set L4S state even if ifp might be NULL */
1352 		tcp_set_l4s(tp, inp->inp_last_outifp);
1353 		if (tp->l4s_enabled) {
1354 			tp->tcp_cc_index = TCP_CC_ALGO_PRAGUE_INDEX;
1355 		} else {
1356 			tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX;
1357 		}
1358 	}
1359 
1360 	tcp_cc_allocate_state(tp);
1361 
1362 	if (CC_ALGO(tp)->init != NULL) {
1363 		CC_ALGO(tp)->init(tp);
1364 	}
1365 
1366 	/* Initialize rledbat if we are using recv_bg */
1367 	if (tcp_rledbat == 1 && TCP_RECV_BG(inp->inp_socket) &&
1368 	    tcp_cc_rledbat.init != NULL) {
1369 		tcp_cc_rledbat.init(tp);
1370 	}
1371 
1372 	tp->snd_cwnd = tcp_initial_cwnd(tp);
1373 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1374 	tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1375 	tp->t_rcvtime = tcp_now;
1376 	tp->tentry.te_timer_start = tcp_now;
1377 	tp->t_persist_timeout = tcp_max_persist_timeout;
1378 	tp->t_persist_stop = 0;
1379 	tp->t_rexmtthresh = (uint8_t)tcprexmtthresh;
1380 	tp->rack.reo_wnd_multi = 1;
1381 	tp->rfbuf_ts = tcp_now;
1382 	tp->rfbuf_space = tcp_initial_cwnd(tp);
1383 	tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1384 	tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
1385 
1386 	/* Enable bandwidth measurement on this connection */
1387 	tp->t_flagsext |= TF_MEASURESNDBW;
1388 	if (tp->t_bwmeas == NULL) {
1389 		tp->t_bwmeas = tcp_bwmeas_alloc(tp);
1390 		if (tp->t_bwmeas == NULL) {
1391 			tp->t_flagsext &= ~TF_MEASURESNDBW;
1392 		}
1393 	}
1394 
1395 	/* Clear time wait tailq entry */
1396 	tp->t_twentry.tqe_next = NULL;
1397 	tp->t_twentry.tqe_prev = NULL;
1398 
1399 	read_frandom(&random_32, sizeof(random_32));
1400 	tp->t_comp_ack_gencnt = random_32;
1401 	if (tp->t_comp_ack_gencnt <= TCP_ACK_COMPRESSION_DUMMY ||
1402 	    tp->t_comp_ack_gencnt > INT_MAX) {
1403 		tp->t_comp_ack_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
1404 	}
1405 	tp->t_comp_ack_lastinc = tcp_now;
1406 
1407 	/* Initialize Accurate ECN state */
1408 	tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_feature_disabled;
1409 	tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_feature_disabled;
1410 
1411 	/*
1412 	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
1413 	 * because the socket may be bound to an IPv6 wildcard address,
1414 	 * which may match an IPv4-mapped IPv6 address.
1415 	 */
1416 	inp->inp_ip_ttl = (uint8_t)ip_defttl;
1417 	inp->inp_ppcb = (caddr_t)tp;
1418 	return tp;            /* XXX */
1419 }
1420 
1421 /*
1422  * Drop a TCP connection, reporting
1423  * the specified error.  If connection is synchronized,
1424  * then send a RST to peer.
1425  */
1426 struct tcpcb *
1427 tcp_drop(struct tcpcb *tp, int errno)
1428 {
1429 	struct socket *so = tp->t_inpcb->inp_socket;
1430 #if CONFIG_DTRACE
1431 	struct inpcb *inp = tp->t_inpcb;
1432 #endif
1433 
1434 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
1435 		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1436 		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1437 		TCP_LOG_STATE(tp, TCPS_CLOSED);
1438 		tp->t_state = TCPS_CLOSED;
1439 		(void) tcp_output(tp);
1440 		tcpstat.tcps_drops++;
1441 	} else {
1442 		tcpstat.tcps_conndrops++;
1443 	}
1444 	if (errno == ETIMEDOUT && tp->t_softerror) {
1445 		errno = tp->t_softerror;
1446 	}
1447 	so->so_error = (u_short)errno;
1448 
1449 	TCP_LOG_CONNECTION_SUMMARY(tp);
1450 
1451 	return tcp_close(tp);
1452 }
1453 
1454 void
1455 tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt)
1456 {
1457 	TCP_LOG_RTM_RTT(tp, rt);
1458 
1459 	if (rt->rt_rmx.rmx_rtt != 0 && tcp_init_rtt_from_cache != 0) {
1460 		uint32_t rtt = rt->rt_rmx.rmx_rtt;
1461 		uint32_t rttvar;
1462 		/*
1463 		 * XXX the lock bit for RTT indicates that the value
1464 		 * is also a minimum value; this is subject to time.
1465 		 */
1466 		if (rt->rt_rmx.rmx_locks & RTV_RTT) {
1467 			tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ);
1468 		} else {
1469 			tp->t_rttmin = TCPTV_REXMTMIN;
1470 		}
1471 
1472 		rtt = rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1473 		tcpstat.tcps_usedrtt++;
1474 
1475 		if (rt->rt_rmx.rmx_rttvar) {
1476 			rttvar = rt->rt_rmx.rmx_rttvar /
1477 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1478 			tcpstat.tcps_usedrttvar++;
1479 		} else {
1480 			/* default variation is +- 1 rtt */
1481 			rttvar =
1482 			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
1483 		}
1484 
1485 		TCPT_RANGESET(tp->t_rxtcur,
1486 		    tcp_rto_formula(tp->t_rttmin, rtt, rttvar),
1487 		    tp->t_rttmin, TCPTV_REXMTMAX,
1488 		    TCP_ADD_REXMTSLOP(tp));
1489 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_srtt == 0 &&
1490 	    tp->t_rxtshift == 0) {
1491 		struct ifnet *ifp = rt->rt_ifp;
1492 
1493 		if (ifp != NULL && (ifp->if_eflags & IFEF_AWDL) != 0) {
1494 			/*
1495 			 * AWDL needs a special value for the default initial retransmission timeout
1496 			 */
1497 			if (tcp_awdl_rtobase > tcp_TCPTV_MIN) {
1498 				tp->t_rttvar = ((tcp_awdl_rtobase - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1499 			} else {
1500 				tp->t_rttvar = ((tcp_TCPTV_MIN - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1501 			}
1502 			TCPT_RANGESET(tp->t_rxtcur,
1503 			    TCP_REXMTVAL(tp),
1504 			    tp->t_rttmin, TCPTV_REXMTMAX,
1505 			    TCP_ADD_REXMTSLOP(tp));
1506 		}
1507 	}
1508 
1509 	TCP_LOG_RTT_INFO(tp);
1510 }
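/*
 * Illustrative conversion, assuming RTM_RTTUNIT == 1000000 and
 * TCP_RETRANSHZ == 1000: a locked rmx_rtt of 50000 (50 ms expressed in
 * microseconds) yields t_rttmin = 50000 / (1000000 / 1000) = 50 ticks.
 */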
1511 
1512 static inline void
1513 tcp_create_ifnet_stats_per_flow(struct tcpcb *tp,
1514     struct ifnet_stats_per_flow *ifs)
1515 {
1516 	struct inpcb *inp;
1517 	struct socket *so;
1518 	if (tp == NULL || ifs == NULL) {
1519 		return;
1520 	}
1521 
1522 	bzero(ifs, sizeof(*ifs));
1523 	inp = tp->t_inpcb;
1524 	so = inp->inp_socket;
1525 
1526 	ifs->ipv4 = (inp->inp_vflag & INP_IPV6) ? 0 : 1;
1527 	ifs->local = (tp->t_flags & TF_LOCAL) ? 1 : 0;
1528 	ifs->connreset = (so->so_error == ECONNRESET) ? 1 : 0;
1529 	ifs->conntimeout = (so->so_error == ETIMEDOUT) ? 1 : 0;
1530 	ifs->ecn_flags = tp->ecn_flags;
1531 	ifs->txretransmitbytes = tp->t_stat.txretransmitbytes;
1532 	ifs->rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1533 	ifs->rxmitpkts = tp->t_stat.rxmitpkts;
1534 	ifs->rcvoopack = tp->t_rcvoopack;
1535 	ifs->pawsdrop = tp->t_pawsdrop;
1536 	ifs->sack_recovery_episodes = tp->t_sack_recovery_episode;
1537 	ifs->reordered_pkts = tp->t_reordered_pkts;
1538 	ifs->dsack_sent = tp->t_dsack_sent;
1539 	ifs->dsack_recvd = tp->t_dsack_recvd;
1540 	ifs->srtt = tp->t_srtt;
1541 	ifs->rttupdated = tp->t_rttupdated;
1542 	ifs->rttvar = tp->t_rttvar;
1543 	ifs->rttmin = get_base_rtt(tp);
1544 	if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_sndbw_max > 0) {
1545 		ifs->bw_sndbw_max = tp->t_bwmeas->bw_sndbw_max;
1546 	} else {
1547 		ifs->bw_sndbw_max = 0;
1548 	}
1549 	if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_rcvbw_max > 0) {
1550 		ifs->bw_rcvbw_max = tp->t_bwmeas->bw_rcvbw_max;
1551 	} else {
1552 		ifs->bw_rcvbw_max = 0;
1553 	}
1554 	ifs->bk_txpackets = so->so_tc_stats[MBUF_TC_BK].txpackets;
1555 	ifs->txpackets = inp->inp_mstat.ms_total.ts_txpackets;
1556 	ifs->rxpackets = inp->inp_mstat.ms_total.ts_rxpackets;
1557 }
1558 
1559 static inline void
1560 tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
1561     struct if_tcp_ecn_perf_stat *stat)
1562 {
1563 	u_int64_t curval, oldval;
1564 	stat->total_txpkts += ifs->txpackets;
1565 	stat->total_rxpkts += ifs->rxpackets;
1566 	stat->total_rxmitpkts += ifs->rxmitpkts;
1567 	stat->total_oopkts += ifs->rcvoopack;
1568 	stat->total_reorderpkts += (ifs->reordered_pkts +
1569 	    ifs->pawsdrop + ifs->dsack_sent + ifs->dsack_recvd);
1570 
1571 	/* Average RTT */
1572 	curval = ifs->srtt >> TCP_RTT_SHIFT;
1573 	if (curval > 0 && ifs->rttupdated >= 16) {
1574 		if (stat->rtt_avg == 0) {
1575 			stat->rtt_avg = curval;
1576 		} else {
1577 			oldval = stat->rtt_avg;
1578 			stat->rtt_avg = ((oldval << 4) - oldval + curval) >> 4;
1579 		}
1580 	}
1581 
1582 	/* RTT variance */
1583 	curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1584 	if (curval > 0 && ifs->rttupdated >= 16) {
1585 		if (stat->rtt_var == 0) {
1586 			stat->rtt_var = curval;
1587 		} else {
1588 			oldval = stat->rtt_var;
1589 			stat->rtt_var =
1590 			    ((oldval << 4) - oldval + curval) >> 4;
1591 		}
1592 	}
1593 
1594 	/* SACK episodes */
1595 	stat->sack_episodes += ifs->sack_recovery_episodes;
1596 	if (ifs->connreset) {
1597 		stat->rst_drop++;
1598 	}
1599 }
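/*
 * The ((old << 4) - old + cur) >> 4 updates above are a fixed-point
 * exponentially weighted moving average, new = (15 * old + cur) / 16.
 * Illustrative: old == 100, cur == 20 gives (1600 - 100 + 20) >> 4 == 95.
 */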
1600 
1601 static inline void
1602 tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
1603     struct if_lim_perf_stat *stat)
1604 {
1605 	u_int64_t curval, oldval;
1606 
1607 	stat->lim_total_txpkts += ifs->txpackets;
1608 	stat->lim_total_rxpkts += ifs->rxpackets;
1609 	stat->lim_total_retxpkts += ifs->rxmitpkts;
1610 	stat->lim_total_oopkts += ifs->rcvoopack;
1611 
1612 	if (ifs->bw_sndbw_max > 0) {
1613 		/* convert from bytes per ms to bits per second */
1614 		ifs->bw_sndbw_max *= 8000;
1615 		stat->lim_ul_max_bandwidth = MAX(stat->lim_ul_max_bandwidth,
1616 		    ifs->bw_sndbw_max);
1617 	}
1618 
1619 	if (ifs->bw_rcvbw_max > 0) {
1620 		/* convert from bytes per ms to bits per second */
1621 		ifs->bw_rcvbw_max *= 8000;
1622 		stat->lim_dl_max_bandwidth = MAX(stat->lim_dl_max_bandwidth,
1623 		    ifs->bw_rcvbw_max);
1624 	}
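	/*
	 * bw_sndbw_max and bw_rcvbw_max are tracked in bytes per
	 * millisecond; multiplying by 8000 (8 bits * 1000 ms/s) converts
	 * to bits per second, e.g. 125 bytes/ms becomes 1 Mbps.
	 */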
1625 
1626 	/* Average RTT */
1627 	curval = ifs->srtt >> TCP_RTT_SHIFT;
1628 	if (curval > 0 && ifs->rttupdated >= 16) {
1629 		if (stat->lim_rtt_average == 0) {
1630 			stat->lim_rtt_average = curval;
1631 		} else {
1632 			oldval = stat->lim_rtt_average;
1633 			stat->lim_rtt_average =
1634 			    ((oldval << 4) - oldval + curval) >> 4;
1635 		}
1636 	}
1637 
1638 	/* RTT variance */
1639 	curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1640 	if (curval > 0 && ifs->rttupdated >= 16) {
1641 		if (stat->lim_rtt_variance == 0) {
1642 			stat->lim_rtt_variance = curval;
1643 		} else {
1644 			oldval = stat->lim_rtt_variance;
1645 			stat->lim_rtt_variance =
1646 			    ((oldval << 4) - oldval + curval) >> 4;
1647 		}
1648 	}
1649 
1650 	if (stat->lim_rtt_min == 0) {
1651 		stat->lim_rtt_min = ifs->rttmin;
1652 	} else {
1653 		stat->lim_rtt_min = MIN(stat->lim_rtt_min, ifs->rttmin);
1654 	}
1655 
1656 	/* connection timeouts */
1657 	stat->lim_conn_attempts++;
1658 	if (ifs->conntimeout) {
1659 		stat->lim_conn_timeouts++;
1660 	}
1661 
1662 	/* bytes sent using background delay-based algorithms */
1663 	stat->lim_bk_txpkts += ifs->bk_txpackets;
1664 }
1665 
1666 static void
1667 tcp_free_reassq(struct tcpcb *tp)
1668 {
1669 	struct tseg_qent *q;
1670 
1671 	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
1672 		struct mbuf *m;
1673 
1674 		LIST_REMOVE(q, tqe_q);
1675 		m = tcp_destroy_reass_qent(tp, q);
1676 		m_freem(m);
1677 	}
1678 }
1679 
1680 struct tseg_qent *
1681 tcp_create_reass_qent(struct tcpcb *tp, struct mbuf *m,
1682     struct tcphdr *th, int len)
1683 {
1684 	struct tseg_qent *te;
1685 	int size;
1686 
1687 	te = tcp_reass_qent_alloc(tp->t_inpcb->inp_socket->so_proto);
1688 	if (te == NULL) {
1689 		return NULL;
1690 	}
1691 
1692 	tp->t_reassqlen++;
1693 	OSIncrementAtomic(&tcp_reass_total_qlen);
1694 
1695 	size = m_chain_capacity(m);
1696 	tcp_memacct_add(size);
1697 	tp->t_reassq_mbcnt += size;
1698 
1699 	te->tqe_m = m;
1700 	te->tqe_th = th;
1701 	te->tqe_len = len;
1702 
1703 	return te;
1704 }
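/*
 * Note that t_reassqlen, tcp_reass_total_qlen, t_reassq_mbcnt and the
 * memory accounting charged above are all released symmetrically by
 * tcp_destroy_reass_qent(), so entries created here must be torn down
 * through that path.
 */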
1705 
1706 struct mbuf *
1707 tcp_destroy_reass_qent(struct tcpcb *tp, struct tseg_qent *q)
1708 {
1709 	struct mbuf *m = q->tqe_m;
1710 	int size;
1711 
1712 	size = m_chain_capacity(m);
1713 	tcp_memacct_sub(size);
1714 	tp->t_reassq_mbcnt -= size;
1715 
1716 	tp->t_reassqlen--;
1717 	OSDecrementAtomic(&tcp_reass_total_qlen);
1718 	tcp_reass_qent_free(tp->t_inpcb->inp_socket->so_proto, q);
1719 
1720 	return m;
1721 }
1722 
1723 struct tseg_qent *
1724 tcp_reass_qent_alloc(struct protosw *proto)
1725 {
1726 	struct tseg_qent *reass;
1727 
1728 	if (proto_memacct_hardlimit(proto)) {
1729 		return NULL;
1730 	}
1731 	reass = zalloc_flags(tcp_reass_zone, Z_NOPAGEWAIT);
1732 	if (reass == NULL) {
1733 		return NULL;
1734 	}
1735 
1736 	proto_memacct_add(proto, kalloc_type_size(tcp_reass_zone));
1737 
1738 	return reass;
1739 }
1740 
1741 void
1742 tcp_reass_qent_free(struct protosw *proto, struct tseg_qent *te)
1743 {
1744 	proto_memacct_sub(proto, kalloc_type_size(tcp_reass_zone));
1745 	zfree(tcp_reass_zone, te);
1746 }
1747 
1748 /*
1749  * Close a TCP control block:
1750  *	discard all space held by the tcp
1751  *	discard internet protocol block
1752  *	wake up any sleepers
1753  */
1754 struct tcpcb *
1755 tcp_close(struct tcpcb *tp)
1756 {
1757 	struct inpcb *inp = tp->t_inpcb;
1758 	struct socket *so = inp->inp_socket;
1759 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1760 	struct route *ro;
1761 	struct rtentry *rt;
1762 	int dosavessthresh;
1763 	struct ifnet_stats_per_flow ifs;
1764 
1765 	/* tcp_close was called previously, bail */
1766 	if (inp->inp_ppcb == NULL) {
1767 		return NULL;
1768 	}
1769 
1770 	tcp_del_fsw_flow(tp);
1771 
1772 	tcp_canceltimers(tp);
1773 	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
1774 
1775 	/*
1776 	 * If another thread for this tcp is currently in ip (indicated by
1777 	 * the TF_SENDINPROG flag), defer the cleanup until after it returns
1778 	 * back to tcp.  This is done to serialize the close until after all
1779 	 * pending output is finished, in order to avoid having the PCB be
1780 	 * detached and the cached route cleaned, only for ip to cache the
1781 	 * route back into the PCB again.  Note that we've cleared all the
1782 	 * timers at this point.  Set TF_CLOSING to indicate to tcp_output()
1783 	 * that it should call us again once it returns from ip; at that
1784 	 * point both flags should be cleared and we can proceed further
1785 	 * with the cleanup.
1786 	 */
1787 	if ((tp->t_flags & TF_CLOSING) ||
1788 	    inp->inp_sndinprog_cnt > 0) {
1789 		tp->t_flags |= TF_CLOSING;
1790 		return NULL;
1791 	}
1792 
1793 	TCP_LOG_CONNECTION_SUMMARY(tp);
1794 
1795 	DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1796 	    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1797 
1798 	ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route);
1799 	rt = ro->ro_rt;
1800 	if (rt != NULL) {
1801 		RT_LOCK_SPIN(rt);
1802 	}
1803 
1804 	/*
1805 	 * If we got enough samples through the srtt filter,
1806 	 * save the rtt and rttvar in the routing entry.
1807 	 * 'Enough' is arbitrarily defined as 16 samples.
1808 	 * 16 samples is enough for the srtt filter to converge
1809 	 * to within 5% of the correct value; fewer samples and
1810 	 * we could save a very bogus rtt.
1811 	 *
1812 	 * Don't update the default route's characteristics and don't
1813 	 * update anything that the user "locked".
1814 	 */
1815 	if (tp->t_rttupdated >= 16) {
1816 		u_int32_t i = 0;
1817 		bool log_rtt = false;
1818 
1819 		if (isipv6) {
1820 			struct sockaddr_in6 *sin6;
1821 
1822 			if (rt == NULL) {
1823 				goto no_valid_rt;
1824 			}
1825 			sin6 = SIN6(rt_key(rt));
1826 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1827 				goto no_valid_rt;
1828 			}
1829 		} else if (ROUTE_UNUSABLE(ro) ||
1830 		    SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) {
1831 			DTRACE_TCP4(state__change, void, NULL,
1832 			    struct inpcb *, inp, struct tcpcb *, tp,
1833 			    int32_t, TCPS_CLOSED);
1834 			TCP_LOG_STATE(tp, TCPS_CLOSED);
1835 			tp->t_state = TCPS_CLOSED;
1836 			goto no_valid_rt;
1837 		}
1838 
1839 		RT_LOCK_ASSERT_HELD(rt);
1840 		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1841 			i = tp->t_srtt *
1842 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1843 			if (rt->rt_rmx.rmx_rtt && i) {
1844 				/*
1845 				 * filter this update to half the old & half
1846 				 * the new values, converting scale.
1847 				 * See route.h and tcp_var.h for a
1848 				 * description of the scaling constants.
1849 				 */
1850 				rt->rt_rmx.rmx_rtt =
1851 				    (rt->rt_rmx.rmx_rtt + i) / 2;
1852 			} else {
1853 				rt->rt_rmx.rmx_rtt = i;
1854 			}
1855 			tcpstat.tcps_cachedrtt++;
1856 			log_rtt = true;
1857 		}
1858 		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1859 			i = tp->t_rttvar *
1860 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1861 			if (rt->rt_rmx.rmx_rttvar && i) {
1862 				rt->rt_rmx.rmx_rttvar =
1863 				    (rt->rt_rmx.rmx_rttvar + i) / 2;
1864 			} else {
1865 				rt->rt_rmx.rmx_rttvar = i;
1866 			}
1867 			tcpstat.tcps_cachedrttvar++;
1868 			log_rtt = true;
1869 		}
1870 		if (log_rtt) {
1871 			TCP_LOG_RTM_RTT(tp, rt);
1872 			TCP_LOG_RTT_INFO(tp);
1873 		}
1874 		/*
1875 		 * The old comment here said:
1876 		 * update the pipelimit (ssthresh) if it has been updated
1877 	 * already or if a pipesize was specified & the threshold
1878 		 * got below half the pipesize.  I.e., wait for bad news
1879 		 * before we start updating, then update on both good
1880 		 * and bad news.
1881 		 *
1882 		 * But we want to save the ssthresh even if no pipesize is
1883 		 * specified explicitly in the route, because such
1884 		 * connections still have an implicit pipesize specified
1885 		 * by the global tcp_sendspace.  In the absence of a reliable
1886 		 * way to calculate the pipesize, it will have to do.
1887 		 */
1888 		i = tp->snd_ssthresh;
1889 		if (rt->rt_rmx.rmx_sendpipe != 0) {
1890 			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
1891 		} else {
1892 			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
1893 		}
1894 		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1895 		    i != 0 && rt->rt_rmx.rmx_ssthresh != 0) ||
1896 		    dosavessthresh) {
1897 			/*
1898 			 * convert the limit from user data bytes to
1899 			 * packets then to packet data bytes.
1900 			 */
1901 			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
1902 			if (i < 2) {
1903 				i = 2;
1904 			}
1905 			i *= (u_int32_t)(tp->t_maxseg +
1906 			    (isipv6 ? sizeof(struct ip6_hdr) +
1907 			    sizeof(struct tcphdr) :
1908 			    sizeof(struct tcpiphdr)));
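			/*
			 * E.g. i = 8760 user bytes with t_maxseg = 1460:
			 * (8760 + 730) / 1460 = 6 packets, then for IPv4
			 * 6 * (1460 + 40) = 9000 bytes of packet data,
			 * where 40 is sizeof(struct tcpiphdr).
			 */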
1909 			if (rt->rt_rmx.rmx_ssthresh) {
1910 				rt->rt_rmx.rmx_ssthresh =
1911 				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
1912 			} else {
1913 				rt->rt_rmx.rmx_ssthresh = i;
1914 			}
1915 			tcpstat.tcps_cachedssthresh++;
1916 		}
1917 	}
1918 
1919 	/*
1920 	 * Mark route for deletion if no information is cached.
1921 	 */
1922 	if (rt != NULL && (so->so_flags & SOF_OVERFLOW)) {
1923 		if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
1924 		    rt->rt_rmx.rmx_rtt == 0) {
1925 			rt->rt_flags |= RTF_DELCLONE;
1926 		}
1927 	}
1928 
1929 no_valid_rt:
1930 	if (rt != NULL) {
1931 		RT_UNLOCK(rt);
1932 	}
1933 
1934 	/* free the reassembly queue, if any */
1935 	tcp_free_reassq(tp);
1936 
1937 	/* performance stats per interface */
1938 	tcp_create_ifnet_stats_per_flow(tp, &ifs);
1939 	tcp_update_stats_per_flow(&ifs, inp->inp_last_outifp);
1940 
1941 	tcp_free_sackholes(tp);
1942 	tcp_notify_ack_free(tp);
1943 
1944 	inp_decr_sndbytes_allunsent(so, tp->snd_una);
1945 
1946 	if (tp->t_bwmeas != NULL) {
1947 		tcp_bwmeas_free(tp);
1948 	}
1949 	tcp_rxtseg_clean(tp);
1950 	tcp_segs_sent_clean(tp, true);
1951 
1952 	/* Free the packet list */
1953 	if (tp->t_pktlist_head != NULL) {
1954 		m_freem_list(tp->t_pktlist_head);
1955 	}
1956 	TCP_PKTLIST_CLEAR(tp);
1957 
1958 	TCP_LOG_STATE(tp, TCPS_CLOSED);
1959 	tp->t_state = TCPS_CLOSED;
1960 
1961 	/*
1962 	 * Issue a wakeup before detach so that we don't miss
1963 	 * a wakeup
1964 	 */
1965 	sodisconnectwakeup(so);
1966 
1967 	/*
1968 	 * Make sure to clear the TCP Keep Alive Offload as it is
1969 	 * ref counted on the interface
1970 	 */
1971 	tcp_clear_keep_alive_offload(so);
1972 
1973 	/*
1974 	 * If this is a socket that does not want to wakeup the device
1975 	 * for its traffic, the application might need to know that the
1976 	 * socket is closed, so send a notification.
1977 	 */
1978 	if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
1979 	    inp->inp_state != INPCB_STATE_DEAD &&
1980 	    !(inp->inp_flags2 & INP2_TIMEWAIT)) {
1981 		socket_post_kev_msg_closed(so);
1982 	}
1983 
1984 	if (CC_ALGO(tp)->cleanup != NULL) {
1985 		CC_ALGO(tp)->cleanup(tp);
1986 	}
1987 
1988 	tp->tcp_cc_index = TCP_CC_ALGO_NONE;
1989 
1990 	if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.cleanup != NULL) {
1991 		tcp_cc_rledbat.cleanup(tp);
1992 	}
1993 
1994 	/* Can happen if we close the socket before receiving the third ACK */
1995 	if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
1996 		OSDecrementAtomic(&tcp_tfo_halfcnt);
1997 
1998 		/* Panic if something has gone terribly wrong. */
1999 		VERIFY(tcp_tfo_halfcnt >= 0);
2000 
2001 		tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
2002 	}
2003 
2004 	if (SOCK_CHECK_DOM(so, PF_INET6)) {
2005 		in6_pcbdetach(inp);
2006 	} else {
2007 		in_pcbdetach(inp);
2008 	}
2009 
2010 	/*
2011 	 * Call soisdisconnected after detach because it might unlock the socket
2012 	 */
2013 	soisdisconnected(so);
2014 	tcpstat.tcps_closed++;
2015 	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END,
2016 	    tcpstat.tcps_closed, 0, 0, 0, 0);
2017 	return NULL;
2018 }
2019 
2020 void
2021 tcp_drain(void)
2022 {
2023 	struct inpcb *inp;
2024 	struct tcpcb *tp;
2025 
2026 	if (!lck_rw_try_lock_exclusive(&tcbinfo.ipi_lock)) {
2027 		return;
2028 	}
2029 
2030 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
2031 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
2032 		    WNT_STOPUSING) {
2033 			socket_lock(inp->inp_socket, 1);
2034 			if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
2035 			    == WNT_STOPUSING) {
2036 				/* lost a race, try the next one */
2037 				socket_unlock(inp->inp_socket, 1);
2038 				continue;
2039 			}
2040 			tp = intotcpcb(inp);
2041 
2042 			so_drain_extended_bk_idle(inp->inp_socket);
2043 
2044 			socket_unlock(inp->inp_socket, 1);
2045 		}
2046 	}
2047 	lck_rw_done(&tcbinfo.ipi_lock);
2048 }
2049 
2050 /*
2051  * Notify a tcp user of an asynchronous error;
2052  * store the error as a soft error on the connection.
2053  *
2054  * Do not wake up the user, since there currently is no mechanism
2055  * for reporting soft errors (yet - a kqueue filter may be added
2056  * to allow selecting for them).
2057  */
2058 static void
2059 tcp_notify(struct inpcb *inp, int error)
2060 {
2061 	struct tcpcb *tp;
2062 
2063 	if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) {
2064 		return; /* pcb is gone already */
2065 	}
2066 	tp = (struct tcpcb *)inp->inp_ppcb;
2067 
2068 	VERIFY(tp != NULL);
2069 	/*
2070 	 * Ignore some errors if we are hooked up.
2071 	 * If connection hasn't completed, has retransmitted several times,
2072 	 * and receives a second error, give up now.  This is better
2073 	 * than waiting a long time to establish a connection that
2074 	 * can never complete.
2075 	 */
2076 	if (tp->t_state == TCPS_ESTABLISHED &&
2077 	    (error == EHOSTUNREACH || error == ENETUNREACH ||
2078 	    error == EHOSTDOWN)) {
2079 		if (inp->inp_route.ro_rt) {
2080 			rtfree(inp->inp_route.ro_rt);
2081 			inp->inp_route.ro_rt = (struct rtentry *)NULL;
2082 		}
2083 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
2084 	    tp->t_softerror) {
2085 		tcp_drop(tp, error);
2086 	} else {
2087 		tp->t_softerror = error;
2088 	}
2089 }
2090 
2091 struct bwmeas *
2092 tcp_bwmeas_alloc(struct tcpcb *tp)
2093 {
2094 	struct bwmeas *elm;
2095 	elm = zalloc_flags(tcp_bwmeas_zone, Z_ZERO | Z_WAITOK);
2096 	elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE;
2097 	elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg;
2098 	return elm;
2099 }
2100 
2101 void
2102 tcp_bwmeas_free(struct tcpcb *tp)
2103 {
2104 	zfree(tcp_bwmeas_zone, tp->t_bwmeas);
2105 	tp->t_bwmeas = NULL;
2106 	tp->t_flagsext &= ~(TF_MEASURESNDBW);
2107 }
2108 
2109 int
2110 get_tcp_inp_list(struct inpcb * __single *inp_list __counted_by(n), size_t n, inp_gen_t gencnt)
2111 {
2112 	struct tcpcb *tp;
2113 	struct inpcb *inp;
2114 	int i = 0;
2115 
2116 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
2117 		if (i >= n) {
2118 			break;
2119 		}
2120 		if (inp->inp_gencnt <= gencnt &&
2121 		    inp->inp_state != INPCB_STATE_DEAD) {
2122 			inp_list[i++] = inp;
2123 		}
2124 	}
2125 
2126 	TAILQ_FOREACH(tp, &tcp_tw_tailq, t_twentry) {
2127 		if (i >= n) {
2128 			break;
2129 		}
2130 		inp = tp->t_inpcb;
2131 		if (inp->inp_gencnt <= gencnt &&
2132 		    inp->inp_state != INPCB_STATE_DEAD) {
2133 			inp_list[i++] = inp;
2134 		}
2135 	}
2136 	return i;
2137 }
2138 
2139 /*
2140  * tcpcb_to_otcpcb copies specific bits of a tcpcb to the otcpcb format.
2141  * The otcpcb data structure is passed to user space and must not change.
2142  */
2143 static void
2144 tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
2145 {
2146 	otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
2147 	otp->t_dupacks = tp->t_dupacks;
2148 	otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2149 	otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2150 	otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2151 	otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2152 	otp->t_inpcb =
2153 	    (_TCPCB_PTR(struct inpcb *))VM_KERNEL_ADDRHASH(tp->t_inpcb);
2154 	otp->t_state = tp->t_state;
2155 	otp->t_flags = tp->t_flags;
2156 	otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2157 	otp->snd_una = tp->snd_una;
2158 	otp->snd_max = tp->snd_max;
2159 	otp->snd_nxt = tp->snd_nxt;
2160 	otp->snd_up = tp->snd_up;
2161 	otp->snd_wl1 = tp->snd_wl1;
2162 	otp->snd_wl2 = tp->snd_wl2;
2163 	otp->iss = tp->iss;
2164 	otp->irs = tp->irs;
2165 	otp->rcv_nxt = tp->rcv_nxt;
2166 	otp->rcv_adv = tp->rcv_adv;
2167 	otp->rcv_wnd = tp->rcv_wnd;
2168 	otp->rcv_up = tp->rcv_up;
2169 	otp->snd_wnd = tp->snd_wnd;
2170 	otp->snd_cwnd = tp->snd_cwnd;
2171 	otp->snd_ssthresh = tp->snd_ssthresh;
2172 	otp->t_maxopd = tp->t_maxopd;
2173 	otp->t_rcvtime = tp->t_rcvtime;
2174 	otp->t_starttime = tp->t_starttime;
2175 	otp->t_rtttime = tp->t_rtttime;
2176 	otp->t_rtseq = tp->t_rtseq;
2177 	otp->t_rxtcur = tp->t_rxtcur;
2178 	otp->t_maxseg = tp->t_maxseg;
2179 	otp->t_srtt = tp->t_srtt;
2180 	otp->t_rttvar = tp->t_rttvar;
2181 	otp->t_rxtshift = tp->t_rxtshift;
2182 	otp->t_rttmin = tp->t_rttmin;
2183 	otp->t_rttupdated = tp->t_rttupdated;
2184 	otp->max_sndwnd = tp->max_sndwnd;
2185 	otp->t_softerror = tp->t_softerror;
2186 	otp->t_oobflags = tp->t_oobflags;
2187 	otp->t_iobc = tp->t_iobc;
2188 	otp->snd_scale = tp->snd_scale;
2189 	otp->rcv_scale = tp->rcv_scale;
2190 	otp->request_r_scale = tp->request_r_scale;
2191 	otp->requested_s_scale = tp->requested_s_scale;
2192 	otp->ts_recent = tp->ts_recent;
2193 	otp->ts_recent_age = tp->ts_recent_age;
2194 	otp->last_ack_sent = tp->last_ack_sent;
2195 	otp->cc_send = 0;
2196 	otp->cc_recv = 0;
2197 	otp->snd_recover = tp->snd_recover;
2198 	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2199 	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2200 	otp->t_badrxtwin = 0;
2201 }
2202 
2203 static int
2204 tcp_pcblist SYSCTL_HANDLER_ARGS
2205 {
2206 #pragma unused(oidp, arg1, arg2)
2207 	int error, i = 0, n, sz;
2208 	struct inpcb **inp_list;
2209 	inp_gen_t gencnt;
2210 	struct xinpgen xig;
2211 
2212 	/*
2213 	 * The process of preparing the TCB list is too time-consuming and
2214 	 * resource-intensive to repeat twice on every request.
2215 	 */
2216 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
2217 	if (req->oldptr == USER_ADDR_NULL) {
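		/*
		 * Size-only probe: report an estimate padded by n/8
		 * (~12.5%) of headroom so the caller's buffer still
		 * suffices if PCBs are created between this probe and
		 * the actual fetch.
		 */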
2218 		n = tcbinfo.ipi_count;
2219 		req->oldidx = 2 * (sizeof(xig))
2220 		    + (n + n / 8) * sizeof(struct xtcpcb);
2221 		lck_rw_done(&tcbinfo.ipi_lock);
2222 		return 0;
2223 	}
2224 
2225 	if (req->newptr != USER_ADDR_NULL) {
2226 		lck_rw_done(&tcbinfo.ipi_lock);
2227 		return EPERM;
2228 	}
2229 
2230 	/*
2231 	 * OK, now we're committed to doing something.
2232 	 */
2233 	gencnt = tcbinfo.ipi_gencnt;
2234 	sz = n = tcbinfo.ipi_count;
2235 
2236 	bzero(&xig, sizeof(xig));
2237 	xig.xig_len = sizeof(xig);
2238 	xig.xig_count = n;
2239 	xig.xig_gen = gencnt;
2240 	xig.xig_sogen = so_gencnt;
2241 	error = SYSCTL_OUT(req, &xig, sizeof(xig));
2242 	if (error) {
2243 		lck_rw_done(&tcbinfo.ipi_lock);
2244 		return error;
2245 	}
2246 	/*
2247 	 * We are done if there is no pcb
2248 	 */
2249 	if (n == 0) {
2250 		lck_rw_done(&tcbinfo.ipi_lock);
2251 		return 0;
2252 	}
2253 
2254 	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2255 	if (inp_list == NULL) {
2256 		lck_rw_done(&tcbinfo.ipi_lock);
2257 		return ENOMEM;
2258 	}
2259 
2260 	n = get_tcp_inp_list(inp_list, n, gencnt);
2261 
2262 	error = 0;
2263 	for (i = 0; i < n; i++) {
2264 		struct xtcpcb xt;
2265 		caddr_t inp_ppcb __single;
2266 		struct inpcb *inp;
2267 
2268 		inp = inp_list[i];
2269 
2270 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2271 			continue;
2272 		}
2273 		socket_lock(inp->inp_socket, 1);
2274 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2275 			socket_unlock(inp->inp_socket, 1);
2276 			continue;
2277 		}
2278 		if (inp->inp_gencnt > gencnt) {
2279 			socket_unlock(inp->inp_socket, 1);
2280 			continue;
2281 		}
2282 
2283 		bzero(&xt, sizeof(xt));
2284 		xt.xt_len = sizeof(xt);
2285 		/* XXX should avoid extra copy */
2286 		inpcb_to_compat(inp, &xt.xt_inp);
2287 		inp_ppcb = inp->inp_ppcb;
2288 		if (inp_ppcb != NULL) {
2289 			tcpcb_to_otcpcb((struct tcpcb *)(void *)inp_ppcb,
2290 			    &xt.xt_tp);
2291 		} else {
2292 			bzero((char *) &xt.xt_tp, sizeof(xt.xt_tp));
2293 		}
2294 		if (inp->inp_socket) {
2295 			sotoxsocket(inp->inp_socket, &xt.xt_socket);
2296 		}
2297 
2298 		socket_unlock(inp->inp_socket, 1);
2299 
2300 		error = SYSCTL_OUT(req, &xt, sizeof(xt));
2301 	}
2302 	if (!error) {
2303 		/*
2304 		 * Give the user an updated idea of our state.
2305 		 * If the generation differs from what we told
2306 		 * her before, she knows that something happened
2307 		 * while we were processing this request, and it
2308 		 * might be necessary to retry.
2309 		 */
2310 		bzero(&xig, sizeof(xig));
2311 		xig.xig_len = sizeof(xig);
2312 		xig.xig_gen = tcbinfo.ipi_gencnt;
2313 		xig.xig_sogen = so_gencnt;
2314 		xig.xig_count = tcbinfo.ipi_count;
2315 		error = SYSCTL_OUT(req, &xig, sizeof(xig));
2316 	}
2317 
2318 	lck_rw_done(&tcbinfo.ipi_lock);
2319 	kfree_type(struct inpcb *, sz, inp_list);
2320 	return error;
2321 }
2322 
2323 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2324     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2325     tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
2326 
2327 #if XNU_TARGET_OS_OSX
2328 
2329 static void
2330 tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
2331 {
2332 	otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
2333 	otp->t_dupacks = tp->t_dupacks;
2334 	otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2335 	otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2336 	otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2337 	otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2338 	otp->t_state = tp->t_state;
2339 	otp->t_flags = tp->t_flags;
2340 	otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2341 	otp->snd_una = tp->snd_una;
2342 	otp->snd_max = tp->snd_max;
2343 	otp->snd_nxt = tp->snd_nxt;
2344 	otp->snd_up = tp->snd_up;
2345 	otp->snd_wl1 = tp->snd_wl1;
2346 	otp->snd_wl2 = tp->snd_wl2;
2347 	otp->iss = tp->iss;
2348 	otp->irs = tp->irs;
2349 	otp->rcv_nxt = tp->rcv_nxt;
2350 	otp->rcv_adv = tp->rcv_adv;
2351 	otp->rcv_wnd = tp->rcv_wnd;
2352 	otp->rcv_up = tp->rcv_up;
2353 	otp->snd_wnd = tp->snd_wnd;
2354 	otp->snd_cwnd = tp->snd_cwnd;
2355 	otp->snd_ssthresh = tp->snd_ssthresh;
2356 	otp->t_maxopd = tp->t_maxopd;
2357 	otp->t_rcvtime = tp->t_rcvtime;
2358 	otp->t_starttime = tp->t_starttime;
2359 	otp->t_rtttime = tp->t_rtttime;
2360 	otp->t_rtseq = tp->t_rtseq;
2361 	otp->t_rxtcur = tp->t_rxtcur;
2362 	otp->t_maxseg = tp->t_maxseg;
2363 	otp->t_srtt = tp->t_srtt;
2364 	otp->t_rttvar = tp->t_rttvar;
2365 	otp->t_rxtshift = tp->t_rxtshift;
2366 	otp->t_rttmin = tp->t_rttmin;
2367 	otp->t_rttupdated = tp->t_rttupdated;
2368 	otp->max_sndwnd = tp->max_sndwnd;
2369 	otp->t_softerror = tp->t_softerror;
2370 	otp->t_oobflags = tp->t_oobflags;
2371 	otp->t_iobc = tp->t_iobc;
2372 	otp->snd_scale = tp->snd_scale;
2373 	otp->rcv_scale = tp->rcv_scale;
2374 	otp->request_r_scale = tp->request_r_scale;
2375 	otp->requested_s_scale = tp->requested_s_scale;
2376 	otp->ts_recent = tp->ts_recent;
2377 	otp->ts_recent_age = tp->ts_recent_age;
2378 	otp->last_ack_sent = tp->last_ack_sent;
2379 	otp->cc_send = 0;
2380 	otp->cc_recv = 0;
2381 	otp->snd_recover = tp->snd_recover;
2382 	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2383 	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2384 	otp->t_badrxtwin = 0;
2385 }
2386 
2387 
2388 static int
2389 tcp_pcblist64 SYSCTL_HANDLER_ARGS
2390 {
2391 #pragma unused(oidp, arg1, arg2)
2392 	int error, i = 0, n, sz;
2393 	struct inpcb **inp_list;
2394 	inp_gen_t gencnt;
2395 	struct xinpgen xig;
2396 
2397 	/*
2398 	 * The process of preparing the TCB list is too time-consuming and
2399 	 * resource-intensive to repeat twice on every request.
2400 	 */
2401 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
2402 	if (req->oldptr == USER_ADDR_NULL) {
2403 		n = tcbinfo.ipi_count;
2404 		req->oldidx = 2 * (sizeof(xig))
2405 		    + (n + n / 8) * sizeof(struct xtcpcb64);
2406 		lck_rw_done(&tcbinfo.ipi_lock);
2407 		return 0;
2408 	}
2409 
2410 	if (req->newptr != USER_ADDR_NULL) {
2411 		lck_rw_done(&tcbinfo.ipi_lock);
2412 		return EPERM;
2413 	}
2414 
2415 	/*
2416 	 * OK, now we're committed to doing something.
2417 	 */
2418 	gencnt = tcbinfo.ipi_gencnt;
2419 	sz = n = tcbinfo.ipi_count;
2420 
2421 	bzero(&xig, sizeof(xig));
2422 	xig.xig_len = sizeof(xig);
2423 	xig.xig_count = n;
2424 	xig.xig_gen = gencnt;
2425 	xig.xig_sogen = so_gencnt;
2426 	error = SYSCTL_OUT(req, &xig, sizeof(xig));
2427 	if (error) {
2428 		lck_rw_done(&tcbinfo.ipi_lock);
2429 		return error;
2430 	}
2431 	/*
2432 	 * We are done if there is no pcb
2433 	 */
2434 	if (n == 0) {
2435 		lck_rw_done(&tcbinfo.ipi_lock);
2436 		return 0;
2437 	}
2438 
2439 	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2440 	if (inp_list == NULL) {
2441 		lck_rw_done(&tcbinfo.ipi_lock);
2442 		return ENOMEM;
2443 	}
2444 
2445 	n = get_tcp_inp_list(inp_list, n, gencnt);
2446 
2447 	error = 0;
2448 	for (i = 0; i < n; i++) {
2449 		struct xtcpcb64 xt;
2450 		struct inpcb *inp;
2451 
2452 		inp = inp_list[i];
2453 
2454 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2455 			continue;
2456 		}
2457 		socket_lock(inp->inp_socket, 1);
2458 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2459 			socket_unlock(inp->inp_socket, 1);
2460 			continue;
2461 		}
2462 		if (inp->inp_gencnt > gencnt) {
2463 			socket_unlock(inp->inp_socket, 1);
2464 			continue;
2465 		}
2466 
2467 		bzero(&xt, sizeof(xt));
2468 		xt.xt_len = sizeof(xt);
2469 		inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
2470 		xt.xt_inpcb.inp_ppcb =
2471 		    (uint64_t)VM_KERNEL_ADDRHASH(inp->inp_ppcb);
2472 		if (inp->inp_ppcb != NULL) {
2473 			tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb,
2474 			    &xt);
2475 		}
2476 		if (inp->inp_socket) {
2477 			sotoxsocket64(inp->inp_socket,
2478 			    &xt.xt_inpcb.xi_socket);
2479 		}
2480 
2481 		socket_unlock(inp->inp_socket, 1);
2482 
2483 		error = SYSCTL_OUT(req, &xt, sizeof(xt));
2484 	}
2485 	if (!error) {
2486 		/*
2487 		 * Give the user an updated idea of our state.
2488 		 * If the generation differs from what we told
2489 		 * her before, she knows that something happened
2490 		 * while we were processing this request, and it
2491 		 * might be necessary to retry.
2492 		 */
2493 		bzero(&xig, sizeof(xig));
2494 		xig.xig_len = sizeof(xig);
2495 		xig.xig_gen = tcbinfo.ipi_gencnt;
2496 		xig.xig_sogen = so_gencnt;
2497 		xig.xig_count = tcbinfo.ipi_count;
2498 		error = SYSCTL_OUT(req, &xig, sizeof(xig));
2499 	}
2500 
2501 	lck_rw_done(&tcbinfo.ipi_lock);
2502 	kfree_type(struct inpcb *, sz, inp_list);
2503 	return error;
2504 }
2505 
2506 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64,
2507     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2508     tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
2509 
2510 #endif /* XNU_TARGET_OS_OSX */
2511 
2512 static int
2513 tcp_pcblist_n SYSCTL_HANDLER_ARGS
2514 {
2515 #pragma unused(oidp, arg1, arg2)
2516 	int error = 0;
2517 
2518 	error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo);
2519 
2520 	return error;
2521 }
2522 
2523 
2524 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n,
2525     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2526     tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
2527 
2528 static int
2529 tcp_progress_probe_enable SYSCTL_HANDLER_ARGS
2530 {
2531 #pragma unused(oidp, arg1, arg2)
2532 
2533 	return ntstat_tcp_progress_enable(req);
2534 }
2535 
2536 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, progress_enable,
2537     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
2538     tcp_progress_probe_enable, "S", "Enable/disable TCP keepalive probing on the specified link(s)");
2539 
2540 
2541 __private_extern__ void
2542 tcp_get_ports_used(ifnet_t ifp, int protocol, uint32_t flags,
2543     bitstr_t *__counted_by(bitstr_size(IP_PORTRANGE_SIZE)) bitfield)
2544 {
2545 	inpcb_get_ports_used(ifp, protocol, flags, bitfield,
2546 	    &tcbinfo);
2547 }
2548 
2549 __private_extern__ uint32_t
2550 tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
2551 {
2552 	return inpcb_count_opportunistic(ifindex, &tcbinfo, flags);
2553 }
2554 
2555 __private_extern__ uint32_t
2556 tcp_find_anypcb_byaddr(struct ifaddr *ifa)
2557 {
2558 #if SKYWALK
2559 	if (netns_is_enabled()) {
2560 		return netns_find_anyres_byaddr(ifa, IPPROTO_TCP);
2561 	} else
2562 #endif /* SKYWALK */
2563 	return inpcb_find_anypcb_byaddr(ifa, &tcbinfo);
2564 }
2565 
2566 static void
2567 tcp_handle_msgsize(struct ip *ip, struct inpcb *inp)
2568 {
2569 	struct rtentry *rt = NULL;
2570 	u_short ifscope = IFSCOPE_NONE;
2571 	int mtu;
2572 	struct sockaddr_in icmpsrc = {
2573 		.sin_len = sizeof(struct sockaddr_in),
2574 		.sin_family = AF_INET, .sin_port = 0, .sin_addr = { .s_addr = 0 },
2575 		.sin_zero = { 0, 0, 0, 0, 0, 0, 0, 0 }
2576 	};
2577 	struct icmp *icp = NULL;
2578 
2579 	icp = __container_of(ip, struct icmp, icmp_ip);
2580 	icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
2581 
2582 	/*
2583 	 * MTU discovery:
2584 	 * If we got a needfrag and there is a host route to the
2585 	 * original destination, and the MTU is not locked, then
2586 	 * set the MTU in the route to the suggested new value
2587 	 * (if given) and then notify as usual.  The ULPs will
2588 	 * notice that the MTU has changed and adapt accordingly.
2589 	 * If no new MTU was suggested, then we guess a new one
2590 	 * less than the current value.  If the new MTU is
2591 	 * unreasonably small (defined by sysctl tcp_minmss), then
2592 	 * we reset the MTU to the interface value and enable the
2593 	 * lock bit, indicating that we are no longer doing MTU
2594 	 * discovery.
2595 	 */
2596 	if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) {
2597 		rt = inp->inp_route.ro_rt;
2598 	}
2599 
2600 	/*
2601 	 * icmp6_mtudisc_update scopes the routing lookup
2602 	 * to the incoming interface (delivered from mbuf
2603 	 * packet header).
2604 	 * That is mostly ok but for asymmetric networks
2605 	 * that may be an issue.
2606 	 * Frag needed OR Packet too big really communicates
2607 	 * MTU for the out data path.
2608 	 * Take the interface scope from cached route or
2609 	 * the last outgoing interface from inp
2610 	 */
2611 	if (rt != NULL) {
2612 		ifscope = (rt->rt_ifp != NULL) ?
2613 		    rt->rt_ifp->if_index : IFSCOPE_NONE;
2614 	} else {
2615 		ifscope = (inp->inp_last_outifp != NULL) ?
2616 		    inp->inp_last_outifp->if_index : IFSCOPE_NONE;
2617 	}
2618 
2619 	if ((rt == NULL) ||
2620 	    !(rt->rt_flags & RTF_HOST) ||
2621 	    (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING))) {
2622 		rt = rtalloc1_scoped(SA(&icmpsrc), 0, RTF_CLONING | RTF_PRCLONING, ifscope);
2623 	} else if (rt) {
2624 		RT_LOCK(rt);
2625 		rtref(rt);
2626 		RT_UNLOCK(rt);
2627 	}
2628 
2629 	if (rt != NULL) {
2630 		RT_LOCK(rt);
2631 		if ((rt->rt_flags & RTF_HOST) &&
2632 		    !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
2633 			mtu = ntohs(icp->icmp_nextmtu);
2634 			/*
2635 			 * XXX Stock BSD has changed the following
2636 			 * to compare with icp->icmp_ip.ip_len
2637 			 * to converge faster when sent packet
2638 			 * < route's MTU. We may want to adopt
2639 			 * that change.
2640 			 */
2641 			if (mtu == 0) {
2642 				mtu = ip_next_mtu(rt->rt_rmx.
2643 				    rmx_mtu, 1);
2644 			}
2645 #if DEBUG_MTUDISC
2646 			printf("MTU for %s reduced to %d\n",
2647 			    inet_ntop(AF_INET,
2648 			    &icmpsrc.sin_addr, ipv4str,
2649 			    sizeof(ipv4str)), mtu);
2650 #endif
2651 			if (mtu < max(296, (tcp_minmss +
2652 			    sizeof(struct tcpiphdr)))) {
2653 				rt->rt_rmx.rmx_locks |= RTV_MTU;
2654 			} else if (rt->rt_rmx.rmx_mtu > mtu) {
2655 				rt->rt_rmx.rmx_mtu = mtu;
2656 			}
2657 		}
2658 		RT_UNLOCK(rt);
2659 		rtfree(rt);
2660 	}
2661 }
2662 
2663 void
2664 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp)
2665 {
2666 	tcp_seq icmp_tcp_seq;
2667 	struct ipctlparam *ctl_param __single = vip;
2668 	struct ip *ip = NULL;
2669 	struct mbuf *m = NULL;
2670 	struct in_addr faddr;
2671 	struct inpcb *inp;
2672 	struct tcpcb *tp;
2673 	struct tcphdr *th;
2674 	struct icmp *icp;
2675 	size_t off;
2676 #if SKYWALK
2677 	union sockaddr_in_4_6 sock_laddr;
2678 	struct protoctl_ev_val prctl_ev_val;
2679 #endif /* SKYWALK */
2680 	void (*notify)(struct inpcb *, int) = tcp_notify;
2681 
2682 	if (ctl_param != NULL) {
2683 		ip = ctl_param->ipc_icmp_ip;
2684 		icp = ctl_param->ipc_icmp;
2685 		m = ctl_param->ipc_m;
2686 		off = ctl_param->ipc_off;
2687 	} else {
2688 		ip = NULL;
2689 		icp = NULL;
2690 		m = NULL;
2691 		off = 0;
2692 	}
2693 
2694 	faddr = SIN(sa)->sin_addr;
2695 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
2696 		return;
2697 	}
2698 
2699 	if ((unsigned)cmd >= PRC_NCMDS) {
2700 		return;
2701 	}
2702 
2703 	/* Source quench is deprecated */
2704 	if (cmd == PRC_QUENCH) {
2705 		return;
2706 	}
2707 
2708 	if (cmd == PRC_MSGSIZE) {
2709 		notify = tcp_mtudisc;
2710 	} else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2711 	    cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2712 	    cmd == PRC_TIMXCEED_INTRANS) && ip) {
2713 		notify = tcp_drop_syn_sent;
2714 	}
2715 	/*
2716 	 * Hostdead is ugly because it goes linearly through all PCBs.
2717 	 * XXX: We never get this from ICMP, otherwise it makes an
2718 	 * excellent DoS attack on machines with many connections.
2719 	 */
2720 	else if (cmd == PRC_HOSTDEAD) {
2721 		ip = NULL;
2722 	} else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2723 		return;
2724 	}
2725 
2726 #if SKYWALK
2727 	bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2728 	bzero(&sock_laddr, sizeof(sock_laddr));
2729 #endif /* SKYWALK */
2730 
2731 	if (ip == NULL) {
2732 		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
2733 #if SKYWALK
2734 		protoctl_event_enqueue_nwk_wq_entry(ifp, NULL,
2735 		    sa, 0, 0, IPPROTO_TCP, cmd, NULL);
2736 #endif /* SKYWALK */
2737 		return;
2738 	}
2739 
2740 	/* Check if we can safely get the sport, dport and the sequence number from the tcp header. */
2741 	if (m == NULL ||
2742 	    (m->m_len < off + (sizeof(unsigned short) + sizeof(unsigned short) + sizeof(tcp_seq)))) {
2743 		/* Insufficient length */
2744 		return;
2745 	}
2746 
2747 	th = (struct tcphdr*)(void*)(mtod(m, uint8_t*) + off);
2748 	icmp_tcp_seq = ntohl(th->th_seq);
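	/*
	 * The length check above only guarantees the first 8 bytes of the
	 * TCP header (th_sport, th_dport, th_seq), so no field past th_seq
	 * may be dereferenced through 'th' here.
	 */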
2749 
2750 	inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
2751 	    ip->ip_src, th->th_sport, 0, NULL);
2752 
2753 	if (inp == NULL ||
2754 	    inp->inp_socket == NULL) {
2755 #if SKYWALK
2756 		if (cmd == PRC_MSGSIZE) {
2757 			prctl_ev_val.val = ntohs(icp->icmp_nextmtu);
2758 		}
2759 		prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2760 
2761 		sock_laddr.sin.sin_family = AF_INET;
2762 		sock_laddr.sin.sin_len = sizeof(sock_laddr.sin);
2763 		sock_laddr.sin.sin_addr = ip->ip_src;
2764 
2765 		protoctl_event_enqueue_nwk_wq_entry(ifp,
2766 		    SA(&sock_laddr), sa,
2767 		    th->th_sport, th->th_dport, IPPROTO_TCP,
2768 		    cmd, &prctl_ev_val);
2769 #endif /* SKYWALK */
2770 		return;
2771 	}
2772 
2773 	socket_lock(inp->inp_socket, 1);
2774 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2775 	    WNT_STOPUSING) {
2776 		socket_unlock(inp->inp_socket, 1);
2777 		return;
2778 	}
2779 
2780 	if (PRC_IS_REDIRECT(cmd)) {
2781 		/* signal EHOSTDOWN, as it flushes the cached route */
2782 		(*notify)(inp, EHOSTDOWN);
2783 	} else {
2784 		tp = intotcpcb(inp);
2785 		if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2786 		    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2787 			if (cmd == PRC_MSGSIZE) {
2788 				tcp_handle_msgsize(ip, inp);
2789 			}
2790 
2791 			(*notify)(inp, inetctlerrmap[cmd]);
2792 		}
2793 	}
2794 	socket_unlock(inp->inp_socket, 1);
2795 }
2796 
2797 void
2798 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
2799 {
2800 	tcp_seq icmp_tcp_seq;
2801 	struct in6_addr *dst;
2802 	void (*notify)(struct inpcb *, int) = tcp_notify;
2803 	struct ip6_hdr *ip6;
2804 	struct mbuf *m;
2805 	struct inpcb *inp;
2806 	struct tcpcb *tp;
2807 	struct icmp6_hdr *icmp6;
2808 	struct ip6ctlparam *ip6cp = NULL;
2809 	const struct sockaddr_in6 *sa6_src = NULL;
2810 	unsigned int mtu;
2811 	unsigned int off;
2812 
2813 	struct tcp_ports {
2814 		uint16_t th_sport;
2815 		uint16_t th_dport;
2816 	} t_ports;
2817 #if SKYWALK
2818 	union sockaddr_in_4_6 sock_laddr;
2819 	struct protoctl_ev_val prctl_ev_val;
2820 #endif /* SKYWALK */
2821 
2822 	if (sa->sa_family != AF_INET6 ||
2823 	    sa->sa_len != sizeof(struct sockaddr_in6)) {
2824 		return;
2825 	}
2826 
2827 	/* Source quench is deprecated */
2828 	if (cmd == PRC_QUENCH) {
2829 		return;
2830 	}
2831 
2832 	if ((unsigned)cmd >= PRC_NCMDS) {
2833 		return;
2834 	}
2835 
2836 	/* if the parameter is from icmp6, decode it. */
2837 	if (d != NULL) {
2838 		ip6cp = (struct ip6ctlparam *)d;
2839 		icmp6 = ip6cp->ip6c_icmp6;
2840 		m = ip6cp->ip6c_m;
2841 		ip6 = ip6cp->ip6c_ip6;
2842 		off = ip6cp->ip6c_off;
2843 		sa6_src = ip6cp->ip6c_src;
2844 		dst = ip6cp->ip6c_finaldst;
2845 	} else {
2846 		m = NULL;
2847 		ip6 = NULL;
2848 		off = 0;        /* fool gcc */
2849 		sa6_src = &sa6_any;
2850 		dst = NULL;
2851 	}
2852 
2853 	if (cmd == PRC_MSGSIZE) {
2854 		notify = tcp_mtudisc;
2855 	} else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2856 	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) &&
2857 	    ip6 != NULL) {
2858 		notify = tcp_drop_syn_sent;
2859 	}
2860 	/*
2861 	 * Hostdead is ugly because it goes linearly through all PCBs.
2862 	 * XXX: We never get this from ICMP, otherwise it makes an
2863 	 * excellent DoS attack on machines with many connections.
2864 	 */
2865 	else if (cmd == PRC_HOSTDEAD) {
2866 		ip6 = NULL;
2867 	} else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2868 		return;
2869 	}
2870 
2871 #if SKYWALK
2872 	bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2873 	bzero(&sock_laddr, sizeof(sock_laddr));
2874 #endif /* SKYWALK */
2875 
2876 	if (ip6 == NULL) {
2877 		in6_pcbnotify(&tcbinfo, sa, 0, SA(sa6_src), 0, cmd, NULL, notify);
2878 #if SKYWALK
2879 		protoctl_event_enqueue_nwk_wq_entry(ifp, NULL, sa,
2880 		    0, 0, IPPROTO_TCP, cmd, NULL);
2881 #endif /* SKYWALK */
2882 		return;
2883 	}
2884 
2885 	/* Check if we can safely get the ports from the tcp hdr */
2886 	if (m == NULL ||
2887 	    (m->m_pkthdr.len <
2888 	    (int32_t) (off + sizeof(struct tcp_ports)))) {
2889 		return;
2890 	}
2891 	bzero(&t_ports, sizeof(struct tcp_ports));
2892 	m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
2893 
2894 	off += sizeof(struct tcp_ports);
2895 	if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
2896 		return;
2897 	}
2898 	m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
2899 	icmp_tcp_seq = ntohl(icmp_tcp_seq);
2900 
2901 	if (cmd == PRC_MSGSIZE) {
2902 		mtu = ntohl(icmp6->icmp6_mtu);
2903 		/*
2904 		 * If no alternative MTU was proposed, or the proposed
2905 		 * MTU was too small, set it to the minimum.
2906 		 */
2907 		if (mtu < IPV6_MMTU) {
2908 			mtu = IPV6_MMTU - 8;
2909 		}
2910 	}
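	/*
	 * IPV6_MMTU is the 1280-byte IPv6 minimum link MTU; subtracting 8
	 * leaves room for a fragment header, the usual fallback when a
	 * packet-too-big message advertises less than 1280 bytes.
	 */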
2911 
2912 	inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, t_ports.th_dport, ip6_input_getdstifscope(m),
2913 	    &ip6->ip6_src, t_ports.th_sport, ip6_input_getsrcifscope(m), 0, NULL);
2914 
2915 	if (inp == NULL ||
2916 	    inp->inp_socket == NULL) {
2917 #if SKYWALK
2918 		if (cmd == PRC_MSGSIZE) {
2919 			prctl_ev_val.val = mtu;
2920 		}
2921 		prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2922 
2923 		sock_laddr.sin6.sin6_family = AF_INET6;
2924 		sock_laddr.sin6.sin6_len = sizeof(sock_laddr.sin6);
2925 		sock_laddr.sin6.sin6_addr = ip6->ip6_src;
2926 
2927 		protoctl_event_enqueue_nwk_wq_entry(ifp,
2928 		    SA(&sock_laddr), sa,
2929 		    t_ports.th_sport, t_ports.th_dport, IPPROTO_TCP,
2930 		    cmd, &prctl_ev_val);
2931 #endif /* SKYWALK */
2932 		return;
2933 	}
2934 
2935 	socket_lock(inp->inp_socket, 1);
2936 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2937 	    WNT_STOPUSING) {
2938 		socket_unlock(inp->inp_socket, 1);
2939 		return;
2940 	}
2941 
2942 	if (PRC_IS_REDIRECT(cmd)) {
2943 		/* signal EHOSTDOWN, as it flushes the cached route */
2944 		(*notify)(inp, EHOSTDOWN);
2945 	} else {
2946 		tp = intotcpcb(inp);
2947 		if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2948 		    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2949 			if (cmd == PRC_MSGSIZE) {
2950 				/*
2951 				 * Only process the offered MTU if it
2952 				 * is smaller than the current one.
2953 				 */
2954 				if (mtu < tp->t_maxseg +
2955 				    (sizeof(struct tcphdr) + sizeof(struct ip6_hdr))) {
2956 					(*notify)(inp, inetctlerrmap[cmd]);
2957 				}
2958 			} else {
2959 				(*notify)(inp, inetctlerrmap[cmd]);
2960 			}
2961 		}
2962 	}
2963 	socket_unlock(inp->inp_socket, 1);
2964 }
2965 
2966 
2967 /*
2968  * Following is where TCP initial sequence number generation occurs.
2969  *
2970  * There are two places where we must use initial sequence numbers:
2971  * 1.  In SYN-ACK packets.
2972  * 2.  In SYN packets.
2973  *
2974  * The ISNs in SYN-ACK packets have no monotonicity requirement,
2975  * and should be as unpredictable as possible to avoid the possibility
2976  * of spoofing and/or connection hijacking.  To satisfy this
2977  * requirement, SYN-ACK ISNs are generated via the arc4random()
2978  * function.  If exact RFC 1948 compliance is requested via sysctl,
2979  * these ISNs will be generated just like those in SYN packets.
2980  *
2981  * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
2982  * depends on this property.  In addition, these ISNs should be
2983  * unguessable so as to prevent connection hijacking.  To satisfy
2984  * the requirements of this situation, the algorithm outlined in
2985  * RFC 9293 is used to generate sequence numbers.
2986  *
2987  * For more information on the theory of operation, please see
2988  * RFC 9293.
2989  *
2990  * Implementation details:
2991  *
2992  * Time is based on the system timer, and is corrected so that it
2993  * increases by one megabyte per second.  This allows for proper
2994  * recycling on high speed LANs while still leaving over an hour
2995  * before rollover.
2996  *
2997  */
2998 
2999 #define ISN_BYTES_PER_SECOND 1048576
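/*
 * tcp_new_isn() below follows the RFC 9293 form
 *	ISN = M + F(localip, localport, remoteip, remoteport, secretkey)
 * with F computed as MD5 over the connection 4-tuple plus isn_secret,
 * and M taken from a 128ns uptime clock rather than the RFC's 4us clock.
 */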
3000 
3001 tcp_seq
3002 tcp_new_isn(struct tcpcb *tp)
3003 {
3004 	uint32_t md5_buffer[4];
3005 	tcp_seq new_isn;
3006 	struct timespec timenow;
3007 	MD5_CTX isn_ctx;
3008 
3009 	nanouptime(&timenow);
3010 
3011 	/* Compute the md5 hash and return the ISN. */
3012 	MD5Init(&isn_ctx);
3013 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport,
3014 	    sizeof(u_short));
3015 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport,
3016 	    sizeof(u_short));
3017 	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
3018 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
3019 		    sizeof(struct in6_addr));
3020 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
3021 		    sizeof(struct in6_addr));
3022 	} else {
3023 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
3024 		    sizeof(struct in_addr));
3025 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
3026 		    sizeof(struct in_addr));
3027 	}
3028 	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
3029 	MD5Final((u_char *) &md5_buffer, &isn_ctx);
3030 
3031 	new_isn = (tcp_seq) md5_buffer[0];
3032 
3033 	/*
3034 	 * We use a 128ns clock, which is equivalent to 600 Mbps and wraps at
3035 	 * 549 seconds, thus safe for 2 MSL lifetime of TIME-WAIT-state.
3036 	 */
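	/* (2^32 sequence values * 128ns each ~= 549.8s before the clock wraps.) */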
3037 	new_isn += (timenow.tv_sec * NSEC_PER_SEC + timenow.tv_nsec) >> 7;
3038 
3039 	if (__probable(tcp_randomize_timestamps)) {
3040 		tp->t_ts_offset = md5_buffer[1];
3041 	}
3042 	tp->t_latest_tx = tcp_now;
3043 
3044 	return new_isn;
3045 }
3046 
3047 
3048 /*
3049  * When a specific ICMP unreachable message is received and the
3050  * connection state is SYN-SENT, drop the connection.  This behavior
3051  * is controlled by the icmp_may_rst sysctl.
3052  */
3053 void
3054 tcp_drop_syn_sent(struct inpcb *inp, int errno)
3055 {
3056 	struct tcpcb *tp = intotcpcb(inp);
3057 
3058 	if (tp && tp->t_state == TCPS_SYN_SENT) {
3059 		tcp_drop(tp, errno);
3060 	}
3061 }
3062 
3063 /*
3064  * Get effective MTU for redirect virtual interface. Redirect
3065  * virtual interface switches between multiple delegated interfaces.
3066  * For cases where redirect forwards packets to an ipsec interface,
3067  * MTU should be adjusted to consider ESP encapsulation overhead.
3068  */
3069 uint32_t
3070 tcp_get_effective_mtu(struct rtentry *rt, uint32_t current_mtu)
3071 {
3072 	ifnet_t ifp = NULL;
3073 	ifnet_t delegated_ifp = NULL;
3074 	ifnet_t outgoing_ifp = NULL;
3075 	uint32_t min_mtu = 0;
3076 	uint32_t outgoing_mtu = 0;
3077 	uint32_t tunnel_overhead = 0;
3078 
3079 	if (rt == NULL || rt->rt_ifp == NULL) {
3080 		return current_mtu;
3081 	}
3082 
3083 	ifp = rt->rt_ifp;
3084 	if (ifp->if_subfamily != IFNET_SUBFAMILY_REDIRECT) {
3085 		return current_mtu;
3086 	}
3087 
3088 	delegated_ifp = ifp->if_delegated.ifp;
3089 	if (delegated_ifp == NULL || delegated_ifp->if_family != IFNET_FAMILY_IPSEC) {
3090 		return current_mtu;
3091 	}
3092 
3093 	min_mtu = MIN(delegated_ifp->if_mtu, current_mtu);
3094 
3095 	outgoing_ifp = delegated_ifp->if_delegated.ifp;
3096 	if (outgoing_ifp == NULL) {
3097 		return min_mtu;
3098 	}
3099 
3100 	outgoing_mtu = outgoing_ifp->if_mtu;
3101 	if (outgoing_mtu > 0) {
3102 		tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
3103 		if (outgoing_mtu > tunnel_overhead) {
3104 			outgoing_mtu -= tunnel_overhead;
3105 		}
3106 		if (outgoing_mtu < min_mtu) {
3107 			return outgoing_mtu;
3108 		}
3109 	}
3110 
3111 	return min_mtu;
3112 }
3113 
3114 /*
3115  * When `need fragmentation' ICMP is received, update our idea of the MSS
3116  * based on the new value in the route.  Also nudge TCP to send something,
3117  * since we know the packet we just sent was dropped.
3118  * This duplicates some code in the tcp_mss() function in tcp_input.c.
3119  */
3120 void
3121 tcp_mtudisc(struct inpcb *inp, __unused int errno)
3122 {
3123 	struct tcpcb *tp = intotcpcb(inp);
3124 	struct rtentry *rt;
3125 	struct socket *so = inp->inp_socket;
3126 	int mss;
3127 	u_int32_t mtu;
3128 	u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr);
3129 	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
3130 
3131 	/*
3132 	 * Nothing left to send after the socket is defunct or TCP is in the closed state
3133 	 */
3134 	if ((so->so_state & SS_DEFUNCT) || (tp != NULL && tp->t_state == TCPS_CLOSED)) {
3135 		return;
3136 	}
3137 
3138 	if (isipv6) {
3139 		protoHdrOverhead = sizeof(struct ip6_hdr) +
3140 		    sizeof(struct tcphdr);
3141 	}
3142 
3143 	if (tp != NULL) {
3144 		if (isipv6) {
3145 			rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
3146 		} else {
3147 			rt = tcp_rtlookup(inp, IFSCOPE_NONE);
3148 		}
3149 		if (!rt || !rt->rt_rmx.rmx_mtu) {
3150 			tp->t_maxopd = tp->t_maxseg =
3151 			    isipv6 ? tcp_v6mssdflt :
3152 			    tcp_mssdflt;
3153 
3154 			/* Route locked during lookup above */
3155 			if (rt != NULL) {
3156 				RT_UNLOCK(rt);
3157 			}
3158 			return;
3159 		}
3160 		mtu = rt->rt_rmx.rmx_mtu;
3161 
3162 		mtu = tcp_get_effective_mtu(rt, mtu);
3163 
3164 		/* Route locked during lookup above */
3165 		RT_UNLOCK(rt);
3166 
3167 #if NECP
3168 		// Adjust MTU if necessary.
3169 		mtu = necp_socket_get_effective_mtu(inp, mtu);
3170 #endif /* NECP */
3171 		mss = mtu - protoHdrOverhead;
3172 
3173 		if (tp->t_maxopd) {
3174 			mss = min(mss, tp->t_maxopd);
3175 		}
3176 		/*
3177 		 * XXX - The above conditional probably violates the TCP
3178 		 * spec.  The problem is that, since we don't know the
3179 		 * other end's MSS, we are supposed to use a conservative
3180 		 * default.  But, if we do that, then MTU discovery will
3181 		 * never actually take place, because the conservative
3182 		 * default is much less than the MTUs typically seen
3183 		 * on the Internet today.  For the moment, we'll sweep
3184 		 * this under the carpet.
3185 		 *
3186 		 * The conservative default might not actually be a problem
3187 		 * if the only case this occurs is when sending an initial
3188 		 * SYN with options and data to a host we've never talked
3189 		 * to before.  Then, they will reply with an MSS value which
3190 		 * will get recorded and the new parameters should get
3191 		 * recomputed.  For Further Study.
3192 		 */
3193 		if (tp->t_maxopd <= mss) {
3194 			return;
3195 		}
3196 		tp->t_maxopd = mss;
3197 
3198 		if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
3199 		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) {
3200 			mss -= TCPOLEN_TSTAMP_APPA;
3201 		}
3202 
3203 #if MPTCP
3204 		mss -= mptcp_adj_mss(tp, TRUE);
3205 #endif
3206 		if (so->so_snd.sb_hiwat < mss) {
3207 			mss = so->so_snd.sb_hiwat;
3208 		}
3209 
3210 		tp->t_maxseg = mss;
3211 
3212 		ASSERT(tp->t_maxseg);
3213 
3214 		/*
3215 		 * Reset the slow-start flight size as it may depend on the
3216 		 * new MSS
3217 		 */
3218 		if (CC_ALGO(tp)->cwnd_init != NULL) {
3219 			CC_ALGO(tp)->cwnd_init(tp);
3220 		}
3221 
3222 		if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.rwnd_init != NULL) {
3223 			tcp_cc_rledbat.rwnd_init(tp);
3224 		}
3225 
3226 		tcpstat.tcps_mturesent++;
3227 		tp->t_rtttime = 0;
3228 		tp->snd_nxt = tp->snd_una;
3229 		tcp_output(tp);
3230 	}
3231 }
3232 
3233 /*
3234  * Look-up the routing entry to the peer of this inpcb.  If no route
3235  * is found and one cannot be allocated, return NULL.  This routine
3236  * is called by TCP routines that access the rmx structure and by tcp_mss
3237  * to get the interface MTU.  If a route is found, this routine will
3238  * hold the rtentry lock; the caller is responsible for unlocking.
3239  */
3240 struct rtentry *
3241 tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope)
3242 {
3243 	struct route *ro;
3244 	struct rtentry *rt;
3245 	struct tcpcb *tp;
3246 
3247 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3248 
3249 	ro = &inp->inp_route;
3250 	if ((rt = ro->ro_rt) != NULL) {
3251 		RT_LOCK(rt);
3252 	}
3253 
3254 	if (ROUTE_UNUSABLE(ro)) {
3255 		if (rt != NULL) {
3256 			RT_UNLOCK(rt);
3257 			rt = NULL;
3258 		}
3259 		ROUTE_RELEASE(ro);
3260 		/* No route yet, so try to acquire one */
3261 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
3262 			unsigned int ifscope;
3263 
3264 			ro->ro_dst.sa_family = AF_INET;
3265 			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
3266 			SIN(&ro->ro_dst)->sin_addr = inp->inp_faddr;
3267 
3268 			/*
3269 			 * If the socket was bound to an interface, then
3270 			 * the bound-to-interface takes precedence over
3271 			 * the inbound interface passed in by the caller
3272 			 * (if we get here as part of the output path then
3273 			 * input_ifscope is IFSCOPE_NONE).
3274 			 */
3275 			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3276 			    inp->inp_boundifp->if_index : input_ifscope;
3277 
3278 			rtalloc_scoped(ro, ifscope);
3279 			if ((rt = ro->ro_rt) != NULL) {
3280 				RT_LOCK(rt);
3281 			}
3282 		}
3283 	}
3284 	if (rt != NULL) {
3285 		RT_LOCK_ASSERT_HELD(rt);
3286 	}
3287 
3288 	/*
3289 	 * Update MTU discovery determination. Don't do it if:
3290 	 *	1) it is disabled via the sysctl
3291 	 *	2) the route isn't up
3292 	 *	3) the MTU is locked (if it is, then discovery has been
3293 	 *	   disabled)
3294 	 */
3295 
3296 	tp = intotcpcb(inp);
3297 
3298 	if (!path_mtu_discovery || ((rt != NULL) &&
3299 	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3300 		tp->t_flags &= ~TF_PMTUD;
3301 	} else {
3302 		tp->t_flags |= TF_PMTUD;
3303 	}
3304 
3305 	if (rt != NULL && rt->rt_ifp != NULL) {
3306 		somultipages(inp->inp_socket,
3307 		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3308 		tcp_set_tso(tp, rt->rt_ifp);
3309 		soif2kcl(inp->inp_socket,
3310 		    (rt->rt_ifp->if_eflags & IFEF_2KCL));
3311 		/* Don't do ECN and L4S for Loopback & Cellular (if L4S is default) */
3312 		if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0 &&
3313 		    !(IFNET_IS_CELLULAR(rt->rt_ifp) && rt->rt_ifp->if_l4s_mode == IFRTYPE_L4S_DEFAULT)) {
3314 			tcp_set_ecn(tp);
3315 			tcp_set_l4s(tp, rt->rt_ifp);
3316 		}
3317 		if (inp->inp_last_outifp == NULL) {
3318 			inp->inp_last_outifp = rt->rt_ifp;
3319 #if SKYWALK
3320 			if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3321 				netns_set_ifnet(&inp->inp_netns_token,
3322 				    inp->inp_last_outifp);
3323 			}
3324 #endif /* SKYWALK */
3325 		}
3326 	}
3327 
3328 	/* Note if the peer is local */
3329 	if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3330 	    (rt->rt_gateway->sa_family == AF_LINK ||
3331 	    rt->rt_ifp->if_flags & IFF_LOOPBACK ||
3332 	    in_localaddr(inp->inp_faddr))) {
3333 		tp->t_flags |= TF_LOCAL;
3334 	}
3335 
3336 	/*
3337 	 * Caller needs to call RT_UNLOCK(rt).
3338 	 */
3339 	return rt;
3340 }
3341 
3342 struct rtentry *
3343 tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope)
3344 {
3345 	struct route_in6 *ro6;
3346 	struct rtentry *rt;
3347 	struct tcpcb *tp;
3348 
3349 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3350 
3351 	ro6 = &inp->in6p_route;
3352 	if ((rt = ro6->ro_rt) != NULL) {
3353 		RT_LOCK(rt);
3354 	}
3355 
3356 	if (ROUTE_UNUSABLE(ro6)) {
3357 		if (rt != NULL) {
3358 			RT_UNLOCK(rt);
3359 			rt = NULL;
3360 		}
3361 		ROUTE_RELEASE(ro6);
3362 		/* No route yet, so try to acquire one */
3363 		if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
3364 			struct sockaddr_in6 *dst6;
3365 			unsigned int ifscope;
3366 
3367 			dst6 = SIN6(&ro6->ro_dst);
3368 			dst6->sin6_family = AF_INET6;
3369 			dst6->sin6_len = sizeof(*dst6);
3370 			dst6->sin6_addr = inp->in6p_faddr;
3371 
3372 			/*
3373 			 * If the socket was bound to an interface, then
3374 			 * the bound-to-interface takes precedence over
3375 			 * the inbound interface passed in by the caller
3376 			 * (if we get here as part of the output path then
3377 			 * input_ifscope is IFSCOPE_NONE).
3378 			 */
3379 			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3380 			    inp->inp_boundifp->if_index : input_ifscope;
3381 
3382 			rtalloc_scoped((struct route *)ro6, ifscope);
3383 			if ((rt = ro6->ro_rt) != NULL) {
3384 				RT_LOCK(rt);
3385 			}
3386 		}
3387 	}
3388 	if (rt != NULL) {
3389 		RT_LOCK_ASSERT_HELD(rt);
3390 	}
3391 
3400 
3401 	tp = intotcpcb(inp);
3402 
3411 	if (!path_mtu_discovery || ((rt != NULL) &&
3412 	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3413 		tp->t_flags &= ~TF_PMTUD;
3414 	} else {
3415 		tp->t_flags |= TF_PMTUD;
3416 	}
3417 
3418 	if (rt != NULL && rt->rt_ifp != NULL) {
3419 		somultipages(inp->inp_socket,
3420 		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3421 		tcp_set_tso(tp, rt->rt_ifp);
3422 		soif2kcl(inp->inp_socket,
3423 		    (rt->rt_ifp->if_eflags & IFEF_2KCL));
3424 		/* Don't do ECN and L4S for Loopback & Cellular (if L4S is default) */
3425 		if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0 &&
3426 		    !(IFNET_IS_CELLULAR(rt->rt_ifp) && rt->rt_ifp->if_l4s_mode == IFRTYPE_L4S_DEFAULT)) {
3427 			tcp_set_ecn(tp);
3428 			tcp_set_l4s(tp, rt->rt_ifp);
3429 		}
3430 		if (inp->inp_last_outifp == NULL) {
3431 			inp->inp_last_outifp = rt->rt_ifp;
3432 #if SKYWALK
3433 			if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3434 				netns_set_ifnet(&inp->inp_netns_token,
3435 				    inp->inp_last_outifp);
3436 			}
3437 #endif /* SKYWALK */
3438 		}
3439 
3440 		/* Note if the peer is local */
3441 		if (!(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3442 		    (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
3443 		    IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
3444 		    rt->rt_gateway->sa_family == AF_LINK ||
3445 		    in6_localaddr(&inp->in6p_faddr))) {
3446 			tp->t_flags |= TF_LOCAL;
3447 		}
3448 	}
3449 
3450 	/*
3451 	 * Caller needs to call RT_UNLOCK(rt).
3452 	 */
3453 	return rt;
3454 }
3455 
3456 #if IPSEC
3457 /* compute ESP/AH header size for TCP, including outer IP header. */
3458 size_t
3459 ipsec_hdrsiz_tcp(struct tcpcb *tp)
3460 {
3461 	struct inpcb *inp;
3462 	struct mbuf *m;
3463 	size_t hdrsiz;
3464 	struct ip *ip;
3465 	struct ip6_hdr *ip6 = NULL;
3466 	struct tcphdr *th;
3467 
3468 	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) {
3469 		return 0;
3470 	}
3471 	MGETHDR(m, M_DONTWAIT, MT_DATA);        /* MAC-OK */
3472 	if (!m) {
3473 		return 0;
3474 	}
3475 
3476 	if ((inp->inp_vflag & INP_IPV6) != 0) {
3477 		ip6 = mtod(m, struct ip6_hdr *);
3478 		th = (struct tcphdr *)(void *)(ip6 + 1);
3479 		m->m_pkthdr.len = m->m_len =
3480 		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3481 		tcp_fillheaders(m, tp, ip6, th, NULL, NULL);
3482 		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3483 	} else {
3484 		ip = mtod(m, struct ip *);
3485 		th = (struct tcphdr *)(ip + 1);
3486 		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
3487 		tcp_fillheaders(m, tp, ip, th, NULL, NULL);
3488 		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3489 	}
3490 	m_free(m);
3491 	return hdrsiz;
3492 }
3493 #endif /* IPSEC */
3494 
3495 int
3496 tcp_lock(struct socket *so, int refcount, void *lr)
3497 {
3498 	lr_ref_t lr_saved = TCP_INIT_LR_SAVED(lr);
3499 
3500 retry:
3501 	if (so->so_pcb != NULL) {
3502 		if (so->so_flags & SOF_MP_SUBFLOW) {
3503 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3504 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3505 
3506 			socket_lock(mp_so, refcount);
3507 
3508 			/*
3509 			 * Check if we became non-MPTCP while waiting for the lock.
3510 			 * If yes, we have to retry to grab the right lock.
3511 			 */
3512 			if (!(so->so_flags & SOF_MP_SUBFLOW)) {
3513 				socket_unlock(mp_so, refcount);
3514 				goto retry;
3515 			}
3516 		} else {
3517 			lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3518 
3519 			if (so->so_flags & SOF_MP_SUBFLOW) {
3520 				/*
3521 				 * While waiting for the lock, we might have
3522 				 * become MPTCP-enabled (see mptcp_subflow_socreate).
3523 				 */
3524 				lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3525 				goto retry;
3526 			}
3527 		}
3528 	} else {
3529 		panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s",
3530 		    so, lr_saved, solockhistory_nr(so));
3531 		/* NOTREACHED */
3532 	}
3533 
3534 	if (so->so_usecount < 0) {
3535 		panic("tcp_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s",
3536 		    so, so->so_pcb, lr_saved, so->so_usecount,
3537 		    solockhistory_nr(so));
3538 		/* NOTREACHED */
3539 	}
3540 	if (refcount) {
3541 		so->so_usecount++;
3542 	}
3543 	so->lock_lr[so->next_lock_lr] = lr_saved;
3544 	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
3545 	return 0;
3546 }
3547 
3548 int
3549 tcp_unlock(struct socket *so, int refcount, void *lr)
3550 {
3551 	lr_ref_t lr_saved = TCP_INIT_LR_SAVED(lr);
3552 
3554 #ifdef MORE_TCPLOCK_DEBUG
3555 	printf("tcp_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x "
3556 	    "lr=0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so),
3557 	    (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
3558 	    (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)),
3559 	    so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
3560 #endif
3561 	if (refcount) {
3562 		so->so_usecount--;
3563 	}
3564 
3565 	if (so->so_usecount < 0) {
3566 		panic("tcp_unlock: so=%p usecount=%x lrh= %s",
3567 		    so, so->so_usecount, solockhistory_nr(so));
3568 		/* NOTREACHED */
3569 	}
3570 	if (so->so_pcb == NULL) {
3571 		panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s",
3572 		    so, so->so_usecount, lr_saved, solockhistory_nr(so));
3573 		/* NOTREACHED */
3574 	} else {
3575 		so->unlock_lr[so->next_unlock_lr] = lr_saved;
3576 		so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
3577 
3578 		if (so->so_flags & SOF_MP_SUBFLOW) {
3579 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3580 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3581 
3582 			socket_lock_assert_owned(mp_so);
3583 
3584 			socket_unlock(mp_so, refcount);
3585 		} else {
3586 			LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
3587 			    LCK_MTX_ASSERT_OWNED);
3588 			lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3589 		}
3590 	}
3591 	return 0;
3592 }
3593 
3594 lck_mtx_t *
3595 tcp_getlock(struct socket *so, int flags)
3596 {
3597 	struct inpcb *inp = sotoinpcb(so);
3598 
3599 	if (so->so_pcb) {
3600 		if (so->so_usecount < 0) {
3601 			panic("tcp_getlock: so=%p usecount=%x lrh= %s",
3602 			    so, so->so_usecount, solockhistory_nr(so));
3603 		}
3604 
3605 		if (so->so_flags & SOF_MP_SUBFLOW) {
3606 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3607 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3608 
3609 			return mp_so->so_proto->pr_getlock(mp_so, flags);
3610 		} else {
3611 			return &inp->inpcb_mtx;
3612 		}
3613 	} else {
3614 		panic("tcp_getlock: so=%p NULL so_pcb %s",
3615 		    so, solockhistory_nr(so));
3616 		return so->so_proto->pr_domain->dom_mtx;
3617 	}
3618 }
3619 
3620 /*
3621  * Determine if we can grow the receive socket buffer to avoid sending
3622  * a zero window update to the peer. We allow even socket buffers that
3623  * have fixed size (set by the application) to grow if the resource
3624  * constraints are met. They will also be trimmed after the application
3625  * reads data.
3626  */
3627 static void
3628 tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb)
3629 {
3630 	u_int32_t rcvbufinc = tp->t_maxseg << 4;
3631 	u_int32_t rcvbuf = sb->sb_hiwat;
3632 	struct socket *so = tp->t_inpcb->inp_socket;
3633 
3634 	if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
3635 		return;
3636 	}
3637 
3638 	if (tcp_do_autorcvbuf == 1 &&
3639 	    (tp->t_flags & TF_SLOWLINK) == 0 &&
3640 	    (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
3641 	    (rcvbuf - sb->sb_cc) < rcvbufinc &&
3642 	    rcvbuf < tcp_autorcvbuf_max &&
3643 	    (sb->sb_idealsize > 0 &&
3644 	    sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
3645 		sbreserve(sb,
3646 		    min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max));
3647 	}
3648 }
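
/*
 * Editor's note (illustrative arithmetic, not from the original source):
 * with a typical t_maxseg of 1448, the increment above is
 * 1448 << 4 = 23168 bytes (16 segments). The buffer is therefore grown
 * only when fewer than ~16 MSS of free space remain, the autotuning cap
 * has not been hit, and the ideal size has not been exceeded by more
 * than one increment.
 */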
3649 
3650 int32_t
3651 tcp_sbspace(struct tcpcb *tp)
3652 {
3653 	struct socket *so = tp->t_inpcb->inp_socket;
3654 	struct sockbuf *sb = &so->so_rcv;
3655 	u_int32_t rcvbuf;
3656 	int32_t space;
3657 	int32_t pending = 0;
3658 
3659 	if (so->so_flags & SOF_MP_SUBFLOW) {
3660 		/* We still need to grow TCP's buffer to have a BDP-estimate */
3661 		tcp_sbrcv_grow_rwin(tp, sb);
3662 
3663 		return mptcp_sbspace(tptomptp(tp));
3664 	}
3665 
3666 	tcp_sbrcv_grow_rwin(tp, sb);
3667 
3668 	/* hiwat might have changed */
3669 	rcvbuf = sb->sb_hiwat;
3670 
3671 	space = (int32_t)imin((rcvbuf - sb->sb_cc),
3672 	    (sb->sb_mbmax - sb->sb_mbcnt));
3673 	if (space < 0) {
3674 		space = 0;
3675 	}
3676 
3677 #if CONTENT_FILTER
3678 	/* Compensate for data being processed by content filters */
3679 	pending = cfil_sock_data_space(sb);
3680 #endif /* CONTENT_FILTER */
3681 	if (pending > space) {
3682 		space = 0;
3683 	} else {
3684 		space -= pending;
3685 	}
3686 
3687 	/*
3688 	 * Avoid increasing window size if the current window
3689 	 * is already very low, we could be in "persist" mode and
3690 	 * we could break some apps (see rdar://5409343)
3691 	 */
3692 
3693 	if (space < tp->t_maxseg) {
3694 		return space;
3695 	}
3696 
3697 	/* Clip window size for slower link */
3698 
3699 	if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) {
3700 		return imin(space, slowlink_wsize);
3701 	}
3702 
3703 	return space;
3704 }
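
/*
 * Editor's note: a minimal standalone sketch of the space computation
 * above, with made-up numbers. With rcvbuf = 131072, sb_cc = 8192,
 * sb_mbmax = 262144, sb_mbcnt = 16384 and 4096 bytes pending in a
 * content filter, the advertised space would be
 * min(131072 - 8192, 262144 - 16384) - 4096 = 118784 bytes.
 */
#if 0	/* illustration only, not compiled */
#include <stdint.h>

static int32_t
sbspace_sketch(uint32_t rcvbuf, uint32_t sb_cc, uint32_t sb_mbmax,
    uint32_t sb_mbcnt, int32_t pending)
{
	/* byte space vs. mbuf-accounting space, whichever is smaller */
	uint32_t bytes = rcvbuf - sb_cc;
	uint32_t mbufs = sb_mbmax - sb_mbcnt;
	int32_t space = (int32_t)(bytes < mbufs ? bytes : mbufs);

	if (space < 0) {
		space = 0;
	}
	/* data still held by a content filter is not yet readable */
	if (pending > space) {
		return 0;
	}
	return space - pending;
}
#endif
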
3705 /*
3706  * Checks TCP Segment Offloading capability for a given connection
3707  * and interface pair.
3708  */
3709 void
3710 tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
3711 {
3712 	struct inpcb *inp;
3713 	int isipv6;
3714 	struct ifnet *tunnel_ifp = NULL;
3715 #define IFNET_TSO_MASK (IFNET_TSO_IPV6 | IFNET_TSO_IPV4)
3716 
3717 	tp->t_flags &= ~TF_TSO;
3718 
3719 	/*
3720 	 * Bail if there's a non-TSO-capable filter on the interface.
3721 	 */
3722 	if (ifp == NULL || ifp->if_flt_no_tso_count > 0) {
3723 		return;
3724 	}
3725 
3726 	inp = tp->t_inpcb;
3727 	isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
3728 
3729 #if MPTCP
3730 	/*
3731 	 * We can't use TSO if this tcpcb belongs to an MPTCP session.
3732 	 */
3733 	if (inp->inp_socket->so_flags & SOF_MP_SUBFLOW) {
3734 		return;
3735 	}
3736 #endif
3737 	/*
3738 	 * We can't use TSO if the TSO capability of the tunnel interface does
3739 	 * not match the capability of another interface known by TCP
3740 	 */
3741 	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
3742 		u_int tunnel_if_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
3743 
3744 		if (tunnel_if_index != 0) {
3745 			ifnet_head_lock_shared();
3746 			tunnel_ifp = ifindex2ifnet[tunnel_if_index];
3747 			ifnet_head_done();
3748 		}
3749 
3750 		if (tunnel_ifp == NULL) {
3751 			return;
3752 		}
3753 
3754 		if ((ifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3755 			if (tso_debug > 0) {
3756 				os_log(OS_LOG_DEFAULT,
3757 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with ifp %s",
3758 				    __func__,
3759 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3760 				    tunnel_ifp->if_xname, ifp->if_xname);
3761 			}
3762 			return;
3763 		}
3764 		if (inp->inp_last_outifp != NULL &&
3765 		    (inp->inp_last_outifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3766 			if (tso_debug > 0) {
3767 				os_log(OS_LOG_DEFAULT,
3768 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_last_outifp %s",
3769 				    __func__,
3770 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3771 				    tunnel_ifp->if_xname, inp->inp_last_outifp->if_xname);
3772 			}
3773 			return;
3774 		}
3775 		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp != NULL &&
3776 		    (inp->inp_boundifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3777 			if (tso_debug > 0) {
3778 				os_log(OS_LOG_DEFAULT,
3779 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_boundifp %s",
3780 				    __func__,
3781 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3782 				    tunnel_ifp->if_xname, inp->inp_boundifp->if_xname);
3783 			}
3784 			return;
3785 		}
3786 	}
3787 
3788 	if (isipv6) {
3789 		if (ifp->if_hwassist & IFNET_TSO_IPV6) {
3790 			tp->t_flags |= TF_TSO;
3791 			if (ifp->if_tso_v6_mtu != 0) {
3792 				tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
3793 			} else {
3794 				tp->tso_max_segment_size = TCP_MAXWIN;
3795 			}
3796 		}
3797 	} else {
3798 		if (ifp->if_hwassist & IFNET_TSO_IPV4) {
3799 			tp->t_flags |= TF_TSO;
3800 			if (ifp->if_tso_v4_mtu != 0) {
3801 				tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
3802 			} else {
3803 				tp->tso_max_segment_size = TCP_MAXWIN;
3804 			}
3805 			if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
3806 				tp->tso_max_segment_size -=
3807 				    CLAT46_HDR_EXPANSION_OVERHD;
3808 			}
3809 		}
3810 	}
3811 
3812 	if (tso_debug > 1) {
3813 		os_log(OS_LOG_DEFAULT, "%s: %u > %u TSO %d ifp %s",
3814 		    __func__,
3815 		    ntohs(tp->t_inpcb->inp_lport),
3816 		    ntohs(tp->t_inpcb->inp_fport),
3817 		    (tp->t_flags & TF_TSO) != 0,
3818 		    ifp != NULL ? ifp->if_xname : "<NULL>");
3819 	}
3820 }
3821 
3822 /*
3823  * Function to calculate the tcp clock. The tcp clock will get updated
3824  * at the boundaries of the tcp layer. This is done at 3 places:
3825  * 1. Right before processing an input tcp packet
3826  * 2. Whenever a connection wants to access the network using tcp_usrreqs
3827  * 3. When a tcp timer fires or before tcp slow timeout
3828  *
3829  */
3830 void
3831 calculate_tcp_clock(void)
3832 {
3833 	uint32_t current_tcp_now;
3834 	struct timeval now;
3835 	uint32_t tmp;
3836 
3837 	microuptime(&now);
3838 
3839 	/*
3840 	 * Update coarse-grained networking timestamp (in sec.); the idea
3841 	 * is to update the counter returnable via net_uptime() when
3842 	 * we read time.
3843 	 */
3844 	net_update_uptime_with_time(&now);
3845 
3846 	current_tcp_now = (uint32_t)now.tv_sec * 1000 + now.tv_usec / TCP_RETRANSHZ_TO_USEC;
3847 
3848 	tmp = os_atomic_load(&tcp_now, relaxed);
3849 	if (tmp < current_tcp_now) {
3850 		os_atomic_cmpxchg(&tcp_now, tmp, current_tcp_now, relaxed);
3851 
3852 		/*
3853 		 * No cmpxchg loop needed here. If someone else updated quicker,
3854 		 * we can take that value. The only requirement is that
3855 		 * tcp_now never decreases.
3856 		 */
3857 	}
3858 }
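
/*
 * Editor's note: tcp_now advances in TCP_RETRANSHZ (millisecond) ticks.
 * Below is a minimal sketch of the same single-shot monotonic update in
 * portable C11 atomics (names are illustrative). Because every writer
 * derives its candidate from the clock, a failed compare-exchange means
 * another thread already stored an equal or newer value, so no retry
 * loop is needed; the only invariant is that the counter never moves
 * backwards.
 */
#if 0	/* illustration only, not compiled */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t clock_ms;

static void
clock_advance(uint32_t now_ms)
{
	uint32_t cur = atomic_load_explicit(&clock_ms, memory_order_relaxed);

	if (cur < now_ms) {
		/* One attempt suffices: on failure, a newer value won. */
		(void)atomic_compare_exchange_strong_explicit(&clock_ms,
		    &cur, now_ms, memory_order_relaxed, memory_order_relaxed);
	}
}
#endif
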
3859 
3860 /*
3861  * Compute receive window scaling that we are going to request
3862  * for this connection based on sb_hiwat. Try to leave some
3863  * room to potentially increase the window size up to a maximum
3864  * defined by the constant tcp_autorcvbuf_max.
3865  */
3866 uint8_t
3867 tcp_get_max_rwinscale(struct tcpcb *tp, struct socket *so)
3868 {
3869 	uint8_t rcv_wscale;
3870 	uint32_t maxsockbufsize;
3871 
3872 	rcv_wscale = MAX((uint8_t)tcp_win_scale, tp->request_r_scale);
3873 	maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
3874 	    so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
3875 
3876 	/*
3877 	 * Window scale should not exceed what is needed
3878 	 * to send the max receive window size; adding 1 to TCP_MAXWIN
3879 	 * ensures that.
3880 	 */
3881 	while (rcv_wscale < TCP_MAX_WINSHIFT &&
3882 	    ((TCP_MAXWIN + 1) << rcv_wscale) < maxsockbufsize) {
3883 		rcv_wscale++;
3884 	}
3885 	rcv_wscale = MIN(rcv_wscale, TCP_MAX_WINSHIFT);
3886 
3887 	return rcv_wscale;
3888 }
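
/*
 * Editor's note: a worked example of the loop above. For a maximum
 * receive buffer of 4 MB (4194304 bytes) with TCP_MAXWIN = 65535:
 * (65535 + 1) << 5 = 2097152 is still smaller, so the scale grows;
 * (65535 + 1) << 6 = 4194304 is not smaller, so the loop stops with
 * rcv_wscale = 6, and a 16-bit window shifted left by 6 can advertise
 * the full 4 MB.
 */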
3889 
3890 int
3891 tcp_notsent_lowat_check(struct socket *so)
3892 {
3893 	struct inpcb *inp = sotoinpcb(so);
3894 	struct tcpcb *tp = NULL;
3895 	int notsent = 0;
3896 
3897 	if (inp != NULL) {
3898 		tp = intotcpcb(inp);
3899 	}
3900 
3901 	if (tp == NULL) {
3902 		return 0;
3903 	}
3904 
3905 	notsent = so->so_snd.sb_cc -
3906 	    (tp->snd_nxt - tp->snd_una);
3907 
3908 	/*
3909 	 * When we send a FIN or SYN, not_sent can be negative.
3910 	 * In that case we also need to send a write event to the
3911 	 * process if it is waiting. In the FIN case, it will
3912 	 * get an error from send because cantsendmore will be set.
3913 	 */
3914 	if (notsent <= tp->t_notsent_lowat) {
3915 		return 1;
3916 	}
3917 
3918 	/*
3919 	 * When Nagle's algorithm is not disabled, it is better
3920 	 * to keep waking up the client until there is at least
3921 	 * one maxseg of data to write.
3922 	 */
3923 	if ((tp->t_flags & TF_NODELAY) == 0 &&
3924 	    notsent > 0 && notsent < tp->t_maxseg) {
3925 		return 1;
3926 	}
3927 	return 0;
3928 }
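
/*
 * Editor's note: the check above backs the TCP_NOTSENT_LOWAT socket
 * option. A minimal user-space sketch of opting in (error handling
 * omitted; the 128 kB threshold is an arbitrary example value):
 */
#if 0	/* illustration only, not compiled */
#include <sys/socket.h>
#include <netinet/tcp.h>

static void
set_notsent_lowat(int fd)
{
	int lowat = 128 * 1024;	/* wake the writer below 128 kB unsent */

	(void)setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
	    &lowat, sizeof(lowat));
}
#endif
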
3929 
3930 void
3931 tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3932 {
3933 	struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
3934 	uint16_t rxcount = 0;
3935 
3936 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3937 		tp->t_dsack_lastuna = tp->snd_una;
3938 	}
3939 	/*
3940 	 * First check if there is a segment already existing for this
3941 	 * sequence space.
3942 	 */
3943 
3944 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3945 		if (SEQ_GT(rxseg->rx_start, start)) {
3946 			break;
3947 		}
3948 		prev = rxseg;
3949 	}
3950 	next = rxseg;
3951 
3952 	/* check if prev seg is for this sequence */
3953 	if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
3954 	    SEQ_GEQ(prev->rx_end, end)) {
3955 		prev->rx_count++;
3956 		return;
3957 	}
3958 
3959 	/*
3960 	 * There are a couple of possibilities at this point.
3961 	 * 1. prev overlaps with the beginning of this sequence
3962 	 * 2. next overlaps with the end of this sequence
3963 	 * 3. there is no overlap.
3964 	 */
3965 
3966 	if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
3967 		if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
3968 			start = prev->rx_end + 1;
3969 			prev->rx_count++;
3970 		} else {
3971 			prev->rx_end = (start - 1);
3972 			rxcount = prev->rx_count;
3973 		}
3974 	}
3975 
3976 	if (next != NULL && SEQ_LT(next->rx_start, end)) {
3977 		if (SEQ_LEQ(next->rx_end, end)) {
3978 			end = next->rx_start - 1;
3979 			next->rx_count++;
3980 		} else {
3981 			next->rx_start = end + 1;
3982 			rxcount = next->rx_count;
3983 		}
3984 	}
3985 	if (!SEQ_LT(start, end)) {
3986 		return;
3987 	}
3988 
3989 	if (tcp_rxt_seg_max > 0 && tp->t_rxt_seg_count >= tcp_rxt_seg_max) {
3990 		rxseg = SLIST_FIRST(&tp->t_rxt_segments);
3991 		if (prev == rxseg) {
3992 			prev = NULL;
3993 		}
3994 		SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3995 		    tcp_rxt_seg, rx_link);
3996 
3997 		tcp_rxt_seg_drop++;
3998 		tp->t_rxt_seg_drop++;
3999 		zfree(tcp_rxt_seg_zone, rxseg);
4000 		tcp_memacct_sub(kalloc_type_size(tcp_rxt_seg_zone));
4001 
4002 		tp->t_rxt_seg_count -= 1;
4003 	}
4004 
4005 	rxseg = zalloc_flags(tcp_rxt_seg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4006 	tcp_memacct_add(kalloc_type_size(tcp_rxt_seg_zone));
4007 	rxseg->rx_start = start;
4008 	rxseg->rx_end = end;
4009 	rxseg->rx_count = rxcount + 1;
4010 
4011 	if (prev != NULL) {
4012 		SLIST_INSERT_AFTER(prev, rxseg, rx_link);
4013 	} else {
4014 		SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
4015 	}
4016 	tp->t_rxt_seg_count += 1;
4017 }
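
/*
 * Editor's note: illustrative cases for the overlap handling above,
 * with prev = [100, 200] and next = [300, 400] (inclusive, made-up
 * sequence numbers):
 *
 *   insert [100, 250]: prev's start matches, so prev->rx_count is
 *       bumped and the new entry shrinks to [201, 250];
 *   insert [150, 250]: prev is trimmed to [100, 149] and the new
 *       entry [150, 250] inherits prev's count (plus one);
 *   insert [150, 350]: prev is trimmed as above and next's start is
 *       moved to 351, leaving the new entry as [150, 350].
 */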
4018 
4019 struct tcp_rxt_seg *
4020 tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
4021 {
4022 	struct tcp_rxt_seg *rxseg;
4023 
4024 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
4025 		return NULL;
4026 	}
4027 
4028 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4029 		if (SEQ_LEQ(rxseg->rx_start, start) &&
4030 		    SEQ_GEQ(rxseg->rx_end, end)) {
4031 			return rxseg;
4032 		}
4033 		if (SEQ_GT(rxseg->rx_start, start)) {
4034 			break;
4035 		}
4036 	}
4037 	return NULL;
4038 }
4039 
4040 void
4041 tcp_rxtseg_set_spurious(struct tcpcb *tp, tcp_seq start, tcp_seq end)
4042 {
4043 	struct tcp_rxt_seg *rxseg;
4044 
4045 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
4046 		return;
4047 	}
4048 
4049 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4050 		if (SEQ_GEQ(rxseg->rx_start, start) &&
4051 		    SEQ_LEQ(rxseg->rx_end, end)) {
4052 			/*
4053 			 * If the segment was retransmitted only once, mark it as
4054 			 * spurious.
4055 			 */
4056 			if (rxseg->rx_count == 1) {
4057 				rxseg->rx_flags |= TCP_RXT_SPURIOUS;
4058 			}
4059 		}
4060 
4061 		if (SEQ_GEQ(rxseg->rx_start, end)) {
4062 			break;
4063 		}
4064 	}
4065 	return;
4066 }
4067 
4068 void
4069 tcp_rxtseg_clean(struct tcpcb *tp)
4070 {
4071 	struct tcp_rxt_seg *rxseg, *next;
4072 
4073 	SLIST_FOREACH_SAFE(rxseg, &tp->t_rxt_segments, rx_link, next) {
4074 		SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
4075 		    tcp_rxt_seg, rx_link);
4076 		zfree(tcp_rxt_seg_zone, rxseg);
4077 		tcp_memacct_sub(kalloc_type_size(tcp_rxt_seg_zone));
4078 	}
4079 	tp->t_rxt_seg_count = 0;
4080 	tp->t_dsack_lastuna = tp->snd_max;
4081 }
4082 
4083 boolean_t
4084 tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack)
4085 {
4086 	boolean_t bad_rexmt;
4087 	struct tcp_rxt_seg *rxseg;
4088 
4089 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
4090 		return FALSE;
4091 	}
4092 
4093 	/*
4094 	 * If all of the segments in this window are not cumulatively
4095 	 * acknowledged, then there can still be undetected packet loss.
4096 	 * Do not restore congestion window in that case.
4097 	 */
4098 	if (SEQ_LT(th_ack, tp->snd_recover)) {
4099 		return FALSE;
4100 	}
4101 
4102 	bad_rexmt = TRUE;
4103 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4104 		if (!(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
4105 			bad_rexmt = FALSE;
4106 			break;
4107 		}
4108 	}
4109 	return bad_rexmt;
4110 }
4111 
4112 u_int32_t
4113 tcp_rxtseg_total_size(struct tcpcb *tp)
4114 {
4115 	struct tcp_rxt_seg *rxseg;
4116 	u_int32_t total_size = 0;
4117 
4118 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4119 		total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
4120 	}
4121 	return total_size;
4122 }
4123 
4124 static void tcp_rack_free_and_disable(struct tcpcb *tp);
4125 
4126 int
4127 tcp_seg_cmp(const struct tcp_seg_sent *seg1, const struct tcp_seg_sent *seg2)
4128 {
4129 	return (int)(seg1->end_seq - seg2->end_seq);
4130 }
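
/*
 * Editor's note: the comparator above uses serial-number arithmetic so
 * that ordering survives 32-bit sequence wraparound. For example, with
 * end_seq values 0x00000010 and 0xfffffff0 the unsigned difference is
 * 0x20, a small positive int, so the segment just past the wrap
 * correctly compares as greater despite its smaller unsigned value.
 */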
4131 
4132 RB_GENERATE(tcp_seg_sent_tree_head, tcp_seg_sent, seg_link, tcp_seg_cmp)
4133 
4134 uint32_t
4135 tcp_seg_len(struct tcp_seg_sent *seg)
4136 {
4137 	if (SEQ_LT(seg->end_seq, seg->start_seq)) {
4138 		os_log_error(OS_LOG_DEFAULT, "segment end(%u) can't be smaller "
4139 		    "than segment start(%u)", seg->end_seq, seg->start_seq);
4140 	}
4141 
4142 	return seg->end_seq - seg->start_seq;
4143 }
4144 
4145 static struct tcp_seg_sent *
4146 tcp_seg_alloc_init(struct tcpcb *tp)
4147 {
4148 	struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->seg_pool.free_segs);
4149 	if (seg != NULL) {
4150 		TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4151 		tp->seg_pool.free_segs_count--;
4152 
4153 		bzero(seg, sizeof(*seg));
4154 	} else {
4155 		if (tcp_memacct_hardlimit()) {
4156 			return NULL;
4157 		}
4158 
4159 		seg = zalloc_flags(tcp_seg_sent_zone, Z_NOPAGEWAIT | Z_ZERO);
4160 		if (seg == NULL) {
4161 			return NULL;
4162 		}
4163 		tcp_memacct_add(kalloc_type_size(tcp_seg_sent_zone));
4164 	}
4165 
4166 	return seg;
4167 }
4168 
4169 static void
4170 tcp_update_seg_after_rto(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4171     uint32_t xmit_ts, uint8_t flags)
4172 {
4173 	tcp_rack_transmit_seg(tp, found_seg, found_seg->start_seq, found_seg->end_seq,
4174 	    xmit_ts, flags);
4175 	struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->t_segs_sent);
4176 	if (found_seg == seg) {
4177 		// Move this segment to the end of time-ordered list.
4178 		TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4179 		TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4180 	}
4181 }
4182 
4183 static void
4184 tcp_process_rxmt_segs_after_rto(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start,
4185     uint32_t xmit_ts, uint8_t flags)
4186 {
4187 	struct tcp_seg_sent segment = {};
4188 
4189 	while (seg != NULL) {
4190 		if (SEQ_LEQ(seg->start_seq, start)) {
4191 			tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4192 			break;
4193 		} else {
4194 			/* The segment is a part of the total RTO retransmission */
4195 			tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4196 
4197 			/* Find the next segment ending at the start of current segment */
4198 			segment.end_seq = seg->start_seq;
4199 			seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4200 		}
4201 	}
4202 }
4203 
4204 static struct tcp_seg_sent *
4205 tcp_seg_sent_insert_before(struct tcpcb *tp, struct tcp_seg_sent *before, tcp_seq start, tcp_seq end,
4206     uint32_t xmit_ts, uint8_t flags)
4207 {
4208 	struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4209 	if (seg == NULL) {
4210 		tcp_rack_free_and_disable(tp);
4211 		return NULL;
4212 	}
4213 	tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4214 	struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4215 	if (not_inserted) {
4216 		TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4217 		    not_inserted->start_seq, not_inserted->end_seq);
4218 	}
4219 	TAILQ_INSERT_BEFORE(before, seg, tx_link);
4220 
4221 	return seg;
4222 }
4223 
4224 static struct tcp_seg_sent *
4225 tcp_seg_rto_insert_end(struct tcpcb *tp, tcp_seq start, tcp_seq end,
4226     uint32_t xmit_ts, uint8_t flags)
4227 {
4228 	struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4229 	if (seg == NULL) {
4230 		tcp_rack_free_and_disable(tp);
4231 		return NULL;
4232 	}
4233 	/* segment MUST be allocated, there is no other fail-safe here */
4234 	tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4235 	struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4236 	if (not_inserted) {
4237 		TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4238 		    not_inserted->start_seq, not_inserted->end_seq);
4239 	}
4240 	TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4241 
4242 	return seg;
4243 }
4244 
4245 void
4246 tcp_seg_sent_insert(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start, tcp_seq end,
4247     uint32_t xmit_ts, uint8_t flags)
4248 {
4249 	if (seg != NULL) {
4250 		uint8_t seg_flags = seg->flags | flags;
4251 		if (seg->end_seq == end) {
4252 			/* Entire seg retransmitted in RACK recovery, start and end sequence doesn't change */
4253 			if (seg->start_seq != start) {
4254 				os_log_error(OS_LOG_DEFAULT, "Segment start (%u) is not same as retransmitted "
4255 				    "start sequence number (%u)", seg->start_seq, start);
4256 			}
4257 			tcp_rack_transmit_seg(tp, seg, seg->start_seq, seg->end_seq, xmit_ts, seg_flags);
4258 			TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4259 			TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4260 		} else {
4261 			/*
4262 			 * Original segment is retransmitted partially, update start_seq by len
4263 			 * and create new segment for retransmitted part
4264 			 */
4265 			struct tcp_seg_sent *partial_seg = tcp_seg_alloc_init(tp);
4266 			if (partial_seg == NULL) {
4267 				tcp_rack_free_and_disable(tp);
4268 				return;
4269 			}
4270 			seg->start_seq += (end - start);
4271 			tcp_rack_transmit_seg(tp, partial_seg, start, end, xmit_ts, seg_flags);
4272 			struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head,
4273 			    &tp->t_segs_sent_tree, partial_seg);
4274 			if (not_inserted) {
4275 				TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4276 				    not_inserted->start_seq, not_inserted->end_seq);
4277 			}
4278 			TAILQ_INSERT_TAIL(&tp->t_segs_sent, partial_seg, tx_link);
4279 		}
4280 
4281 		return;
4282 	}
4283 
4284 	if ((flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE) == 0) {
4285 		/* This is a new segment */
4286 		seg = tcp_seg_alloc_init(tp);
4287 		if (seg == NULL) {
4288 			tcp_rack_free_and_disable(tp);
4289 			return;
4290 		}
4291 
4292 		tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4293 		struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4294 		if (not_inserted) {
4295 			TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4296 			    not_inserted->start_seq, not_inserted->end_seq);
4297 		}
4298 		TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4299 
4300 		return;
4301 	}
4302 	/*
4303 	 * The segment was retransmitted after either an RTO or a PTO.
4304 	 * During RTO, time-ordered list may lose its order.
4305 	 * If retransmitted after RTO, check if the segment
4306 	 * already exists in RB tree and update its xmit_ts. Also,
4307 	 * if this seg is at the top of ordered list, then move it
4308 	 * to the end.
4309 	 */
4310 	struct tcp_seg_sent segment = {};
4311 	struct tcp_seg_sent *found_seg = NULL, *rxmt_seg = NULL;
4312 
4313 	/* Set the end sequence to search for existing segment */
4314 	segment.end_seq = end;
4315 	found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4316 	if (found_seg != NULL) {
4317 		/* Found an exact match for retransmitted end sequence */
4318 		tcp_process_rxmt_segs_after_rto(tp, found_seg, start, xmit_ts, flags);
4319 		return;
4320 	}
4321 	/*
4322 	 * We come here when we don't find an exact match and the end of the
4323 	 * segment retransmitted after the RTO lies within an existing segment.
4324 	 */
4325 	RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4326 		if (SEQ_LT(end, found_seg->end_seq) && SEQ_GT(end, found_seg->start_seq)) {
4327 			/*
4328 			 * This segment is partially retransmitted. We split this segment at the boundary of end
4329 			 * sequence. First insert the part being retransmitted at the end of time-ordered list.
4330 			 */
4331 			struct tcp_seg_sent *inserted_seg = tcp_seg_rto_insert_end(tp, found_seg->start_seq, end, xmit_ts,
4332 			    found_seg->flags | flags);
4333 			/* If segment is not allocated, RACK is already disabled and cleaned up */
4334 			if (inserted_seg == NULL) {
4335 				return;
4336 			}
4337 
4338 			if (SEQ_LEQ(found_seg->start_seq, start)) {
4339 				/*
4340 				 * We are done with the retransmitted part.
4341 				 * Move the start of existing segment
4342 				 */
4343 				found_seg->start_seq = end;
4344 			} else {
4345 				/*
4346 				 * This retransmitted sequence covers more than one segment
4347 				 * Look for segments covered by this retransmission below this segment
4348 				 */
4349 				segment.end_seq = found_seg->start_seq;
4350 				rxmt_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4351 
4352 				if (rxmt_seg != NULL) {
4353 					/* rxmt_seg is just before the current segment */
4354 					tcp_process_rxmt_segs_after_rto(tp, rxmt_seg, start, xmit_ts, flags);
4355 				}
4356 
4357 				/* Move the start of existing segment */
4358 				found_seg->start_seq = end;
4359 			}
4360 			return;
4361 		}
4362 	}
4363 }
4364 
4365 static void
4366 tcp_seg_collect_acked_subtree(struct tcpcb *tp, struct tcp_seg_sent *seg,
4367     uint32_t acked_xmit_ts, uint32_t tsecr)
4368 {
4369 	if (seg != NULL) {
4370 		tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4371 		tcp_seg_collect_acked_subtree(tp, RB_RIGHT(seg, seg_link), acked_xmit_ts, tsecr);
4372 		TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4373 	}
4374 }
4375 
4376 /* Call this function with root of the rb tree */
4377 static void
4378 tcp_seg_collect_acked(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq th_ack,
4379     uint32_t acked_xmit_ts, uint32_t tsecr)
4380 {
4381 	if (seg == NULL) {
4382 		return;
4383 	}
4384 
4385 	if (SEQ_GEQ(th_ack, seg->end_seq)) {
4386 		/* Delete the entire left sub-tree */
4387 		tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4388 		/* Evaluate the right sub-tree */
4389 		tcp_seg_collect_acked(tp, RB_RIGHT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4390 		TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4391 	} else {
4392 		/*
4393 		 * This ACK doesn't acknowledge the current root and its right sub-tree.
4394 		 * Evaluate the left sub-tree
4395 		 */
4396 		tcp_seg_collect_acked(tp, RB_LEFT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4397 	}
4398 }
4399 
4400 static void
4401 tcp_seg_delete_acked(struct tcpcb *tp, uint32_t acked_xmit_ts, uint32_t tsecr)
4402 {
4403 	struct tcp_seg_sent *acked_seg = NULL, *next = NULL;
4404 
4405 	TAILQ_FOREACH_SAFE(acked_seg, &tp->t_segs_acked, ack_link, next) {
4406 		/* Advance RACK state if applicable */
4407 		if (acked_seg->xmit_ts > acked_xmit_ts) {
4408 			tcp_rack_update_segment_acked(tp, tsecr, acked_seg->xmit_ts, acked_seg->end_seq,
4409 			    !!(acked_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4410 		}
4411 		/* Check for reordering */
4412 		tcp_rack_detect_reordering_acked(tp, acked_seg);
4413 
4414 		const uint32_t seg_len = tcp_seg_len(acked_seg);
4415 		if (acked_seg->flags & TCP_SEGMENT_LOST) {
4416 			if (tp->bytes_lost < seg_len) {
4417 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller than already "
4418 				    "lost segment length (%u)", tp->bytes_lost, seg_len);
4419 			}
4420 			tp->bytes_lost -= seg_len;
4421 		}
4422 		if (acked_seg->flags & TCP_RACK_RETRANSMITTED) {
4423 			if (tp->bytes_retransmitted < seg_len) {
4424 				os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be smaller "
4425 				    "than already retransmitted segment length (%u)",
4426 				    tp->bytes_retransmitted, seg_len);
4427 			}
4428 			tp->bytes_retransmitted -= seg_len;
4429 		}
4430 		if (acked_seg->flags & TCP_SEGMENT_SACKED) {
4431 			if (tp->bytes_sacked < seg_len) {
4432 				os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller than already "
4433 				    "SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4434 			}
4435 			tp->bytes_sacked -= seg_len;
4436 		}
4437 		TAILQ_REMOVE(&tp->t_segs_acked, acked_seg, ack_link);
4438 		TAILQ_REMOVE(&tp->t_segs_sent, acked_seg, tx_link);
4439 		RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, acked_seg);
4440 		tcp_seg_delete(tp, acked_seg);
4441 	}
4442 }
4443 
4444 void
4445 tcp_segs_doack(struct tcpcb *tp, tcp_seq th_ack, struct tcpopt *to)
4446 {
4447 	uint32_t tsecr = 0, acked_xmit_ts = 0;
4448 	tcp_seq acked_seq = th_ack;
4449 	bool was_retransmitted = false;
4450 
4451 	if (TAILQ_EMPTY(&tp->t_segs_sent)) {
4452 		return;
4453 	}
4454 
4455 	if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0)) {
4456 		tsecr = to->to_tsecr;
4457 	}
4458 
4459 	struct tcp_seg_sent seg = {};
4460 	struct tcp_seg_sent *found_seg = NULL, *next = NULL;
4461 
4462 	found_seg = TAILQ_LAST(&tp->t_segs_sent, tcp_seg_sent_head);
4463 
4464 	if (tp->rack.segs_retransmitted == false) {
4465 		if (SEQ_GEQ(th_ack, found_seg->end_seq)) {
4466 			/*
4467 			 * ACK acknowledges the last sent segment completely (snd_max),
4468 			 * we can remove all segments from time ordered list.
4469 			 */
4470 			acked_seq = found_seg->end_seq;
4471 			acked_xmit_ts = found_seg->xmit_ts;
4472 			was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4473 			tcp_segs_sent_clean(tp, false);
4474 
4475 			/* Advance RACK state */
4476 			tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4477 			return;
4478 		}
4479 	}
4480 	/*
4481 	 * If either not all segments are ACKed OR the time-ordered list contains retransmitted
4482 	 * segments, do a RB tree search for largest (completely) ACKed segment and remove the ACKed
4483 	 * segment and all segments left of it from both RB tree and time-ordered list.
4484 	 *
4485 	 * Set the end sequence to search for ACKed segment.
4486 	 */
4487 	seg.end_seq = th_ack;
4488 
4489 	if ((found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg)) != NULL) {
4490 		acked_seq = found_seg->end_seq;
4491 		acked_xmit_ts = found_seg->xmit_ts;
4492 		was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4493 
4494 		/*
4495 		 * Remove all segments that are ACKed by this ACK.
4496 		 * We defer self-balancing of RB tree to the end
4497 		 * by calling RB_REMOVE after collecting all ACKed segments.
4498 		 */
4499 		tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4500 		tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4501 
4502 		/* Advance RACK state */
4503 		tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4504 
4505 		return;
4506 	}
4507 	/*
4508 	 * When TSO is enabled, it is possible that th_ack is less
4509 	 * than segment->end, hence we search the tree
4510 	 * until we find the largest (partially) ACKed segment.
4511 	 */
4512 	RB_FOREACH_SAFE(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, next) {
4513 		if (SEQ_LT(th_ack, found_seg->end_seq) && SEQ_GT(th_ack, found_seg->start_seq)) {
4514 			acked_seq = th_ack;
4515 			acked_xmit_ts = found_seg->xmit_ts;
4516 			was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4517 
4518 			/* Remove all segments completely ACKed by this ack */
4519 			tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4520 			tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4521 			found_seg->start_seq = th_ack;
4522 
4523 			/* Advance RACK state */
4524 			tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4525 			break;
4526 		}
4527 	}
4528 }
4529 
4530 static bool
4531 tcp_seg_mark_sacked(struct tcpcb *tp, struct tcp_seg_sent *seg, uint32_t *newbytes_sacked)
4532 {
4533 	if (seg->flags & TCP_SEGMENT_SACKED) {
4534 		return false;
4535 	}
4536 
4537 	const uint32_t seg_len = tcp_seg_len(seg);
4538 
4539 	/* Check for reordering */
4540 	tcp_rack_detect_reordering_acked(tp, seg);
4541 
4542 	if (seg->flags & TCP_RACK_RETRANSMITTED) {
4543 		if (seg->flags & TCP_SEGMENT_LOST) {
4544 			/*
4545 			 * If the segment is not considered lost, we don't clear
4546 			 * retransmitted as it might still be in flight. The ONLY time
4547 			 * this can happen is when RTO happens and segment is retransmitted
4548 			 * this can happen is when an RTO occurs and the segment is
4549 			 * retransmitted and SACKed before RACK detects it was lost.
4550 			seg->flags &= ~(TCP_SEGMENT_LOST | TCP_RACK_RETRANSMITTED);
4551 			if (tp->bytes_lost < seg_len || tp->bytes_retransmitted < seg_len) {
4552 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) and/or bytes_retransmitted (%u) "
4553 				    "can't be smaller than already lost/retransmitted segment length (%u)", tp->bytes_lost,
4554 				    tp->bytes_retransmitted, seg_len);
4555 			}
4556 			tp->bytes_lost -= seg_len;
4557 			tp->bytes_retransmitted -= seg_len;
4558 		}
4559 	} else {
4560 		if (seg->flags & TCP_SEGMENT_LOST) {
4561 			seg->flags &= ~(TCP_SEGMENT_LOST);
4562 			if (tp->bytes_lost < seg_len) {
4563 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller "
4564 				    "than already lost segment length (%u)", tp->bytes_lost, seg_len);
4565 			}
4566 			tp->bytes_lost -= seg_len;
4567 		}
4568 	}
4569 	*newbytes_sacked += seg_len;
4570 	seg->flags |= TCP_SEGMENT_SACKED;
4571 	tp->bytes_sacked += seg_len;
4572 
4573 	return true;
4574 }
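
/*
 * Editor's note: a summary of the accounting transitions performed
 * above when a segment is newly SACKed (seg_len = tcp_seg_len(seg)):
 *
 *   LOST|RETRANSMITTED -> SACKED:  bytes_lost and bytes_retransmitted
 *                                  both shrink by seg_len;
 *   LOST               -> SACKED:  bytes_lost shrinks by seg_len;
 *   RETRANSMITTED      -> SACKED|RETRANSMITTED: no counter change, the
 *                                  retransmission may still be in flight;
 *   (no flags)         -> SACKED:  no counter change.
 *
 * In every case bytes_sacked and *newbytes_sacked grow by seg_len.
 */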
4575 
4576 static void
4577 tcp_segs_dosack_matched(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4578     tcp_seq sblk_start, uint32_t tsecr,
4579     uint32_t *newbytes_sacked)
4580 {
4581 	struct tcp_seg_sent seg = {};
4582 
4583 	while (found_seg != NULL) {
4584 		if (sblk_start == found_seg->start_seq) {
4585 			/*
4586 			 * Covered the entire SACK block.
4587 			 * Record segment flags before they get erased.
4588 			 */
4589 			uint8_t seg_flags = found_seg->flags;
4590 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4591 			if (newly_marked) {
4592 				/* Advance RACK state */
4593 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4594 				    found_seg->end_seq,
4595 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4596 			}
4597 			break;
4598 		} else if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4599 			if ((found_seg->flags & TCP_SEGMENT_SACKED) != 0) {
4600 				/* No need to process an already SACKED segment */
4601 				break;
4602 			}
4603 			/*
4604 			 * This segment is partially ACKed by SACK block
4605 			 * as sblk_start > segment start. Since it is
4606 			 * partially SACKed, we should split the unSACKed and
4607 			 * SACKed parts.
4608 			 */
4609 			/* First create a new segment for unSACKed part */
4610 			struct tcp_seg_sent *inserted_seg = tcp_seg_sent_insert_before(tp, found_seg, found_seg->start_seq, sblk_start,
4611 			    found_seg->xmit_ts, found_seg->flags);
4612 			/* If segment is not allocated, RACK is already disabled and cleaned up */
4613 			if (inserted_seg == NULL) {
4614 				return;
4615 			}
4616 			/* Now, update the SACKed part */
4617 			found_seg->start_seq = sblk_start;
4618 			/* Record seg flags before they get erased. */
4619 			uint8_t seg_flags = found_seg->flags;
4620 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4621 			if (newly_marked) {
4622 				/* Advance RACK state */
4623 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4624 				    found_seg->end_seq,
4625 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4626 			}
4627 			break;
4628 		} else {
4629 			/*
4630 			 * This segment lies within the SACK block
4631 			 * Record segment flags before they get erased.
4632 			 */
4633 			uint8_t seg_flags = found_seg->flags;
4634 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4635 			if (newly_marked) {
4636 				/* Advance RACK state */
4637 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4638 				    found_seg->end_seq,
4639 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4640 			}
4641 			/* Find the next segment ending at the start of current segment */
4642 			seg.end_seq = found_seg->start_seq;
4643 			found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4644 		}
4645 	}
4646 }
4647 
4648 void
4649 tcp_segs_dosack(struct tcpcb *tp, tcp_seq sblk_start, tcp_seq sblk_end,
4650     uint32_t tsecr, uint32_t *newbytes_sacked)
4651 {
4652 	/*
4653 	 * When we receive SACK, min RTT is computed after SACK processing which
4654 	 * means we are using min RTT from the previous ACK to advance RACK state.
4655 	 * This is OK as we track a windowed min-filtered estimate over a period.
4656 	 */
4657 	struct tcp_seg_sent seg = {};
4658 	struct tcp_seg_sent *found_seg = NULL, *sacked_seg = NULL;
4659 
4660 	/* Set the end sequence to search for SACKed segment */
4661 	seg.end_seq = sblk_end;
4662 	found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4663 
4664 	if (found_seg != NULL) {
4665 		/* We found an exact match for sblk_end */
4666 		tcp_segs_dosack_matched(tp, found_seg, sblk_start, tsecr, newbytes_sacked);
4667 		return;
4668 	}
4669 	/*
4670 	 * We come here when we don't find an exact match and sblk_end
4671 	 * lies within a segment. This would happen only when TSO is used.
4672 	 */
4673 	RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4674 		if (SEQ_LT(sblk_end, found_seg->end_seq) && SEQ_GT(sblk_end, found_seg->start_seq)) {
4675 			/*
4676 			 * This segment is partially SACKed. We split this segment at the boundary
4677 			 * of SACK block. First insert the newly SACKed part
4678 			 */
4679 			tcp_seq start = SEQ_LEQ(sblk_start, found_seg->start_seq) ? found_seg->start_seq : sblk_start;
4680 			struct tcp_seg_sent *newly_sacked = tcp_seg_sent_insert_before(tp, found_seg, start,
4681 			    sblk_end, found_seg->xmit_ts, found_seg->flags);
4682 			/* If segment is not allocated, RACK is already disabled and cleaned up */
4683 			if (newly_sacked == NULL) {
4684 				return;
4685 			}
4686 			/* Record seg flags before they get erased. */
4687 			uint8_t seg_flags = newly_sacked->flags;
4688 			/* Mark the SACKed segment */
4689 			tcp_seg_mark_sacked(tp, newly_sacked, newbytes_sacked);
4690 
4691 			/* Advance RACK state */
4692 			tcp_rack_update_segment_acked(tp, tsecr, newly_sacked->xmit_ts,
4693 			    newly_sacked->end_seq, !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4694 
4695 			if (sblk_start == found_seg->start_seq) {
4696 				/*
4697 				 * We are done with this SACK block.
4698 				 * Move the start of existing segment
4699 				 */
4700 				found_seg->start_seq = sblk_end;
4701 				break;
4702 			}
4703 
4704 			if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4705 				/* Insert the remaining unSACKed part before the SACKED segment inserted above */
4706 				struct tcp_seg_sent *unsacked = tcp_seg_sent_insert_before(tp, newly_sacked, found_seg->start_seq,
4707 				    sblk_start, found_seg->xmit_ts, found_seg->flags);
4708 				/* If segment is not allocated, RACK is already disabled and cleaned up */
4709 				if (unsacked == NULL) {
4710 					return;
4711 				}
4712 				/* Move the start of existing segment */
4713 				found_seg->start_seq = sblk_end;
4714 				break;
4715 			} else {
4716 				/*
4717 				 * This SACK block covers more than one segment
4718 				 * Look for segments SACKed below this segment
4719 				 */
4720 				seg.end_seq = found_seg->start_seq;
4721 				sacked_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4722 
4723 				if (sacked_seg != NULL) {
4724 					/* We found an exact match for sblk_end */
4725 					tcp_segs_dosack_matched(tp, sacked_seg, sblk_start, tsecr, newbytes_sacked);
4726 				}
4727 
4728 				/*
4729 				 * RACK might have been disabled (if a segment allocation failed) and all associated
4730 				 * state freed. If RACK hasn't been disabled, move the start of existing segment.
4731 				 */
4732 				if (TCP_RACK_ENABLED(tp)) {
4733 					found_seg->start_seq = sblk_end;
4734 				}
4735 			}
4736 			break;
4737 		}
4738 	}
4739 }
4740 
4741 void
4742 tcp_segs_clear_sacked(struct tcpcb *tp)
4743 {
4744 	struct tcp_seg_sent *seg = NULL;
4745 
4746 	TAILQ_FOREACH(seg, &tp->t_segs_sent, tx_link) {
4748 		const uint32_t seg_len = tcp_seg_len(seg);
4749 
4750 		if (seg->flags & TCP_SEGMENT_SACKED) {
4751 			seg->flags &= ~(TCP_SEGMENT_SACKED);
4752 			if (tp->bytes_sacked < seg_len) {
4753 				os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller "
4754 				    "than already SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4755 			}
4756 			tp->bytes_sacked -= seg_len;
4757 		}
4758 	}
4759 }
4760 
4761 void
4762 tcp_mark_seg_lost(struct tcpcb *tp, struct tcp_seg_sent *seg)
4763 {
4764 	const uint32_t seg_len = tcp_seg_len(seg);
4765 
4766 	if (seg->flags & TCP_SEGMENT_LOST) {
4767 		if (seg->flags & TCP_RACK_RETRANSMITTED) {
4768 			/* Retransmission was lost */
4769 			seg->flags &= ~TCP_RACK_RETRANSMITTED;
4770 			if (tp->bytes_retransmitted < seg_len) {
4771 				os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be "
4772 				    "smaller than retransmitted segment length (%u)",
4773 				    tp->bytes_retransmitted, seg_len);
4774 				return;
4775 			}
4776 			tp->bytes_retransmitted -= seg_len;
4777 		}
4778 	} else {
4779 		seg->flags |= TCP_SEGMENT_LOST;
4780 		tp->bytes_lost += seg_len;
4781 	}
4782 }
4783 
4784 void
4785 tcp_seg_delete(struct tcpcb *tp, struct tcp_seg_sent *seg)
4786 {
4787 	if (tp->seg_pool.free_segs_count >= TCP_SEG_POOL_MAX_ITEM_COUNT) {
4788 		zfree(tcp_seg_sent_zone, seg);
4789 		tcp_memacct_sub(kalloc_type_size(tcp_seg_sent_zone));
4790 	} else {
4791 		bzero(seg, sizeof(*seg));
4792 		TAILQ_INSERT_TAIL(&tp->seg_pool.free_segs, seg, free_link);
4793 		tp->seg_pool.free_segs_count++;
4794 	}
4795 }
4796 
4797 void
4798 tcp_segs_sent_clean(struct tcpcb *tp, bool free_segs)
4799 {
4800 	struct tcp_seg_sent *seg = NULL, *next = NULL;
4801 
4802 	TAILQ_FOREACH_SAFE(seg, &tp->t_segs_sent, tx_link, next) {
4803 		/* Check for reordering */
4804 		tcp_rack_detect_reordering_acked(tp, seg);
4805 
4806 		TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4807 		RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4808 		tcp_seg_delete(tp, seg);
4809 	}
4810 	if (__improbable(!RB_EMPTY(&tp->t_segs_sent_tree))) {
4811 		os_log_error(OS_LOG_DEFAULT, "RB tree still contains segments while "
4812 		    "time ordered list is already empty");
4813 	}
4814 	if (__improbable(!TAILQ_EMPTY(&tp->t_segs_acked))) {
4815 		os_log_error(OS_LOG_DEFAULT, "Segment ACKed list shouldn't contain "
4816 		    "any segments as they are removed immediately after being ACKed");
4817 	}
4818 	/* Reset seg_retransmitted as we emptied the list */
4819 	tcp_rack_reset_segs_retransmitted(tp);
4820 	tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
4821 
4822 	/* Empty the free segments pool */
4823 	if (free_segs) {
4824 		TAILQ_FOREACH_SAFE(seg, &tp->seg_pool.free_segs, free_link, next) {
4825 			TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4826 			zfree(tcp_seg_sent_zone, seg);
4827 			tcp_memacct_sub(kalloc_type_size(tcp_seg_sent_zone));
4828 		}
4829 		tp->seg_pool.free_segs_count = 0;
4830 	}
4831 }
4832 
4833 void
4834 tcp_rack_free_and_disable(struct tcpcb *tp)
4835 {
4836 	TCP_LOG(tp, "not enough memory to allocate segment, disabling RACK");
4837 	tcp_segs_sent_clean(tp, true);
4838 	tp->t_flagsext &= ~TF_RACK_ENABLED;
4839 }
4840 
4841 void
4842 tcp_get_connectivity_status(struct tcpcb *tp,
4843     struct tcp_conn_status *connstatus)
4844 {
4845 	if (tp == NULL || connstatus == NULL) {
4846 		return;
4847 	}
4848 	bzero(connstatus, sizeof(*connstatus));
4849 	if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
4850 		if (TCPS_HAVEESTABLISHED(tp->t_state)) {
4851 			connstatus->write_probe_failed = 1;
4852 		} else {
4853 			connstatus->conn_probe_failed = 1;
4854 		}
4855 	}
4856 	if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) {
4857 		connstatus->read_probe_failed = 1;
4858 	}
4859 	if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL &&
4860 	    (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) {
4861 		connstatus->probe_activated = 1;
4862 	}
4863 }
4864 
4865 void
4866 tcp_disable_tfo(struct tcpcb *tp)
4867 {
4868 	tp->t_flagsext &= ~TF_FASTOPEN;
4869 }
4870 
4871 static struct mbuf *
4872 tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp,
4873     boolean_t is_probe)
4874 {
4875 	struct inpcb *inp = tp->t_inpcb;
4876 	struct tcphdr *th;
4877 	caddr_t data;
4878 	int win = 0;
4879 	struct mbuf *m;
4880 
4881 	/*
4882 	 * The code assumes the IP + TCP headers fit in an mbuf packet header
4883 	 */
4884 	static_assert(sizeof(struct ip) + sizeof(struct tcphdr) <= _MHLEN);
4885 	static_assert(sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= _MHLEN);
4886 
4887 	MGETHDR(m, M_WAIT, MT_HEADER);
4888 	if (m == NULL) {
4889 		return NULL;
4890 	}
4891 	m->m_pkthdr.pkt_proto = IPPROTO_TCP;
4892 
4893 	data = m_mtod_lower_bound(m);
4894 
4895 	if (inp->inp_vflag & INP_IPV4) {
4896 		bzero(data, sizeof(struct ip) + sizeof(struct tcphdr));
4897 		th = (struct tcphdr *)(void *) (data + sizeof(struct ip));
4898 		m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
4899 		m->m_pkthdr.len = m->m_len;
4900 	} else {
4901 		VERIFY(inp->inp_vflag & INP_IPV6);
4902 
4903 		bzero(data, sizeof(struct ip6_hdr)
4904 		    + sizeof(struct tcphdr));
4905 		th = (struct tcphdr *)(void *)(data + sizeof(struct ip6_hdr));
4906 		m->m_len = sizeof(struct ip6_hdr) +
4907 		    sizeof(struct tcphdr);
4908 		m->m_pkthdr.len = m->m_len;
4909 	}
4910 
4911 	tcp_fillheaders(m, tp, data, th, NULL, NULL);
4912 
4913 	if (inp->inp_vflag & INP_IPV4) {
4914 		struct ip *ip;
4915 
4916 		ip = (__typeof__(ip))(void *)data;
4917 
4918 		ip->ip_id = rfc6864 ? 0 : ip_randomid((uint64_t)m);
4919 		ip->ip_off = htons(IP_DF);
4920 		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
4921 		ip->ip_ttl = inp->inp_ip_ttl;
4922 		ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
4923 		ip->ip_sum = in_cksum_hdr(ip);
4924 	} else {
4925 		struct ip6_hdr *ip6;
4926 
4927 		ip6 = (__typeof__(ip6))(void *)data;
4928 
4929 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
4930 		ip6->ip6_hlim = in6_selecthlim(inp, ifp);
4931 		ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
4932 
4933 		if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
4934 			ip6->ip6_src.s6_addr16[1] = 0;
4935 		}
4936 		if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
4937 			ip6->ip6_dst.s6_addr16[1] = 0;
4938 		}
4939 	}
4940 	th->th_flags = TH_ACK;
4941 
4942 	win = tcp_sbspace(tp);
4943 	if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) {
4944 		win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
4945 	}
4946 	th->th_win = htons((u_short) (win >> tp->rcv_scale));
4947 
4948 	if (is_probe) {
4949 		th->th_seq = htonl(tp->snd_una - 1);
4950 	} else {
4951 		th->th_seq = htonl(tp->snd_una);
4952 	}
4953 	th->th_ack = htonl(tp->rcv_nxt);
4954 
4955 	/* Force recompute TCP checksum to be the final value */
4956 	th->th_sum = 0;
4957 	if (inp->inp_vflag & INP_IPV4) {
4958 		th->th_sum = inet_cksum(m, IPPROTO_TCP,
4959 		    sizeof(struct ip), sizeof(struct tcphdr));
4960 	} else {
4961 		th->th_sum = inet6_cksum(m, IPPROTO_TCP,
4962 		    sizeof(struct ip6_hdr), sizeof(struct tcphdr));
4963 	}
4964 
4965 	return m;
4966 }
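
/*
 * Editor's note: using snd_una - 1 as the probe sequence number (above)
 * is the classic keepalive technique from RFC 1122: the out-of-window
 * segment carries no data yet forces the peer to respond with an ACK,
 * confirming the connection is still alive.
 */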
4967 
4968 void
4969 tcp_fill_keepalive_offload_frames(ifnet_t ifp,
4970     struct ifnet_keepalive_offload_frame *frames_array __counted_by(frames_array_count),
4971     u_int32_t frames_array_count, size_t frame_data_offset,
4972     u_int32_t *used_frames_count)
4973 {
4974 	struct inpcb *inp;
4975 	inp_gen_t gencnt;
4976 	u_int32_t frame_index = *used_frames_count;
4977 
4978 	/* Validation of the parameters */
4979 	if (ifp == NULL || frames_array == NULL ||
4980 	    frames_array_count == 0 ||
4981 	    frame_index >= frames_array_count ||
4982 	    frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4983 		return;
4984 	}
4985 
4986 	/* Fast exit when no process is using the socket option TCP_KEEPALIVE_OFFLOAD */
4987 	if (ifp->if_tcp_kao_cnt == 0) {
4988 		return;
4989 	}
4990 
4991 	/*
4992 	 * This function is called outside the regular TCP processing
4993 	 * so we need to update the TCP clock.
4994 	 */
4995 	calculate_tcp_clock();
4996 
4997 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
4998 	gencnt = tcbinfo.ipi_gencnt;
4999 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
5000 		struct socket *so;
5001 		struct ifnet_keepalive_offload_frame *frame;
5002 		struct mbuf *m = NULL;
5003 		struct tcpcb *tp = intotcpcb(inp);
5004 
5005 		if (frame_index >= frames_array_count) {
5006 			break;
5007 		}
5008 
5009 		if (inp->inp_gencnt > gencnt ||
5010 		    inp->inp_state == INPCB_STATE_DEAD) {
5011 			continue;
5012 		}
5013 
5014 		if ((so = inp->inp_socket) == NULL ||
5015 		    (so->so_state & SS_DEFUNCT)) {
5016 			continue;
5017 		}
5018 		/*
5019 		 * Check for the keepalive offload flag without taking the
5020 		 * socket lock to avoid a deadlock.
5021 		 */
5022 		if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
5023 			continue;
5024 		}
5025 
5026 		if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
5027 			continue;
5028 		}
5029 		if (inp->inp_ppcb == NULL ||
5030 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
5031 			continue;
5032 		}
5033 		socket_lock(so, 1);
5034 		/* Release the want count */
5035 		if (inp->inp_ppcb == NULL ||
5036 		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
5037 			socket_unlock(so, 1);
5038 			continue;
5039 		}
5040 		if ((inp->inp_vflag & INP_IPV4) &&
5041 		    (inp->inp_laddr.s_addr == INADDR_ANY ||
5042 		    inp->inp_faddr.s_addr == INADDR_ANY)) {
5043 			socket_unlock(so, 1);
5044 			continue;
5045 		}
5046 		if ((inp->inp_vflag & INP_IPV6) &&
5047 		    (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
5048 		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
5049 			socket_unlock(so, 1);
5050 			continue;
5051 		}
5052 		if (inp->inp_lport == 0 || inp->inp_fport == 0) {
5053 			socket_unlock(so, 1);
5054 			continue;
5055 		}
5056 		if (inp->inp_last_outifp == NULL ||
5057 		    inp->inp_last_outifp->if_index != ifp->if_index) {
5058 			socket_unlock(so, 1);
5059 			continue;
5060 		}
5061 		if ((inp->inp_vflag & INP_IPV4) && frame_data_offset +
5062 		    sizeof(struct ip) + sizeof(struct tcphdr) >
5063 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
5064 			socket_unlock(so, 1);
5065 			continue;
5066 		} else if (!(inp->inp_vflag & INP_IPV4) && frame_data_offset +
5067 		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) >
5068 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
5069 			socket_unlock(so, 1);
5070 			continue;
5071 		}
5072 		/*
5073 		 * There is no point in waking up the device for connections
5074 		 * that are not established. Long-lived connections are meant
5075 		 * for processes that will send and receive data.
5076 		 */
5077 		if (tp->t_state != TCPS_ESTABLISHED) {
5078 			socket_unlock(so, 1);
5079 			continue;
5080 		}
5081 		/*
5082 		 * This inp has all the information that is needed to
5083 		 * generate an offload frame.
5084 		 */
5085 		frame = &frames_array[frame_index];
5086 		frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP;
5087 		frame->ether_type = (inp->inp_vflag & INP_IPV4) ?
5088 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 :
5089 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
5090 		frame->interval = (uint16_t)(tp->t_keepidle > 0 ? tp->t_keepidle :
5091 		    tcp_keepidle);
5092 		frame->keep_cnt = (uint8_t)TCP_CONN_KEEPCNT(tp);
5093 		frame->keep_retry = (uint16_t)TCP_CONN_KEEPINTVL(tp);
5094 		if (so->so_options & SO_NOWAKEFROMSLEEP) {
5095 			frame->flags |=
5096 			    IFNET_KEEPALIVE_OFFLOAD_FLAG_NOWAKEFROMSLEEP;
5097 		}
5098 		frame->local_port = ntohs(inp->inp_lport);
5099 		frame->remote_port = ntohs(inp->inp_fport);
5100 		frame->local_seq = tp->snd_nxt;
5101 		frame->remote_seq = tp->rcv_nxt;
5102 		if (inp->inp_vflag & INP_IPV4) {
5103 			ASSERT(frame_data_offset + sizeof(struct ip) + sizeof(struct tcphdr) <= UINT8_MAX);
5104 			frame->length = (uint8_t)(frame_data_offset +
5105 			    sizeof(struct ip) + sizeof(struct tcphdr));
5106 			frame->reply_length = frame->length;
5107 
5108 			frame->addr_length = sizeof(struct in_addr);
5109 			bcopy(&inp->inp_laddr, frame->local_addr,
5110 			    sizeof(struct in_addr));
5111 			bcopy(&inp->inp_faddr, frame->remote_addr,
5112 			    sizeof(struct in_addr));
5113 		} else {
5114 			struct in6_addr *ip6;
5115 
5116 			ASSERT(frame_data_offset + sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= UINT8_MAX);
5117 			frame->length = (uint8_t)(frame_data_offset +
5118 			    sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
5119 			frame->reply_length = frame->length;
5120 
5121 			frame->addr_length = sizeof(struct in6_addr);
5122 			ip6 = (struct in6_addr *)(void *)frame->local_addr;
5123 			bcopy(&inp->in6p_laddr, ip6, sizeof(struct in6_addr));
5124 			if (IN6_IS_SCOPE_EMBED(ip6)) {
5125 				ip6->s6_addr16[1] = 0;
5126 			}
5127 
5128 			ip6 = (struct in6_addr *)(void *)frame->remote_addr;
5129 			bcopy(&inp->in6p_faddr, ip6, sizeof(struct in6_addr));
5130 			if (IN6_IS_SCOPE_EMBED(ip6)) {
5131 				ip6->s6_addr16[1] = 0;
5132 			}
5133 		}
5134 
5135 		/*
5136 		 * First the probe
5137 		 */
5138 		m = tcp_make_keepalive_frame(tp, ifp, TRUE);
5139 		if (m == NULL) {
5140 			socket_unlock(so, 1);
5141 			continue;
5142 		}
5143 		bcopy(m_mtod_current(m), frame->data + frame_data_offset, m->m_len);
5144 		m_freem(m);
5145 
5146 		/*
5147 		 * Now the response packet to incoming probes
5148 		 */
5149 		m = tcp_make_keepalive_frame(tp, ifp, FALSE);
5150 		if (m == NULL) {
5151 			socket_unlock(so, 1);
5152 			continue;
5153 		}
5154 		bcopy(m_mtod_current(m), frame->reply_data + frame_data_offset,
5155 		    m->m_len);
5156 		m_freem(m);
5157 
5158 		frame_index++;
5159 		socket_unlock(so, 1);
5160 	}
5161 	lck_rw_done(&tcbinfo.ipi_lock);
5162 	*used_frames_count = frame_index;
5163 }
5164 
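/*
 * Return true when inp is the connection that generated the given offload
 * frame: same outbound interface and, per address family, the same
 * local/remote ports and addresses. As a side effect this releases the
 * want count taken by the caller via WNT_ACQUIRE.
 */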
5165 static bool
5166 inp_matches_kao_frame(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frame,
5167     struct inpcb *inp)
5168 {
5169 	if (inp->inp_ppcb == NULL) {
5170 		return false;
5171 	}
5172 	/* Release the want count */
5173 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
5174 		return false;
5175 	}
5176 	if (inp->inp_last_outifp == NULL ||
5177 	    inp->inp_last_outifp->if_index != ifp->if_index) {
5178 		return false;
5179 	}
5180 	if (frame->local_port != ntohs(inp->inp_lport) ||
5181 	    frame->remote_port != ntohs(inp->inp_fport)) {
5182 		return false;
5183 	}
5184 	if (inp->inp_vflag & INP_IPV4) {
5185 		if (memcmp(&inp->inp_laddr, frame->local_addr,
5186 		    sizeof(struct in_addr)) != 0 ||
5187 		    memcmp(&inp->inp_faddr, frame->remote_addr,
5188 		    sizeof(struct in_addr)) != 0) {
5189 			return false;
5190 		}
5191 	} else if (inp->inp_vflag & INP_IPV6) {
5192 		if (memcmp(&inp->in6p_laddr, frame->local_addr,
5193 		    sizeof(struct in6_addr)) != 0 ||
5194 		    memcmp(&inp->in6p_faddr, frame->remote_addr,
5195 		    sizeof(struct in6_addr)) != 0) {
5196 			return false;
5197 		}
5198 	} else {
5199 		return false;
5200 	}
5201 	return true;
5202 }
5203 
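/*
 * Called when an offloaded keepalive probe has gone unanswered for too
 * long. Find the socket matching the frame, post a timeout event, and
 * drop the connection with ETIMEDOUT, mirroring what tcptimers() does for
 * an in-kernel keepalive failure.
 */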
5204 int
5205 tcp_notify_kao_timeout(ifnet_t ifp,
5206     struct ifnet_keepalive_offload_frame *frame)
5207 {
5208 	struct inpcb *inp = NULL;
5209 	struct socket *so = NULL;
5210 	bool found = false;
5211 
5212 	/*
5213 	 * Unlock the list before posting the event on the matching socket.
5214 	 */
5215 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
5216 
5217 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
5218 		if ((so = inp->inp_socket) == NULL ||
5219 		    (so->so_state & SS_DEFUNCT)) {
5220 			continue;
5221 		}
5222 		if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
5223 			continue;
5224 		}
5225 		if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
5226 			continue;
5227 		}
5228 		if (inp->inp_ppcb == NULL ||
5229 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
5230 			continue;
5231 		}
5232 		socket_lock(so, 1);
5233 		if (inp_matches_kao_frame(ifp, frame, inp)) {
5234 			/*
5235 			 * Keep the matching socket locked
5236 			 */
5237 			found = true;
5238 			break;
5239 		}
5240 		socket_unlock(so, 1);
5241 	}
5242 	lck_rw_done(&tcbinfo.ipi_lock);
5243 
5244 	if (found) {
5245 		ASSERT(inp != NULL);
5246 		ASSERT(so != NULL);
5247 		ASSERT(so == inp->inp_socket);
5248 		/*
5249 		 * Drop the TCP connection like tcptimers() does
5250 		 */
5251 		tcpcb_ref_t tp = inp->inp_ppcb;
5252 
5253 		tcpstat.tcps_keepdrops++;
5254 		soevent(so,
5255 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
5256 		tp = tcp_drop(tp, ETIMEDOUT);
5257 
5258 		tcpstat.tcps_ka_offload_drops++;
5259 		os_log_info(OS_LOG_DEFAULT, "%s: dropped lport %u fport %u\n",
5260 		    __func__, frame->local_port, frame->remote_port);
5261 
5262 		socket_unlock(so, 1);
5263 	}
5264 
5265 	return 0;
5266 }
5267 
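/*
 * Validate a requested notification id before a marker is added: there
 * must be data in the send buffer to watch, and neither the id nor the
 * marker position (the current end of the send buffer) may duplicate an
 * existing marker.
 */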
5268 errno_t
5269 tcp_notify_ack_id_valid(struct tcpcb *tp, struct socket *so,
5270     u_int32_t notify_id)
5271 {
5272 	struct tcp_notify_ack_marker *elm;
5273 
5274 	if (so->so_snd.sb_cc == 0) {
5275 		return ENOBUFS;
5276 	}
5277 
5278 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5279 		/* Duplicate id is not allowed */
5280 		if (elm->notify_id == notify_id) {
5281 			return EINVAL;
5282 		}
5283 		/* Duplicate position is not allowed */
5284 		if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) {
5285 			return EINVAL;
5286 		}
5287 	}
5288 	return 0;
5289 }
5290 
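/*
 * Record a marker that fires once everything currently in the send buffer
 * (snd_una + so_snd.sb_cc) has been acknowledged. The caller is expected
 * to have validated notify_id with tcp_notify_ack_id_valid().
 */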
5291 errno_t
5292 tcp_add_notify_ack_marker(struct tcpcb *tp, u_int32_t notify_id)
5293 {
5294 	struct tcp_notify_ack_marker *nm, *elm = NULL;
5295 	struct socket *so = tp->t_inpcb->inp_socket;
5296 
5297 	nm = kalloc_type(struct tcp_notify_ack_marker, Z_WAITOK | Z_ZERO);
5298 	if (nm == NULL) {
5299 		return ENOMEM;
5300 	}
5301 	nm->notify_id = notify_id;
5302 	nm->notify_snd_una = tp->snd_una + so->so_snd.sb_cc;
5303 
5304 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5305 		if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una)) {
5306 			break;
5307 		}
5308 	}
5309 
5310 	if (elm == NULL) {
5311 		VERIFY(SLIST_EMPTY(&tp->t_notify_ack));
5312 		SLIST_INSERT_HEAD(&tp->t_notify_ack, nm, notify_next);
5313 	} else {
5314 		SLIST_INSERT_AFTER(elm, nm, notify_next);
5315 	}
5316 	tp->t_notify_ack_count++;
5317 	return 0;
5318 }
5319 
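/*
 * Release every pending notify-ack marker and reset the count.
 */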
5320 void
5321 tcp_notify_ack_free(struct tcpcb *tp)
5322 {
5323 	struct tcp_notify_ack_marker *elm, *next;
5324 	if (SLIST_EMPTY(&tp->t_notify_ack)) {
5325 		return;
5326 	}
5327 
5328 	SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
5329 		SLIST_REMOVE(&tp->t_notify_ack, elm, tcp_notify_ack_marker,
5330 		    notify_next);
5331 		kfree_type(struct tcp_notify_ack_marker, elm);
5332 	}
5333 	SLIST_INIT(&tp->t_notify_ack);
5334 	tp->t_notify_ack_count = 0;
5335 }
5336 
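/*
 * Post SO_FILT_HINT_NOTIFY_ACK once snd_una has reached the first
 * marker's position. The caller must ensure the marker list is not
 * empty: SLIST_FIRST() is dereferenced unconditionally here.
 */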
5337 inline void
5338 tcp_notify_acknowledgement(struct tcpcb *tp, struct socket *so)
5339 {
5340 	struct tcp_notify_ack_marker *elm;
5341 
5342 	elm = SLIST_FIRST(&tp->t_notify_ack);
5343 	if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5344 		soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOTIFY_ACK);
5345 	}
5346 }
5347 
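/*
 * Count how many markers at the head of the list have completed (snd_una
 * has reached their position) and report both that count, capped at
 * TCP_MAX_NOTIFY_ACK, and the number of markers still pending.
 */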
5348 void
5349 tcp_get_notify_ack_count(struct tcpcb *tp,
5350     struct tcp_notify_ack_complete *retid)
5351 {
5352 	struct tcp_notify_ack_marker *elm;
5353 	uint32_t complete = 0;
5354 
5355 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5356 		if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5357 			ASSERT(complete < UINT32_MAX);
5358 			complete++;
5359 		} else {
5360 			break;
5361 		}
5362 	}
5363 	retid->notify_pending = tp->t_notify_ack_count - complete;
5364 	retid->notify_complete_count = min(TCP_MAX_NOTIFY_ACK, complete);
5365 }
5366 
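/*
 * Copy up to retid->notify_complete_count completed marker ids into
 * retid->notify_complete_id, unlinking and freeing each marker as it is
 * reported.
 */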
5367 void
5368 tcp_get_notify_ack_ids(struct tcpcb *tp,
5369     struct tcp_notify_ack_complete *retid)
5370 {
5371 	size_t i = 0;
5372 	struct tcp_notify_ack_marker *elm, *next;
5373 
5374 	SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
5375 		if (i >= retid->notify_complete_count) {
5376 			break;
5377 		}
5378 		if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5379 			retid->notify_complete_id[i++] = elm->notify_id;
5380 			SLIST_REMOVE(&tp->t_notify_ack, elm,
5381 			    tcp_notify_ack_marker, notify_next);
5382 			kfree_type(struct tcp_notify_ack_marker, elm);
5383 			tp->t_notify_ack_count--;
5384 		} else {
5385 			break;
5386 		}
5387 	}
5388 }
5389 
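/*
 * Return true when a TCP socket has at least one notify-ack marker that
 * has already completed, i.e. an acknowledgement notification is ready to
 * be consumed.
 */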
5390 bool
5391 tcp_notify_ack_active(struct socket *so)
5392 {
5393 	if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
5394 	    SOCK_TYPE(so) == SOCK_STREAM) {
5395 		struct tcpcb *tp = intotcpcb(sotoinpcb(so));
5396 
5397 		if (!SLIST_EMPTY(&tp->t_notify_ack)) {
5398 			struct tcp_notify_ack_marker *elm;
5399 			elm = SLIST_FIRST(&tp->t_notify_ack);
5400 			if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5401 				return true;
5402 			}
5403 		}
5404 	}
5405 	return false;
5406 }
5407 
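/*
 * Return the number of bytes sitting in the send buffer that have not yet
 * been transmitted, as seen at the cumulative ACK th_ack. Worked example
 * (illustrative numbers): with snd_max = 1500, th_ack = 1000 and 800
 * bytes in the buffer, 500 bytes are in flight, leaving 300 bytes unsent.
 * A FIN that was sent occupies sequence space but no buffer space, hence
 * the TF_SENTFIN adjustment.
 */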
5408 inline int32_t
5409 inp_get_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
5410 {
5411 	struct inpcb *inp = sotoinpcb(so);
5412 	struct tcpcb *tp = intotcpcb(inp);
5413 
5414 	if ((so->so_snd.sb_flags & SB_SNDBYTE_CNT) &&
5415 	    so->so_snd.sb_cc > 0) {
5416 		int32_t unsent, sent;
5417 		sent = tp->snd_max - th_ack;
5418 		if (tp->t_flags & TF_SENTFIN) {
5419 			sent--;
5420 		}
5421 		unsent = so->so_snd.sb_cc - sent;
5422 		return unsent;
5423 	}
5424 	return 0;
5425 }
5426 
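/*
 * Extract the 3-bit Accurate ECN (ACE) counter from a TCP header. ECE
 * contributes 1, CWR contributes 2 and AE contributes 4; AE sits above
 * the 8-bit th_flags field, hence the (TH_AE >> 8) test against th_x2.
 * For example, a header with CWR and ECE set but AE clear yields
 * ace == 3.
 */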
5427 uint8_t
5428 tcp_get_ace(struct tcphdr *th)
5429 {
5430 	uint8_t ace = 0;
5431 	if (th->th_flags & TH_ECE) {
5432 		ace += 1;
5433 	}
5434 	if (th->th_flags & TH_CWR) {
5435 		ace += 2;
5436 	}
5437 	if (th->th_x2 & (TH_AE >> 8)) {
5438 		ace += 4;
5439 	}
5440 
5441 	return ace;
5442 }
5443 
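/*
 * Helpers for the per-interface flow statistics below: IFP_PER_FLOW_STAT
 * bumps the IPv4 or IPv6 flavor of the named counter on the ifp in scope,
 * and FLOW_ECN_ENABLED tests whether all of the TE_ECN_ON bits are set,
 * i.e. ECN was successfully negotiated on the flow.
 */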
5444 #define IFP_PER_FLOW_STAT(_ipv4_, _stat_) { \
5445 	if (_ipv4_) { \
5446 	        ifp->if_ipv4_stat->_stat_++; \
5447 	} else { \
5448 	        ifp->if_ipv6_stat->_stat_++; \
5449 	} \
5450 }
5451 
5452 #define FLOW_ECN_ENABLED(_flags_) \
5453     ((_flags_ & (TE_ECN_ON)) == (TE_ECN_ON))
5454 
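/*
 * Fold the statistics of a closing TCP flow (ifs) into the aggregate
 * per-interface counters of ifp: ECN negotiation outcome, congestion
 * experience, ECN fallbacks, performance stats and link heuristics.
 */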
5455 void
5456 tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
5457     struct ifnet *ifp)
5458 {
5459 	if (ifp == NULL || !ifnet_is_fully_attached(ifp)) {
5460 		return;
5461 	}
5462 
5463 	ifnet_lock_shared(ifp);
5464 	if (ifs->ecn_flags & TE_SETUPSENT) {
5465 		if (ifs->ecn_flags & TE_CLIENT_SETUP) {
5466 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_client_setup);
5467 			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5468 				IFP_PER_FLOW_STAT(ifs->ipv4,
5469 				    ecn_client_success);
5470 			} else if (ifs->ecn_flags & TE_LOST_SYN) {
5471 				IFP_PER_FLOW_STAT(ifs->ipv4,
5472 				    ecn_syn_lost);
5473 			} else {
5474 				IFP_PER_FLOW_STAT(ifs->ipv4,
5475 				    ecn_peer_nosupport);
5476 			}
5477 		} else {
5478 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_server_setup);
5479 			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5480 				IFP_PER_FLOW_STAT(ifs->ipv4,
5481 				    ecn_server_success);
5482 			} else if (ifs->ecn_flags & TE_LOST_SYN) {
5483 				IFP_PER_FLOW_STAT(ifs->ipv4,
5484 				    ecn_synack_lost);
5485 			} else {
5486 				IFP_PER_FLOW_STAT(ifs->ipv4,
5487 				    ecn_peer_nosupport);
5488 			}
5489 		}
5490 	} else {
5491 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off_conn);
5492 	}
5493 	if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5494 		if (ifs->ecn_flags & TE_RECV_ECN_CE) {
5495 			tcpstat.tcps_ecn_conn_recv_ce++;
5496 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ce);
5497 		}
5498 		if (ifs->ecn_flags & TE_RECV_ECN_ECE) {
5499 			tcpstat.tcps_ecn_conn_recv_ece++;
5500 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ece);
5501 		}
5502 		if (ifs->ecn_flags & (TE_RECV_ECN_CE | TE_RECV_ECN_ECE)) {
5503 			if (ifs->txretransmitbytes > 0 ||
5504 			    ifs->rxoutoforderbytes > 0) {
5505 				tcpstat.tcps_ecn_conn_pl_ce++;
5506 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plce);
5507 			} else {
5508 				tcpstat.tcps_ecn_conn_nopl_ce++;
5509 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_noplce);
5510 			}
5511 		} else {
5512 			if (ifs->txretransmitbytes > 0 ||
5513 			    ifs->rxoutoforderbytes > 0) {
5514 				tcpstat.tcps_ecn_conn_plnoce++;
5515 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plnoce);
5516 			}
5517 		}
5518 	}
5519 
5520 	/* Other stats are interesting for non-local connections only */
5521 	if (ifs->local) {
5522 		ifnet_lock_done(ifp);
5523 		return;
5524 	}
5525 
5526 	if (ifs->ipv4) {
5527 		ifp->if_ipv4_stat->timestamp = net_uptime();
5528 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5529 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_on);
5530 		} else {
5531 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_off);
5532 		}
5533 	} else {
5534 		ifp->if_ipv6_stat->timestamp = net_uptime();
5535 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5536 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_on);
5537 		} else {
5538 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_off);
5539 		}
5540 	}
5541 
5542 	if (ifs->rxmit_drop) {
5543 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5544 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_on.rxmit_drop);
5545 		} else {
5546 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop);
5547 		}
5548 	}
5549 	if (ifs->ecn_fallback_synloss) {
5550 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss);
5551 	}
5552 	if (ifs->ecn_fallback_droprst) {
5553 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst);
5554 	}
5555 	if (ifs->ecn_fallback_droprxmt) {
5556 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt);
5557 	}
5558 	if (ifs->ecn_fallback_ce) {
5559 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce);
5560 	}
5561 	if (ifs->ecn_fallback_reorder) {
5562 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder);
5563 	}
5564 	if (ifs->ecn_recv_ce > 0) {
5565 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce);
5566 	}
5567 	if (ifs->ecn_recv_ece > 0) {
5568 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece);
5569 	}
5570 
5571 	tcp_flow_lim_stats(ifs, &ifp->if_lim_stat);
5572 
5573 	/*
5574 	 * Link heuristics are updated here only for NECP client flows,
5575 	 * when they close; socket flows are updated live.
5576 	 */
5577 	os_atomic_add(&ifp->if_tcp_stat->linkheur_noackpri, ifs->linkheur_noackpri, relaxed);
5578 	os_atomic_add(&ifp->if_tcp_stat->linkheur_comprxmt, ifs->linkheur_comprxmt, relaxed);
5579 	os_atomic_add(&ifp->if_tcp_stat->linkheur_synrxmt, ifs->linkheur_synrxmt, relaxed);
5580 	os_atomic_add(&ifp->if_tcp_stat->linkheur_rxmtfloor, ifs->linkheur_rxmtfloor, relaxed);
5581 
5582 	ifnet_lock_done(ifp);
5583 }
5584 
5585 #if SKYWALK
5586 
5587 #include <skywalk/core/skywalk_var.h>
5588 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
5589 
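/*
 * Register the connection's five-tuple with the interface's flowswitch
 * (when TCP RX aggregation is enabled) so inbound segments can be
 * classified to this flow. On success, the flowswitch and flow UUIDs are
 * recorded in the tcpcb for later teardown by tcp_del_fsw_flow().
 */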
5590 void
5591 tcp_add_fsw_flow(struct tcpcb *tp, struct ifnet *ifp)
5592 {
5593 	struct inpcb *inp = tp->t_inpcb;
5594 	struct socket *so = inp->inp_socket;
5595 	uuid_t fsw_uuid;
5596 	struct nx_flow_req nfr;
5597 	int err;
5598 
5599 	if (!NX_FSW_TCP_RX_AGG_ENABLED()) {
5600 		return;
5601 	}
5602 
5603 	if (ifp == NULL || kern_nexus_get_flowswitch_instance(ifp, fsw_uuid)) {
5604 		TCP_LOG_FSW_FLOW(tp, "skip ifp no fsw");
5605 		return;
5606 	}
5607 
5608 	memset(&nfr, 0, sizeof(nfr));
5609 
5610 	if (inp->inp_vflag & INP_IPV4) {
5611 		ASSERT(!(inp->inp_laddr.s_addr == INADDR_ANY ||
5612 		    inp->inp_faddr.s_addr == INADDR_ANY ||
5613 		    IN_MULTICAST(ntohl(inp->inp_laddr.s_addr)) ||
5614 		    IN_MULTICAST(ntohl(inp->inp_faddr.s_addr))));
5615 		nfr.nfr_saddr.sin.sin_len = sizeof(struct sockaddr_in);
5616 		nfr.nfr_saddr.sin.sin_family = AF_INET;
5617 		nfr.nfr_saddr.sin.sin_port = inp->inp_lport;
5618 		memcpy(&nfr.nfr_saddr.sin.sin_addr, &inp->inp_laddr,
5619 		    sizeof(struct in_addr));
5620 		nfr.nfr_daddr.sin.sin_len = sizeof(struct sockaddr_in);
5621 		nfr.nfr_daddr.sin.sin_family = AF_INET;
5622 		nfr.nfr_daddr.sin.sin_port = inp->inp_fport;
5623 		memcpy(&nfr.nfr_daddr.sin.sin_addr, &inp->inp_faddr,
5624 		    sizeof(struct in_addr));
5625 	} else {
5626 		ASSERT(!(IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
5627 		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) ||
5628 		    IN6_IS_ADDR_MULTICAST(&inp->in6p_laddr) ||
5629 		    IN6_IS_ADDR_MULTICAST(&inp->in6p_faddr)));
5630 		nfr.nfr_saddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
5631 		nfr.nfr_saddr.sin6.sin6_family = AF_INET6;
5632 		nfr.nfr_saddr.sin6.sin6_port = inp->inp_lport;
5633 		memcpy(&nfr.nfr_saddr.sin6.sin6_addr, &inp->in6p_laddr,
5634 		    sizeof(struct in6_addr));
5635 		nfr.nfr_daddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
5636 		nfr.nfr_daddr.sin6.sin6_family = AF_INET6;
5637 		nfr.nfr_daddr.sin6.sin6_port = inp->inp_fport;
5638 		memcpy(&nfr.nfr_daddr.sin6.sin6_addr, &inp->in6p_faddr,
5639 		    sizeof(struct in6_addr));
5640 		/* clear embedded scope ID */
5641 		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_saddr.sin6.sin6_addr)) {
5642 			nfr.nfr_saddr.sin6.sin6_addr.s6_addr16[1] = 0;
5643 		}
5644 		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_daddr.sin6.sin6_addr)) {
5645 			nfr.nfr_daddr.sin6.sin6_addr.s6_addr16[1] = 0;
5646 		}
5647 	}
5648 
5649 	nfr.nfr_nx_port = 1;
5650 	nfr.nfr_ip_protocol = IPPROTO_TCP;
5651 	nfr.nfr_transport_protocol = IPPROTO_TCP;
5652 	nfr.nfr_flags = NXFLOWREQF_ASIS;
5653 	nfr.nfr_epid = (so != NULL ? so->last_pid : 0);
5654 	if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
5655 		nfr.nfr_port_reservation = inp->inp_netns_token;
5656 		nfr.nfr_flags |= NXFLOWREQF_EXT_PORT_RSV;
5657 	}
5658 	ASSERT(inp->inp_flowhash != 0);
5659 	nfr.nfr_inp_flowhash = inp->inp_flowhash;
5660 
5661 	uuid_generate_random(nfr.nfr_flow_uuid);
5662 	err = kern_nexus_flow_add(kern_nexus_shared_controller(), fsw_uuid,
5663 	    &nfr, sizeof(nfr));
5664 
5665 	if (err == 0) {
5666 		uuid_copy(tp->t_fsw_uuid, fsw_uuid);
5667 		uuid_copy(tp->t_flow_uuid, nfr.nfr_flow_uuid);
5668 	}
5669 
5670 	TCP_LOG_FSW_FLOW(tp, "add err %d\n", err);
5671 }
5672 
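/*
 * Tear down the flowswitch flow added by tcp_add_fsw_flow(), tolerating
 * the cases where the flow or the nexus is already gone.
 */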
5673 void
5674 tcp_del_fsw_flow(struct tcpcb *tp)
5675 {
5676 	if (uuid_is_null(tp->t_fsw_uuid) || uuid_is_null(tp->t_flow_uuid)) {
5677 		return;
5678 	}
5679 
5680 	struct nx_flow_req nfr;
5681 	uuid_copy(nfr.nfr_flow_uuid, tp->t_flow_uuid);
5682 
5683 	/* It's possible for this call to fail if the nexus has detached */
5684 	int err = kern_nexus_flow_del(kern_nexus_shared_controller(),
5685 	    tp->t_fsw_uuid, &nfr, sizeof(nfr));
5686 	VERIFY(err == 0 || err == ENOENT || err == ENXIO);
5687 
5688 	uuid_clear(tp->t_fsw_uuid);
5689 	uuid_clear(tp->t_flow_uuid);
5690 
5691 	TCP_LOG_FSW_FLOW(tp, "del err %d\n", err);
5692 }
5693 
5694 #endif /* SKYWALK */
5695