xref: /xnu-11215.81.4/bsd/netinet/tcp_subr.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
61  */
62 /*
63  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64  * support for mandatory and extensible security protections.  This notice
65  * is included in support of clause 2.2 (b) of the Apple Public License,
66  * Version 2.0.
67  */
68 
69 #include "tcp_includes.h"
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/domain.h>
78 #include <sys/proc.h>
79 #include <sys/kauth.h>
80 #include <sys/socket.h>
81 #include <sys/socketvar.h>
82 #include <sys/protosw.h>
83 #include <sys/random.h>
84 #include <sys/syslog.h>
85 #include <sys/mcache.h>
86 #include <kern/locks.h>
87 #include <kern/zalloc.h>
88 
89 #include <dev/random/randomdev.h>
90 
91 #include <net/route.h>
92 #include <net/if.h>
93 #include <net/content_filter.h>
94 #include <net/ntstat.h>
95 #include <net/multi_layer_pkt_log.h>
96 
97 #define tcp_minmssoverload fring
98 #define _IP_VHL
99 #include <netinet/in.h>
100 #include <netinet/in_systm.h>
101 #include <netinet/ip.h>
102 #include <netinet/ip_icmp.h>
103 #include <netinet/ip6.h>
104 #include <netinet/icmp6.h>
105 #include <netinet/in_pcb.h>
106 #include <netinet6/in6_pcb.h>
107 #include <netinet/in_var.h>
108 #include <netinet/ip_var.h>
109 #include <netinet/icmp_var.h>
110 #include <netinet6/ip6_var.h>
111 #include <netinet/mptcp_var.h>
112 #include <netinet/tcp.h>
113 #include <netinet/tcp_fsm.h>
114 #include <netinet/tcp_seq.h>
115 #include <netinet/tcp_timer.h>
116 #include <netinet/tcp_var.h>
117 #include <netinet/tcp_cc.h>
118 #include <netinet/tcp_cache.h>
119 #include <kern/thread_call.h>
120 
121 #include <netinet6/tcp6_var.h>
122 #include <netinet/tcpip.h>
123 #include <netinet/tcp_log.h>
124 
125 #include <netinet6/ip6protosw.h>
126 #include <netinet6/esp.h>
127 
128 #if IPSEC
129 #include <netinet6/ipsec.h>
130 #include <netinet6/ipsec6.h>
131 #endif /* IPSEC */
132 
133 #if NECP
134 #include <net/necp.h>
135 #endif /* NECP */
136 
137 #undef tcp_minmssoverload
138 
139 #include <net/sockaddr_utils.h>
140 
141 #include <corecrypto/ccaes.h>
142 #include <libkern/crypto/aes.h>
143 #include <libkern/crypto/md5.h>
144 #include <sys/kdebug.h>
145 #include <mach/sdt.h>
146 #include <pexpert/pexpert.h>
147 #include <mach/mach_time.h>
148 
149 #define DBG_FNC_TCP_CLOSE       NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
150 
151 static tcp_cc tcp_ccgen;
152 
153 extern struct tcptimerlist tcp_timer_list;
154 extern struct tcptailq tcp_tw_tailq;
155 
156 extern int tcp_awdl_rtobase;
157 
158 SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED,
159     int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size");
160 
161 SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt,
162     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS,
163     "Default TCP Maximum Segment Size for IPv6");
164 
165 int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int,
166     struct sysctl_req *);
167 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key, CTLTYPE_STRING | CTLFLAG_WR,
168     0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
169 
170 /* Current count of half-open TFO connections */
171 int     tcp_tfo_halfcnt = 0;
172 
173 /* Maximum backlog of half-open TFO connections */
174 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen_backlog,
175     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10,
176     "Backlog queue for half-open TFO connections");
177 
178 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED,
179     int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER,
180     "Enable TCP Fastopen (RFC 7413)");
181 
182 SYSCTL_SKMEM_TCP_INT(OID_AUTO, now_init, CTLFLAG_RD | CTLFLAG_LOCKED,
183     uint32_t, tcp_now_init, 0, "Initial tcp now value");
184 
185 SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED,
186     uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in microseconds");
187 
188 /*
189  * Minimum MSS we accept and use. This prevents DoS attacks where
190  * we are forced to a ridiculously low MSS like 20 and send hundreds
191  * of packets instead of one. The effect scales with the available
192  * bandwidth and quickly saturates the CPU and network interface
193  * with packet generation and sending. Set to zero to disable MINMSS
194  * checking. This setting keeps us from sending packets that are too small.
195  */
196 SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
197     int, tcp_minmss, TCP_MINMSS, "Minimum TCP Maximum Segment Size");
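/*
 * [Editorial illustration, not part of the original file.] The scaling the
 * comment above describes: at a fixed byte rate, the packet rate grows as
 * the MSS shrinks, so an MSS of 20 costs ~73x more packets than one of 1460.
 */
static unsigned long
packets_per_sec_demo(unsigned long bytes_per_sec, unsigned long mss)
{
	/* 100 Mbit/s = 12,500,000 B/s: 12500000/1460 ~= 8561 pkt/s,
	 * while 12500000/20 == 625,000 pkt/s */
	return bytes_per_sec / mss;
}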
198 
199 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
200     &tcbinfo.ipi_count, 0, "Number of active PCBs");
201 
202 SYSCTL_SKMEM_TCP_INT(OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED,
203     static int, icmp_may_rst, 1,
204     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
205 
206 int             tcp_do_timestamps = 1;
207 #if (DEVELOPMENT || DEBUG)
208 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_timestamps,
209     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_timestamps, 0, "enable TCP timestamps");
210 #endif /* (DEVELOPMENT || DEBUG) */
211 
212 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
213     int, tcp_TCPTV_MIN, 100, "min rtt value allowed");
214 
215 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rexmt_slop, CTLFLAG_RW,
216     int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout");
217 
218 SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
219     __private_extern__ int, tcp_use_randomport, 0,
220     "Randomize TCP port numbers");
221 
222 SYSCTL_SKMEM_TCP_INT(OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
223     __private_extern__ int, tcp_win_scale, 3, "Window scaling factor");
224 
225 #if (DEVELOPMENT || DEBUG)
226 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
227     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
228     "Initalize RTT from route cache");
229 #else
230 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
231     CTLFLAG_RD | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
232     "Initalize RTT from route cache");
233 #endif /* (DEVELOPMENT || DEBUG) */
234 
235 static int tso_debug = 0;
236 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
237     &tso_debug, 0, "TSO verbosity");
238 
239 static int tcp_rxt_seg_max = 1024;
240 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rxt_seg_max, CTLFLAG_RW | CTLFLAG_LOCKED,
241     &tcp_rxt_seg_max, 0, "");
242 
243 static unsigned long tcp_rxt_seg_drop = 0;
244 SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, rxt_seg_drop, CTLFLAG_RD | CTLFLAG_LOCKED,
245     &tcp_rxt_seg_drop, "");
246 
247 static void     tcp_notify(struct inpcb *, int);
248 
249 static KALLOC_TYPE_DEFINE(tcp_bwmeas_zone, struct bwmeas, NET_KT_DEFAULT);
250 KALLOC_TYPE_DEFINE(tcp_reass_zone, struct tseg_qent, NET_KT_DEFAULT);
251 KALLOC_TYPE_DEFINE(tcp_rxt_seg_zone, struct tcp_rxt_seg, NET_KT_DEFAULT);
252 KALLOC_TYPE_DEFINE(tcp_seg_sent_zone, struct tcp_seg_sent, NET_KT_DEFAULT);
253 
254 extern int slowlink_wsize;      /* window correction for slow links */
255 extern int path_mtu_discovery;
256 
257 uint32_t tcp_now_remainder_us = 0;  /* remaining microseconds for tcp_now */
258 
259 static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb);
260 
261 #define TCP_BWMEAS_BURST_MINSIZE 6
262 #define TCP_BWMEAS_BURST_MAXSIZE 25
263 
264 /*
265  * Target size of TCP PCB hash tables. Must be a power of two.
266  *
267  * Note that this can be overridden by the kernel environment
268  * variable net.inet.tcp.tcbhashsize
269  */
270 #ifndef TCBHASHSIZE
271 #define TCBHASHSIZE     CONFIG_TCBHASHSIZE
272 #endif
273 
274 __private_extern__ int  tcp_tcbhashsize = TCBHASHSIZE;
275 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED,
276     &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
277 
278 /*
279  * This is the actual shape of what we allocate using the zone
280  * allocator.  Doing it this way allows us to protect both structures
281  * using the same generation count, and also eliminates the overhead
282  * of allocating tcpcbs separately.  By hiding the structure here,
283  * we avoid changing most of the rest of the code (although it needs
284  * to be changed, eventually, for greater efficiency).
285  */
286 #define ALIGNMENT       32
287 struct  inp_tp {
288 	struct  inpcb   inp;
289 	struct  tcpcb   tcb __attribute__((aligned(ALIGNMENT)));
290 };
291 #undef ALIGNMENT
292 
293 static KALLOC_TYPE_DEFINE(tcpcbzone, struct inp_tp, NET_KT_DEFAULT);
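/*
 * [Editorial sketch, not in the original file.] Because struct inpcb is the
 * first member of struct inp_tp, a single allocation from tcpcbzone yields
 * both control blocks, and the tcpcb is recovered with a cast plus member
 * access; tcp_newtcpcb() below does exactly this for non-cached sockets.
 */
static inline struct tcpcb *
inp_to_tcpcb_demo(struct inpcb *inp)
{
	struct inp_tp *it = (struct inp_tp *)(void *)inp; /* inp is first member */
	return &it->tcb;
}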
294 
295 int  get_inpcb_str_size(void);
296 int  get_tcp_str_size(void);
297 
298 os_log_t tcp_mpkl_log_object = NULL;
299 
300 static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
301 
302 int tcp_notsent_lowat_check(struct socket *so);
303 static void tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
304     struct if_lim_perf_stat *stat);
305 static void tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
306     struct if_tcp_ecn_perf_stat *stat);
307 
308 static aes_encrypt_ctx tfo_ctx; /* Crypto-context for TFO */
309 
310 void
311 tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out, size_t blk_size)
312 {
313 	u_char in[CCAES_BLOCK_SIZE];
314 	int isipv6 = inp->inp_vflag & INP_IPV6;
315 
316 	VERIFY(blk_size == CCAES_BLOCK_SIZE);
317 
318 	bzero(&in[0], CCAES_BLOCK_SIZE);
319 	bzero(&out[0], CCAES_BLOCK_SIZE);
320 
321 	if (isipv6) {
322 		memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr));
323 	} else {
324 		memcpy(in, &inp->inp_faddr, sizeof(struct in_addr));
325 	}
326 
327 	aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx);
328 }
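/*
 * [Editorial sketch, not in the original file.] With a single block and a
 * NULL (zero) IV, the aes_encrypt_cbc() call above degenerates to one plain
 * AES-128 block encryption, so the TFO cookie is AES-128_key(peer address,
 * zero-padded to 16 bytes).  A userland equivalent, assuming a hypothetical
 * one-block primitive aes128_encrypt_block():
 */
#include <stdint.h>
#include <string.h>

/* hypothetical stand-in for the kernel's AES context + aes_encrypt_cbc() */
void aes128_encrypt_block(const uint8_t key[16], const uint8_t in[16],
    uint8_t out[16]);

static void
tfo_cookie_demo(const uint8_t key[16], const void *peer_addr, size_t addrlen,
    uint8_t cookie[16])
{
	uint8_t blk[16] = { 0 };                  /* zero padding, as above */
	memcpy(blk, peer_addr, addrlen > 16 ? 16 : addrlen);
	aes128_encrypt_block(key, blk, cookie);
}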
329 
330 __private_extern__ int
331 tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1,
332     __unused int arg2, struct sysctl_req *req)
333 {
334 	int error = 0;
335 	/*
336 	 * TFO-key is expressed as a string in hex format
337 	 *  +1 to account for the \0 char
338 	 *  +1 because sysctl_io_string() expects a string length but the sysctl command
339 	 *     now includes the terminating \0 in newlen -- see rdar://77205344
340 	 */
341 	char keystring[TCP_FASTOPEN_KEYLEN * 2 + 2];
342 	u_int32_t key[TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)];
343 	int i;
344 
345 	/*
346 	 * sysctl_io_string copies keystring into the oldptr of the sysctl_req.
347 	 * Make sure everything is zero, to avoid putting garbage in there or
348 	 * leaking the stack.
349 	 */
350 	bzero(keystring, sizeof(keystring));
351 
352 	error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL);
353 	if (error) {
354 		os_log(OS_LOG_DEFAULT,
355 		    "%s: sysctl_io_string() error %d, req->newlen %lu, sizeof(keystring) %lu",
356 		    __func__, error, req->newlen, sizeof(keystring));
357 		goto exit;
358 	}
359 	if (req->newptr == USER_ADDR_NULL) {
360 		goto exit;
361 	}
362 
363 	if (strlen(keystring) != TCP_FASTOPEN_KEYLEN * 2) {
364 		os_log(OS_LOG_DEFAULT,
365 		    "%s: strlen(keystring) %lu != TCP_FASTOPEN_KEYLEN * 2 %u, newlen %lu",
366 		    __func__, strlen(keystring), TCP_FASTOPEN_KEYLEN * 2, req->newlen);
367 		error = EINVAL;
368 		goto exit;
369 	}
370 
371 	for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) {
372 		/*
373 		 * We jump over the keystring in 8-character (4 bytes in hex)
374 		 * steps
375 		 */
376 		if (sscanf(&keystring[i * 8], "%8x", &key[i]) != 1) {
377 			error = EINVAL;
378 			os_log(OS_LOG_DEFAULT,
379 			    "%s: sscanf() != 1, error EINVAL", __func__);
380 			goto exit;
381 		}
382 	}
383 
384 	aes_encrypt_key128((u_char *)key, &tfo_ctx);
385 
386 exit:
387 	return error;
388 }
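/*
 * [Editorial usage note, not in the original file.] The handler above
 * accepts exactly TCP_FASTOPEN_KEYLEN * 2 hex characters (32 for the
 * 16-byte AES-128 key) and rejects anything else with EINVAL, e.g.:
 *
 *     sysctl net.inet.tcp.fastopen_key=00112233445566778899aabbccddeeff
 */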
389 
390 int
391 get_inpcb_str_size(void)
392 {
393 	return sizeof(struct inpcb);
394 }
395 
396 int
397 get_tcp_str_size(void)
398 {
399 	return sizeof(struct tcpcb);
400 }
401 
402 static int scale_to_powerof2(int size);
403 
404 /*
405  * This helper routine returns one of the following scaled values of size:
406  * 1. The power of two rounded down from size, if size is not a power of
407  *    two and rounding up would overflow.
408  * OR
409  * 2. The power of two rounded up from size, if size is not a power of
410  *    two and rounding up does not overflow.
411  * OR
412  * 3. size itself, if it is already a power of two.
413  */
414 static int
415 scale_to_powerof2(int size)
416 {
417 	/* Handle special case of size = 0 */
418 	int ret = size ? size : 1;
419 
420 	if (!powerof2(ret)) {
421 		while (!powerof2(size)) {
422 			/*
423 			 * Clear out least significant
424 			 * set bit till size is left with
425 			 * its highest set bit at which point
426 			 * it is rounded down power of two.
427 			 */
428 			size = size & (size - 1);
429 		}
430 
431 		/* Check for overflow when rounding up */
432 		if (0 == (size << 1)) {
433 			ret = size;
434 		} else {
435 			ret = size << 1;
436 		}
437 	}
438 
439 	return ret;
440 }
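/*
 * [Editorial examples, not in the original file.] Expected results of
 * scale_to_powerof2(), assuming a 32-bit int:
 */
#include <assert.h>

static void
scale_to_powerof2_demo(void)
{
	assert(scale_to_powerof2(0) == 1);      /* special case */
	assert(scale_to_powerof2(512) == 512);  /* already a power of two */
	assert(scale_to_powerof2(600) == 1024); /* rounded down to 512, then doubled */
}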
441 
442 /*
443  * Round the floating point to the next integer
444  * Eg. 1.3 will round up to 2.
445  */
446 uint32_t
447 tcp_ceil(double a)
448 {
449 	double res = (uint32_t) a;
450 	return (uint32_t)(res + (res < a));
451 }
452 
453 uint32_t
454 tcp_round_to(uint32_t val, uint32_t round)
455 {
456 	/*
457 	 * Round up or down to the nearest multiple, based on the midpoint:
458 	 * rounding to a multiple of 10, 16 rounds to 20 and 14 rounds to 10.
459 	 */
460 	return ((val + (round / 2)) / round) * round;
461 }
462 
463 /*
464  * Round up to the next multiple of base.
465  * Eg. for a base of 64, 65 will become 128,
466  * 2896 will become 2944.
467  */
468 uint32_t
469 tcp_round_up(uint32_t val, uint32_t base)
470 {
471 	if (base == 1 || val % base == 0) {
472 		return val;
473 	}
474 
475 	return ((val + base) / base) * base;
476 }
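/*
 * [Editorial examples, not in the original file.] Behavior of the three
 * rounding helpers above:
 */
#include <assert.h>

static void
tcp_rounding_demo(void)
{
	assert(tcp_ceil(1.3) == 2);             /* ceiling */
	assert(tcp_ceil(2.0) == 2);             /* exact values unchanged */
	assert(tcp_round_to(16, 10) == 20);     /* nearest multiple */
	assert(tcp_round_to(14, 10) == 10);
	assert(tcp_round_up(65, 64) == 128);    /* next multiple up */
	assert(tcp_round_up(2896, 64) == 2944);
	assert(tcp_round_up(128, 64) == 128);   /* multiples unchanged */
}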
477 
478 uint32_t
479 ntoh24(u_char *p)
480 {
481 	uint32_t v;
482 
483 	v  = (uint32_t)(p[0] << 16);
484 	v |= (uint32_t)(p[1] << 8);
485 	v |= (uint32_t)(p[2] << 0);
486 	return v;
487 }
488 
489 uint32_t
490 tcp_packets_this_ack(struct tcpcb *tp, uint32_t acked)
491 {
492 	return acked / tp->t_maxseg +
493 	       (((acked % tp->t_maxseg) != 0) ? 1 : 0);
494 }
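/*
 * [Editorial note, not in the original file.] tcp_packets_this_ack() is a
 * ceiling division by the MSS; the expression above is equivalent to
 *
 *     (acked + tp->t_maxseg - 1) / tp->t_maxseg
 *
 * e.g. acked == 3000 with t_maxseg == 1448 counts as 3 packets.
 */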
495 
496 static void
497 tcp_tfo_init(void)
498 {
499 	u_char key[TCP_FASTOPEN_KEYLEN];
500 
501 	read_frandom(key, sizeof(key));
502 	aes_encrypt_key128(key, &tfo_ctx);
503 }
504 
505 static u_char isn_secret[32];
506 
507 /*
508  * TCP initialization
509  */
510 void
511 tcp_init(struct protosw *pp, struct domain *dp)
512 {
513 #pragma unused(dp)
514 	static int tcp_initialized = 0;
515 	struct inpcbinfo *pcbinfo;
516 
517 	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
518 
519 	if (tcp_initialized) {
520 		return;
521 	}
522 	tcp_initialized = 1;
523 
524 #if DEBUG || DEVELOPMENT
525 	(void) PE_parse_boot_argn("tcp_rxt_seg_max", &tcp_rxt_seg_max,
526 	    sizeof(tcp_rxt_seg_max));
527 #endif /* DEBUG || DEVELOPMENT */
528 
529 	tcp_ccgen = 1;
530 	tcp_keepinit = TCPTV_KEEP_INIT;
531 	tcp_keepidle = TCPTV_KEEP_IDLE;
532 	tcp_keepintvl = TCPTV_KEEPINTVL;
533 	tcp_keepcnt = TCPTV_KEEPCNT;
534 	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
535 	tcp_msl = TCPTV_MSL;
536 
537 	microuptime(&tcp_uptime);
538 	read_frandom(&tcp_now, sizeof(tcp_now));
539 
540 	/* Starts tcp internal clock at a random value */
541 	tcp_now = tcp_now & 0x3fffffff;
542 
543 	/* expose initial uptime/now via sysctl for utcp to keep time sync */
544 	tcp_now_init = tcp_now;
545 	tcp_microuptime_init =
546 	    (uint32_t)(tcp_uptime.tv_usec + (tcp_uptime.tv_sec * USEC_PER_SEC));
547 	SYSCTL_SKMEM_UPDATE_FIELD(tcp.microuptime_init, tcp_microuptime_init);
548 	SYSCTL_SKMEM_UPDATE_FIELD(tcp.now_init, tcp_now_init);
549 
550 	tcp_tfo_init();
551 
552 	LIST_INIT(&tcb);
553 	tcbinfo.ipi_listhead = &tcb;
554 
555 	pcbinfo = &tcbinfo;
556 
557 	/*
558 	 * allocate group, lock attributes and lock for tcp pcb mutexes
559 	 */
560 	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("tcppcb",
561 	    LCK_GRP_ATTR_NULL);
562 	lck_attr_setdefault(&pcbinfo->ipi_lock_attr);
563 	lck_rw_init(&pcbinfo->ipi_lock, pcbinfo->ipi_lock_grp,
564 	    &pcbinfo->ipi_lock_attr);
565 
566 	if (tcp_tcbhashsize == 0) {
567 		/* Set to default */
568 		tcp_tcbhashsize = 512;
569 	}
570 
571 	if (!powerof2(tcp_tcbhashsize)) {
572 		int old_hash_size = tcp_tcbhashsize;
573 		tcp_tcbhashsize = scale_to_powerof2(tcp_tcbhashsize);
574 		/* Lower limit of 16  */
575 		if (tcp_tcbhashsize < 16) {
576 			tcp_tcbhashsize = 16;
577 		}
578 		printf("WARNING: TCB hash size not a power of 2, "
579 		    "scaled from %d to %d.\n",
580 		    old_hash_size,
581 		    tcp_tcbhashsize);
582 	}
583 
584 	hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_hashbase,
585 	    tcbinfo.ipi_hashbase_count);
586 	tcbinfo.ipi_hashmask = tcbinfo.ipi_hashbase_count - 1;
587 	hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_porthashbase,
588 	    tcbinfo.ipi_porthashbase_count);
589 	tcbinfo.ipi_porthashmask = tcbinfo.ipi_porthashbase_count - 1;
590 	tcbinfo.ipi_zone = tcpcbzone;
591 
592 	tcbinfo.ipi_gc = tcp_gc;
593 	tcbinfo.ipi_timer = tcp_itimer;
594 	in_pcbinfo_attach(&tcbinfo);
595 
596 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
597 	if (max_protohdr < TCP_MINPROTOHDR) {
598 		max_protohdr = (int)P2ROUNDUP(TCP_MINPROTOHDR, sizeof(uint32_t));
599 	}
600 	if (max_linkhdr + max_protohdr > MCLBYTES) {
601 		panic("tcp_init");
602 	}
603 #undef TCP_MINPROTOHDR
604 
605 	/* Initialize time wait and timer lists */
606 	TAILQ_INIT(&tcp_tw_tailq);
607 
608 	bzero(&tcp_timer_list, sizeof(tcp_timer_list));
609 	LIST_INIT(&tcp_timer_list.lhead);
610 	/*
611 	 * allocate group and attribute for the tcp timer list
612 	 */
613 	tcp_timer_list.mtx_grp = lck_grp_alloc_init("tcptimerlist",
614 	    LCK_GRP_ATTR_NULL);
615 	lck_mtx_init(&tcp_timer_list.mtx, tcp_timer_list.mtx_grp,
616 	    LCK_ATTR_NULL);
617 
618 	tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL);
619 	if (tcp_timer_list.call == NULL) {
620 		panic("failed to allocate call entry 1 in tcp_init");
621 	}
622 
623 	/* Initialize TCP Cache */
624 	tcp_cache_init();
625 
626 	tcp_mpkl_log_object = MPKL_CREATE_LOGOBJECT("com.apple.xnu.tcp");
627 	if (tcp_mpkl_log_object == NULL) {
628 		panic("MPKL_CREATE_LOGOBJECT failed");
629 	}
630 
631 	if (PE_parse_boot_argn("tcp_log", &tcp_log_enable_flags, sizeof(tcp_log_enable_flags))) {
632 		os_log(OS_LOG_DEFAULT, "tcp_init: set tcp_log_enable_flags to 0x%x", tcp_log_enable_flags);
633 	}
634 
635 	/*
636 	 * If more than 4GB of actual memory is available, increase the
637 	 * maximum allowed receive and send socket buffer size.
638 	 */
639 	if (mem_actual >= (1ULL << (GBSHIFT + 2))) {
640 		if (serverperfmode) {
641 			tcp_autorcvbuf_max = 8 * 1024 * 1024;
642 			tcp_autosndbuf_max = 8 * 1024 * 1024;
643 		} else {
644 			tcp_autorcvbuf_max = 4 * 1024 * 1024;
645 			tcp_autosndbuf_max = 4 * 1024 * 1024;
646 		}
647 
648 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.autorcvbufmax, tcp_autorcvbuf_max);
649 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.autosndbufmax, tcp_autosndbuf_max);
650 	}
651 
652 	/* Initialize the TCP CCA array */
653 	tcp_cc_init();
654 
655 	read_frandom(&isn_secret, sizeof(isn_secret));
656 }
657 
658 /*
659  * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
660  * tcp_template used to store this data in mbufs, but we now recopy it out
661  * of the tcpcb each time to conserve mbufs.
662  */
663 void
664 tcp_fillheaders(struct mbuf *m, struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
665 {
666 	struct inpcb *inp = tp->t_inpcb;
667 	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
668 
669 	if ((inp->inp_vflag & INP_IPV6) != 0) {
670 		struct ip6_hdr *ip6;
671 
672 		ip6 = (struct ip6_hdr *)ip_ptr;
673 		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
674 		    (inp->inp_flow & IPV6_FLOWINFO_MASK);
675 		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
676 		    (IPV6_VERSION & IPV6_VERSION_MASK);
677 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
678 		ip6->ip6_nxt = IPPROTO_TCP;
679 		ip6->ip6_hlim = 0;
680 		ip6->ip6_src = inp->in6p_laddr;
681 		ip6->ip6_dst = inp->in6p_faddr;
682 		if (m->m_flags & M_PKTHDR) {
683 			uint32_t lifscope = inp->inp_lifscope != 0 ? inp->inp_lifscope : inp->inp_fifscope;
684 			uint32_t fifscope = inp->inp_fifscope != 0 ? inp->inp_fifscope : inp->inp_lifscope;
685 			ip6_output_setsrcifscope(m, lifscope, NULL);
686 			ip6_output_setdstifscope(m, fifscope, NULL);
687 		}
688 		tcp_hdr->th_sum = in6_pseudo(&inp->in6p_laddr, &inp->in6p_faddr,
689 		    htonl(sizeof(struct tcphdr) + IPPROTO_TCP));
690 	} else {
691 		struct ip *ip = (struct ip *) ip_ptr;
692 
693 		ip->ip_vhl = IP_VHL_BORING;
694 		ip->ip_tos = 0;
695 		ip->ip_len = 0;
696 		ip->ip_id = 0;
697 		ip->ip_off = 0;
698 		ip->ip_ttl = 0;
699 		ip->ip_sum = 0;
700 		ip->ip_p = IPPROTO_TCP;
701 		ip->ip_src = inp->inp_laddr;
702 		ip->ip_dst = inp->inp_faddr;
703 		tcp_hdr->th_sum =
704 		    in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
705 		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
706 	}
707 
708 	tcp_hdr->th_sport = inp->inp_lport;
709 	tcp_hdr->th_dport = inp->inp_fport;
710 	tcp_hdr->th_seq = 0;
711 	tcp_hdr->th_ack = 0;
712 	tcp_hdr->th_x2 = 0;
713 	tcp_hdr->th_off = 5;
714 	tcp_hdr->th_flags = 0;
715 	tcp_hdr->th_win = 0;
716 	tcp_hdr->th_urp = 0;
717 }
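/*
 * [Editorial sketch, not in the original file.] tcp_fillheaders() seeds
 * th_sum with the pseudo-header's partial one's-complement sum (note: not
 * complemented), so that the checksum can later be completed over the TCP
 * header and payload, in hardware or in software.  Folding the IPv4
 * pseudo-header to 16 bits looks roughly like this:
 */
#include <stdint.h>
#include <arpa/inet.h>          /* htons(); IPPROTO_TCP via <netinet/in.h> */

static uint16_t
pseudo_hdr_sum_demo(uint32_t src_be, uint32_t dst_be, uint16_t tcp_len_be)
{
	uint32_t sum = (src_be >> 16) + (src_be & 0xffff) +
	    (dst_be >> 16) + (dst_be & 0xffff) +
	    tcp_len_be + htons(IPPROTO_TCP);
	while (sum >> 16) {                     /* end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	}
	return (uint16_t)sum;                   /* left uncomplemented on purpose */
}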
718 
719 /*
720  * Create template to be used to send tcp packets on a connection.
721  * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
722  * use for this function is in keepalives, which use tcp_respond.
723  */
724 struct tcptemp *
725 tcp_maketemplate(struct tcpcb *tp, struct mbuf **mp)
726 {
727 	struct mbuf *m;
728 	struct tcptemp *n;
729 
730 	*mp = m = m_get(M_DONTWAIT, MT_HEADER);
731 	if (m == NULL) {
732 		return NULL;
733 	}
734 	m->m_len = sizeof(struct tcptemp);
735 	n = mtod(m, struct tcptemp *);
736 
737 	tcp_fillheaders(m, tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
738 	return n;
739 }
740 
741 /*
742  * Send a single message to the TCP at address specified by
743  * the given TCP/IP header.  If m == 0, then we make a copy
744  * of the tcpiphdr at ti and send directly to the addressed host.
745  * This is used to force keep alive messages out using the TCP
746  * template for a connection.  If flags are given then we send
747  * a message back to the TCP which originated the segment ti,
748  * and discard the mbuf containing it and any other attached mbufs.
749  *
750  * In any case the ack and sequence number of the transmitted
751  * segment are as specified by the parameters.
752  *
753  * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
754  */
755 void
756 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
757     tcp_seq ack, tcp_seq seq, uint8_t flags, struct tcp_respond_args *tra)
758 {
759 	uint16_t tlen;
760 	int win = 0;
761 	struct route *ro = 0;
762 	struct route sro;
763 	struct ip *ip;
764 	struct tcphdr *nth;
765 	struct route_in6 *ro6 = 0;
766 	struct route_in6 sro6;
767 	struct ip6_hdr *ip6;
768 	int isipv6;
769 	struct ifnet *outif;
770 	int sotc = SO_TC_UNSPEC;
771 	bool check_qos_marking_again = FALSE;
772 	uint32_t sifscope = IFSCOPE_NONE, fifscope = IFSCOPE_NONE;
773 
774 	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
775 	ip6 = ipgen;
776 	ip = ipgen;
777 
778 	if (tp) {
779 		check_qos_marking_again = tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE ? FALSE : TRUE;
780 		sifscope = tp->t_inpcb->inp_lifscope;
781 		fifscope = tp->t_inpcb->inp_fifscope;
782 		if (!(flags & TH_RST)) {
783 			win = tcp_sbspace(tp);
784 			if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) {
785 				win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
786 			}
787 		}
788 		if (isipv6) {
789 			ro6 = &tp->t_inpcb->in6p_route;
790 		} else {
791 			ro = &tp->t_inpcb->inp_route;
792 		}
793 	} else {
794 		if (isipv6) {
795 			ro6 = &sro6;
796 			bzero(ro6, sizeof(*ro6));
797 		} else {
798 			ro = &sro;
799 			bzero(ro, sizeof(*ro));
800 		}
801 	}
802 	if (m == 0) {
803 		m = m_gethdr(M_DONTWAIT, MT_HEADER);    /* MAC-OK */
804 		if (m == NULL) {
805 			return;
806 		}
807 		tlen = 0;
808 		m->m_data += max_linkhdr;
809 		if (isipv6) {
810 			VERIFY((MHLEN - max_linkhdr) >=
811 			    (sizeof(*ip6) + sizeof(*nth)));
812 			bcopy((caddr_t)ip6, mtod(m, caddr_t),
813 			    sizeof(struct ip6_hdr));
814 			ip6 = mtod(m, struct ip6_hdr *);
815 			nth = (struct tcphdr *)(void *)(ip6 + 1);
816 		} else {
817 			VERIFY((MHLEN - max_linkhdr) >=
818 			    (sizeof(*ip) + sizeof(*nth)));
819 			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
820 			ip = mtod(m, struct ip *);
821 			nth = (struct tcphdr *)(void *)(ip + 1);
822 		}
823 		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
824 #if MPTCP
825 		if ((tp) && (tp->t_mpflags & TMPF_RESET)) {
826 			flags = (TH_RST | TH_ACK);
827 		} else
828 #endif
829 		flags = TH_ACK;
830 	} else {
831 		m_freem(m->m_next);
832 		m->m_next = 0;
833 		m->m_data = (uintptr_t)ipgen;
834 		/* m_len is set later */
835 		tlen = 0;
836 #define xchg(a, b, type) { type t; t = a; a = b; b = t; }
837 		if (isipv6) {
838 			ip6_getsrcifaddr_info(m, &sifscope, NULL);
839 			ip6_getdstifaddr_info(m, &fifscope, NULL);
840 			if (!in6_embedded_scope) {
841 				m->m_pkthdr.pkt_flags &= ~PKTF_IFAINFO;
842 			}
843 			/* Expect 32-bit aligned IP on strict-align platforms */
844 			IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6);
845 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
846 			nth = (struct tcphdr *)(void *)(ip6 + 1);
847 		} else {
848 			/* Expect 32-bit aligned IP on strict-align platforms */
849 			IP_HDR_STRICT_ALIGNMENT_CHECK(ip);
850 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
851 			nth = (struct tcphdr *)(void *)(ip + 1);
852 		}
853 		if (th != nth) {
854 			/*
855 		 * This is usually the case when an extension header
856 			 * exists between the IPv6 header and the
857 			 * TCP header.
858 			 */
859 			nth->th_sport = th->th_sport;
860 			nth->th_dport = th->th_dport;
861 		}
862 		xchg(nth->th_dport, nth->th_sport, n_short);
863 #undef xchg
864 	}
865 	if (isipv6) {
866 		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) +
867 		    tlen));
868 		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
869 		ip6_output_setsrcifscope(m, sifscope, NULL);
870 		ip6_output_setdstifscope(m, fifscope, NULL);
871 	} else {
872 		tlen += sizeof(struct tcpiphdr);
873 		ip->ip_len = tlen;
874 		ip->ip_ttl = (uint8_t)ip_defttl;
875 	}
876 	m->m_len = tlen;
877 	m->m_pkthdr.len = tlen;
878 	m->m_pkthdr.rcvif = 0;
879 	if (tra->keep_alive) {
880 		m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
881 	}
882 
883 	nth->th_seq = htonl(seq);
884 	nth->th_ack = htonl(ack);
885 	nth->th_x2 = 0;
886 	nth->th_off = sizeof(struct tcphdr) >> 2;
887 	nth->th_flags = flags;
888 	if (tp) {
889 		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
890 	} else {
891 		nth->th_win = htons((u_short)win);
892 	}
893 	nth->th_urp = 0;
894 	if (isipv6) {
895 		nth->th_sum = 0;
896 		nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
897 		    htonl((tlen - sizeof(struct ip6_hdr)) + IPPROTO_TCP));
898 		m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
899 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
900 		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
901 		    ro6 && ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
902 	} else {
903 		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
904 		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
905 		m->m_pkthdr.csum_flags = CSUM_TCP;
906 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
907 	}
908 #if NECP
909 	necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0, 0);
910 #endif /* NECP */
911 
912 #if IPSEC
913 	if (tp != NULL && tp->t_inpcb->inp_sp != NULL &&
914 	    ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
915 		m_freem(m);
916 		return;
917 	}
918 #endif
919 
920 	if (tp != NULL) {
921 		u_int32_t svc_flags = 0;
922 		if (isipv6) {
923 			svc_flags |= PKT_SCF_IPV6;
924 		}
925 		sotc = tp->t_inpcb->inp_socket->so_traffic_class;
926 		if ((flags & TH_RST) == 0) {
927 			set_packet_service_class(m, tp->t_inpcb->inp_socket,
928 			    sotc, svc_flags);
929 		} else {
930 			m_set_service_class(m, MBUF_SC_BK_SYS);
931 		}
932 
933 		/* Embed flowhash and flow control flags */
934 		m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
935 		m->m_pkthdr.pkt_flowid = tp->t_inpcb->inp_flowhash;
936 		m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
937 		m->m_pkthdr.pkt_proto = IPPROTO_TCP;
938 		m->m_pkthdr.tx_tcp_pid = tp->t_inpcb->inp_socket->last_pid;
939 		m->m_pkthdr.tx_tcp_e_pid = tp->t_inpcb->inp_socket->e_pid;
940 
941 		if (flags & TH_RST) {
942 			m->m_pkthdr.comp_gencnt = tp->t_comp_gencnt;
943 		}
944 	} else {
945 		if (flags & TH_RST) {
946 			m->m_pkthdr.comp_gencnt = TCP_ACK_COMPRESSION_DUMMY;
947 			m_set_service_class(m, MBUF_SC_BK_SYS);
948 		}
949 	}
950 
951 	if (isipv6) {
952 		struct ip6_out_args ip6oa;
953 		bzero(&ip6oa, sizeof(ip6oa));
954 		ip6oa.ip6oa_boundif = tra->ifscope;
955 		ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
956 		ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
957 		ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
958 
959 		if (tra->ifscope != IFSCOPE_NONE) {
960 			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
961 		}
962 		if (tra->nocell) {
963 			ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
964 		}
965 		if (tra->noexpensive) {
966 			ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
967 		}
968 		if (tra->noconstrained) {
969 			ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
970 		}
971 		if (tra->awdl_unrestricted) {
972 			ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;
973 		}
974 		if (tra->intcoproc_allowed) {
975 			ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED;
976 		}
977 		if (tra->management_allowed) {
978 			ip6oa.ip6oa_flags |= IP6OAF_MANAGEMENT_ALLOWED;
979 		}
980 		ip6oa.ip6oa_sotc = sotc;
981 		if (tp != NULL) {
982 			if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
983 				ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
984 			}
985 			ip6oa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
986 			if (check_qos_marking_again) {
987 				ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY;
988 			}
989 			ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
990 		}
991 		(void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
992 		    NULL, &ip6oa);
993 
994 		if (check_qos_marking_again) {
995 			struct inpcb *inp = tp->t_inpcb;
996 			inp->inp_policyresult.results.qos_marking_gencount = ip6oa.qos_marking_gencount;
997 			if (ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) {
998 				inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
999 			} else {
1000 				inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1001 			}
1002 		}
1003 
1004 		if (tp != NULL && ro6 != NULL && ro6->ro_rt != NULL &&
1005 		    (outif = ro6->ro_rt->rt_ifp) !=
1006 		    tp->t_inpcb->in6p_last_outifp) {
1007 			tp->t_inpcb->in6p_last_outifp = outif;
1008 #if SKYWALK
1009 			if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1010 				netns_set_ifnet(&tp->t_inpcb->inp_netns_token,
1011 				    tp->t_inpcb->in6p_last_outifp);
1012 			}
1013 #endif /* SKYWALK */
1014 		}
1015 
1016 		if (ro6 == &sro6) {
1017 			ROUTE_RELEASE(ro6);
1018 		}
1019 	} else {
1020 		struct ip_out_args ipoa;
1021 		bzero(&ipoa, sizeof(ipoa));
1022 		ipoa.ipoa_boundif = tra->ifscope;
1023 		ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
1024 		ipoa.ipoa_sotc = SO_TC_UNSPEC;
1025 		ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1026 
1027 		if (tra->ifscope != IFSCOPE_NONE) {
1028 			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1029 		}
1030 		if (tra->nocell) {
1031 			ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
1032 		}
1033 		if (tra->noexpensive) {
1034 			ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
1035 		}
1036 		if (tra->noconstrained) {
1037 			ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
1038 		}
1039 		if (tra->awdl_unrestricted) {
1040 			ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
1041 		}
1042 		if (tra->management_allowed) {
1043 			ipoa.ipoa_flags |= IPOAF_MANAGEMENT_ALLOWED;
1044 		}
1045 		ipoa.ipoa_sotc = sotc;
1046 		if (tp != NULL) {
1047 			if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1048 				ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1049 			}
1050 			if (!(tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
1051 				ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
1052 			}
1053 			ipoa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
1054 			ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1055 		}
1056 		if (ro != &sro) {
1057 			/* Copy the cached route and take an extra reference */
1058 			inp_route_copyout(tp->t_inpcb, &sro);
1059 		}
1060 		/*
1061 		 * For consistency, pass a local route copy.
1062 		 */
1063 		(void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);
1064 
1065 		if (check_qos_marking_again) {
1066 			struct inpcb *inp = tp->t_inpcb;
1067 			inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount;
1068 			if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) {
1069 				inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1070 			} else {
1071 				inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1072 			}
1073 		}
1074 		if (tp != NULL && sro.ro_rt != NULL &&
1075 		    (outif = sro.ro_rt->rt_ifp) !=
1076 		    tp->t_inpcb->inp_last_outifp) {
1077 			tp->t_inpcb->inp_last_outifp = outif;
1078 #if SKYWALK
1079 			if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1080 				netns_set_ifnet(&tp->t_inpcb->inp_netns_token, outif);
1081 			}
1082 #endif /* SKYWALK */
1083 		}
1084 		if (ro != &sro) {
1085 			/* Synchronize cached PCB route */
1086 			inp_route_copyin(tp->t_inpcb, &sro);
1087 		} else {
1088 			ROUTE_RELEASE(&sro);
1089 		}
1090 	}
1091 }
1092 
1093 /*
1094  * Create a new TCP control block, making an
1095  * empty reassembly queue and hooking it to the argument
1096  * protocol control block.  The `inp' parameter must have
1097  * come from the zone allocator set up in tcp_init().
1098  */
1099 struct tcpcb *
1100 tcp_newtcpcb(struct inpcb *inp)
1101 {
1102 	struct inp_tp *it;
1103 	struct tcpcb *tp;
1104 	struct socket *so = inp->inp_socket;
1105 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1106 	uint32_t random_32;
1107 
1108 	calculate_tcp_clock();
1109 
1110 	if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
1111 		it = (struct inp_tp *)(void *)inp;
1112 		tp = &it->tcb;
1113 	} else {
1114 		tp = (struct tcpcb *)(void *)inp->inp_saved_ppcb;
1115 	}
1116 
1117 	bzero((char *) tp, sizeof(struct tcpcb));
1118 	LIST_INIT(&tp->t_segq);
1119 	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
1120 
1121 	tp->t_flags = TF_REQ_SCALE | (tcp_do_timestamps ? TF_REQ_TSTMP : 0);
1122 	tp->t_flagsext |= TF_SACK_ENABLE;
1123 
1124 	if (tcp_rack) {
1125 		tp->t_flagsext |= TF_RACK_ENABLED;
1126 	}
1127 
1128 	TAILQ_INIT(&tp->snd_holes);
1129 	SLIST_INIT(&tp->t_rxt_segments);
1130 	TAILQ_INIT(&tp->t_segs_sent);
1131 	RB_INIT(&tp->t_segs_sent_tree);
1132 	TAILQ_INIT(&tp->t_segs_acked);
1133 	TAILQ_INIT(&tp->seg_pool.free_segs);
1134 	SLIST_INIT(&tp->t_notify_ack);
1135 	tp->t_inpcb = inp;
1136 	/*
1137 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1138 	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
1139 	 * a reasonable initial retransmit time.
1140 	 */
1141 	tp->t_srtt = TCPTV_SRTTBASE;
1142 	tp->t_rttvar =
1143 	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1144 	tp->t_rttmin = tcp_TCPTV_MIN;
1145 	tp->t_rxtcur = TCPTV_RTOBASE;
1146 
1147 	if (tcp_use_newreno) {
1148 		/* use newreno by default */
1149 		tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
1150 #if (DEVELOPMENT || DEBUG)
1151 	} else if (tcp_use_ledbat) {
1152 		/* use ledbat for testing */
1153 		tp->tcp_cc_index = TCP_CC_ALGO_BACKGROUND_INDEX;
1154 #endif
1155 	} else {
1156 		if (TCP_L4S_ENABLED(tp)) {
1157 			tp->tcp_cc_index = TCP_CC_ALGO_PRAGUE_INDEX;
1158 		} else {
1159 			tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX;
1160 		}
1161 	}
1162 
1163 	tcp_cc_allocate_state(tp);
1164 
1165 	if (CC_ALGO(tp)->init != NULL) {
1166 		CC_ALGO(tp)->init(tp);
1167 	}
1168 
1169 	/* Initialize rledbat if we are using recv_bg */
1170 	if (tcp_rledbat == 1 && TCP_RECV_BG(inp->inp_socket) &&
1171 	    tcp_cc_rledbat.init != NULL) {
1172 		tcp_cc_rledbat.init(tp);
1173 	}
1174 
1175 	tp->snd_cwnd = tcp_initial_cwnd(tp);
1176 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1177 	tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1178 	tp->t_rcvtime = tcp_now;
1179 	tp->tentry.timer_start = tcp_now;
1180 	tp->rcv_unackwin = tcp_now;
1181 	tp->t_persist_timeout = tcp_max_persist_timeout;
1182 	tp->t_persist_stop = 0;
1183 	tp->t_flagsext |= TF_RCVUNACK_WAITSS;
1184 	tp->t_rexmtthresh = (uint8_t)tcprexmtthresh;
1185 	tp->rack.reo_wnd_multi = 1;
1186 	tp->rfbuf_ts = tcp_now;
1187 	tp->rfbuf_space = tcp_initial_cwnd(tp);
1188 	tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1189 	tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
1190 
1191 	/* Enable bandwidth measurement on this connection */
1192 	tp->t_flagsext |= TF_MEASURESNDBW;
1193 	if (tp->t_bwmeas == NULL) {
1194 		tp->t_bwmeas = tcp_bwmeas_alloc(tp);
1195 		if (tp->t_bwmeas == NULL) {
1196 			tp->t_flagsext &= ~TF_MEASURESNDBW;
1197 		}
1198 	}
1199 
1200 	/* Clear time wait tailq entry */
1201 	tp->t_twentry.tqe_next = NULL;
1202 	tp->t_twentry.tqe_prev = NULL;
1203 
1204 	read_frandom(&random_32, sizeof(random_32));
1205 	tp->t_comp_gencnt = random_32;
1206 	if (tp->t_comp_gencnt <= TCP_ACK_COMPRESSION_DUMMY) {
1207 		tp->t_comp_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
1208 	}
1209 	tp->t_comp_lastinc = tcp_now;
1210 
1211 	/* Initialize Accurate ECN state */
1212 	tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_feature_disabled;
1213 	tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_feature_disabled;
1214 
1215 	/*
1216 	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
1217 	 * because the socket may be bound to an IPv6 wildcard address,
1218 	 * which may match an IPv4-mapped IPv6 address.
1219 	 */
1220 	inp->inp_ip_ttl = (uint8_t)ip_defttl;
1221 	inp->inp_ppcb = (caddr_t)tp;
1222 	return tp;            /* XXX */
1223 }
1224 
1225 /*
1226  * Drop a TCP connection, reporting
1227  * the specified error.  If connection is synchronized,
1228  * then send a RST to peer.
1229  */
1230 struct tcpcb *
1231 tcp_drop(struct tcpcb *tp, int errno)
1232 {
1233 	struct socket *so = tp->t_inpcb->inp_socket;
1234 #if CONFIG_DTRACE
1235 	struct inpcb *inp = tp->t_inpcb;
1236 #endif
1237 
1238 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
1239 		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1240 		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1241 		TCP_LOG_STATE(tp, TCPS_CLOSED);
1242 		tp->t_state = TCPS_CLOSED;
1243 		(void) tcp_output(tp);
1244 		tcpstat.tcps_drops++;
1245 	} else {
1246 		tcpstat.tcps_conndrops++;
1247 	}
1248 	if (errno == ETIMEDOUT && tp->t_softerror) {
1249 		errno = tp->t_softerror;
1250 	}
1251 	so->so_error = (u_short)errno;
1252 
1253 	TCP_LOG_CONNECTION_SUMMARY(tp);
1254 
1255 	return tcp_close(tp);
1256 }
1257 
1258 void
1259 tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt)
1260 {
1261 	uint32_t rtt = rt->rt_rmx.rmx_rtt;
1262 
1263 	TCP_LOG_RTM_RTT(tp, rt);
1264 
1265 	if (rtt != 0 && tcp_init_rtt_from_cache != 0) {
1266 		/*
1267 		 * XXX the lock bit for RTT indicates that the value
1268 		 * is also a minimum value; this is subject to time.
1269 		 */
1270 		if (rt->rt_rmx.rmx_locks & RTV_RTT) {
1271 			tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ);
1272 		} else {
1273 			tp->t_rttmin = TCPTV_REXMTMIN;
1274 		}
1275 
1276 		tp->t_srtt =
1277 		    rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1278 		tcpstat.tcps_usedrtt++;
1279 
1280 		if (rt->rt_rmx.rmx_rttvar) {
1281 			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
1282 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1283 			tcpstat.tcps_usedrttvar++;
1284 		} else {
1285 			/* default variation is +- 1 rtt */
1286 			tp->t_rttvar =
1287 			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
1288 		}
1289 
1290 		/*
1291 		 * The RTO formula in the route metric case is based on:
1292 		 *     srtt + 4 * rttvar
1293 		 * modulo the min, max and slop
1294 		 */
1295 		TCPT_RANGESET(tp->t_rxtcur,
1296 		    TCP_REXMTVAL(tp),
1297 		    tp->t_rttmin, TCPTV_REXMTMAX,
1298 		    TCP_ADD_REXMTSLOP(tp));
1299 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_srtt == 0 &&
1300 	    tp->t_rxtshift == 0) {
1301 		struct ifnet *ifp = rt->rt_ifp;
1302 
1303 		if (ifp != NULL && (ifp->if_eflags & IFEF_AWDL) != 0) {
1304 			/*
1305 			 * AWDL needs a special value for the default initial retransmission timeout
1306 			 */
1307 			if (tcp_awdl_rtobase > tcp_TCPTV_MIN) {
1308 				tp->t_rttvar = ((tcp_awdl_rtobase - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1309 			} else {
1310 				tp->t_rttvar = ((tcp_TCPTV_MIN - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1311 			}
1312 			TCPT_RANGESET(tp->t_rxtcur,
1313 			    TCP_REXMTVAL(tp),
1314 			    tp->t_rttmin, TCPTV_REXMTMAX,
1315 			    TCP_ADD_REXMTSLOP(tp));
1316 		}
1317 	}
1318 
1319 	TCP_LOG_RTT_INFO(tp);
1320 }
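/*
 * [Editorial worked example, not in the original file.] Ignoring the
 * fixed-point scaling, the formula in the comment above behaves like:
 *
 *     srtt = 100 ms, rttvar = 25 ms
 *     rto  = srtt + 4 * rttvar = 200 ms
 *
 * which TCPT_RANGESET() then clamps to [t_rttmin, TCPTV_REXMTMAX] and pads
 * with the retransmit slop.
 */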
1321 
1322 static inline void
1323 tcp_create_ifnet_stats_per_flow(struct tcpcb *tp,
1324     struct ifnet_stats_per_flow *ifs)
1325 {
1326 	struct inpcb *inp;
1327 	struct socket *so;
1328 	if (tp == NULL || ifs == NULL) {
1329 		return;
1330 	}
1331 
1332 	bzero(ifs, sizeof(*ifs));
1333 	inp = tp->t_inpcb;
1334 	so = inp->inp_socket;
1335 
1336 	ifs->ipv4 = (inp->inp_vflag & INP_IPV6) ? 0 : 1;
1337 	ifs->local = (tp->t_flags & TF_LOCAL) ? 1 : 0;
1338 	ifs->connreset = (so->so_error == ECONNRESET) ? 1 : 0;
1339 	ifs->conntimeout = (so->so_error == ETIMEDOUT) ? 1 : 0;
1340 	ifs->ecn_flags = tp->ecn_flags;
1341 	ifs->txretransmitbytes = tp->t_stat.txretransmitbytes;
1342 	ifs->rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1343 	ifs->rxmitpkts = tp->t_stat.rxmitpkts;
1344 	ifs->rcvoopack = tp->t_rcvoopack;
1345 	ifs->pawsdrop = tp->t_pawsdrop;
1346 	ifs->sack_recovery_episodes = tp->t_sack_recovery_episode;
1347 	ifs->reordered_pkts = tp->t_reordered_pkts;
1348 	ifs->dsack_sent = tp->t_dsack_sent;
1349 	ifs->dsack_recvd = tp->t_dsack_recvd;
1350 	ifs->srtt = tp->t_srtt;
1351 	ifs->rttupdated = tp->t_rttupdated;
1352 	ifs->rttvar = tp->t_rttvar;
1353 	ifs->rttmin = get_base_rtt(tp);
1354 	if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_sndbw_max > 0) {
1355 		ifs->bw_sndbw_max = tp->t_bwmeas->bw_sndbw_max;
1356 	} else {
1357 		ifs->bw_sndbw_max = 0;
1358 	}
1359 	if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_rcvbw_max > 0) {
1360 		ifs->bw_rcvbw_max = tp->t_bwmeas->bw_rcvbw_max;
1361 	} else {
1362 		ifs->bw_rcvbw_max = 0;
1363 	}
1364 	ifs->bk_txpackets = so->so_tc_stats[MBUF_TC_BK].txpackets;
1365 	ifs->txpackets = inp->inp_stat->txpackets;
1366 	ifs->rxpackets = inp->inp_stat->rxpackets;
1367 }
1368 
1369 static inline void
1370 tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
1371     struct if_tcp_ecn_perf_stat *stat)
1372 {
1373 	u_int64_t curval, oldval;
1374 	stat->total_txpkts += ifs->txpackets;
1375 	stat->total_rxpkts += ifs->rxpackets;
1376 	stat->total_rxmitpkts += ifs->rxmitpkts;
1377 	stat->total_oopkts += ifs->rcvoopack;
1378 	stat->total_reorderpkts += (ifs->reordered_pkts +
1379 	    ifs->pawsdrop + ifs->dsack_sent + ifs->dsack_recvd);
1380 
1381 	/* Average RTT */
1382 	curval = ifs->srtt >> TCP_RTT_SHIFT;
1383 	if (curval > 0 && ifs->rttupdated >= 16) {
1384 		if (stat->rtt_avg == 0) {
1385 			stat->rtt_avg = curval;
1386 		} else {
1387 			oldval = stat->rtt_avg;
1388 			stat->rtt_avg = ((oldval << 4) - oldval + curval) >> 4;
1389 		}
1390 	}
1391 
1392 	/* RTT variance */
1393 	curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1394 	if (curval > 0 && ifs->rttupdated >= 16) {
1395 		if (stat->rtt_var == 0) {
1396 			stat->rtt_var = curval;
1397 		} else {
1398 			oldval = stat->rtt_var;
1399 			stat->rtt_var =
1400 			    ((oldval << 4) - oldval + curval) >> 4;
1401 		}
1402 	}
1403 
1404 	/* SACK episodes */
1405 	stat->sack_episodes += ifs->sack_recovery_episodes;
1406 	if (ifs->connreset) {
1407 		stat->rst_drop++;
1408 	}
1409 }
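/*
 * [Editorial note, not in the original file.] The rtt_avg/rtt_var updates
 * above are exponentially weighted moving averages with gain 1/16, since
 * ((old << 4) - old + cur) >> 4 equals (15 * old + cur) / 16:
 */
#include <stdint.h>

static uint64_t
ewma16_demo(uint64_t avg, uint64_t sample)
{
	return avg == 0 ? sample : (15 * avg + sample) / 16;
}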
1410 
1411 static inline void
1412 tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
1413     struct if_lim_perf_stat *stat)
1414 {
1415 	u_int64_t curval, oldval;
1416 
1417 	stat->lim_total_txpkts += ifs->txpackets;
1418 	stat->lim_total_rxpkts += ifs->rxpackets;
1419 	stat->lim_total_retxpkts += ifs->rxmitpkts;
1420 	stat->lim_total_oopkts += ifs->rcvoopack;
1421 
1422 	if (ifs->bw_sndbw_max > 0) {
1423 		/* convert from bytes per ms to bits per second */
1424 		ifs->bw_sndbw_max *= 8000;
1425 		stat->lim_ul_max_bandwidth = MAX(stat->lim_ul_max_bandwidth,
1426 		    ifs->bw_sndbw_max);
1427 	}
1428 
1429 	if (ifs->bw_rcvbw_max > 0) {
1430 		/* convert from bytes per ms to bits per second */
1431 		ifs->bw_rcvbw_max *= 8000;
1432 		stat->lim_dl_max_bandwidth = MAX(stat->lim_dl_max_bandwidth,
1433 		    ifs->bw_rcvbw_max);
1434 	}
1435 
1436 	/* Average RTT */
1437 	curval = ifs->srtt >> TCP_RTT_SHIFT;
1438 	if (curval > 0 && ifs->rttupdated >= 16) {
1439 		if (stat->lim_rtt_average == 0) {
1440 			stat->lim_rtt_average = curval;
1441 		} else {
1442 			oldval = stat->lim_rtt_average;
1443 			stat->lim_rtt_average =
1444 			    ((oldval << 4) - oldval + curval) >> 4;
1445 		}
1446 	}
1447 
1448 	/* RTT variance */
1449 	curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1450 	if (curval > 0 && ifs->rttupdated >= 16) {
1451 		if (stat->lim_rtt_variance == 0) {
1452 			stat->lim_rtt_variance = curval;
1453 		} else {
1454 			oldval = stat->lim_rtt_variance;
1455 			stat->lim_rtt_variance =
1456 			    ((oldval << 4) - oldval + curval) >> 4;
1457 		}
1458 	}
1459 
1460 	if (stat->lim_rtt_min == 0) {
1461 		stat->lim_rtt_min = ifs->rttmin;
1462 	} else {
1463 		stat->lim_rtt_min = MIN(stat->lim_rtt_min, ifs->rttmin);
1464 	}
1465 
1466 	/* connection timeouts */
1467 	stat->lim_conn_attempts++;
1468 	if (ifs->conntimeout) {
1469 		stat->lim_conn_timeouts++;
1470 	}
1471 
1472 	/* bytes sent using background delay-based algorithms */
1473 	stat->lim_bk_txpkts += ifs->bk_txpackets;
1474 }
1475 
1476 /*
1477  * Close a TCP control block:
1478  *	discard all space held by the tcp
1479  *	discard internet protocol block
1480  *	wake up any sleepers
1481  */
1482 struct tcpcb *
1483 tcp_close(struct tcpcb *tp)
1484 {
1485 	struct inpcb *inp = tp->t_inpcb;
1486 	struct socket *so = inp->inp_socket;
1487 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1488 	struct route *ro;
1489 	struct rtentry *rt;
1490 	int dosavessthresh;
1491 	struct ifnet_stats_per_flow ifs;
1492 
1493 	/* tcp_close was called previously, bail */
1494 	if (inp->inp_ppcb == NULL) {
1495 		return NULL;
1496 	}
1497 
1498 	tcp_del_fsw_flow(tp);
1499 
1500 	tcp_canceltimers(tp);
1501 	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
1502 
1503 	/*
1504 	 * If another thread for this tcp is currently in ip (indicated by
1505 	 * the TF_SENDINPROG flag), defer the cleanup until after it returns
1506 	 * back to tcp.  This is done to serialize the close until after all
1507 	 * pending output is finished, in order to avoid having the PCB be
1508 	 * detached and the cached route cleaned, only for ip to cache the
1509 	 * route back into the PCB again.  Note that we've cleared all the
1510 	 * timers at this point.  Set TF_CLOSING to indicate to tcp_output()
1511 	 * that is should call us again once it returns from ip; at that
1512 	 * that it should call us again once it returns from ip; at that
1513 	 * with the cleanup.
1514 	 */
1515 	if ((tp->t_flags & TF_CLOSING) ||
1516 	    inp->inp_sndinprog_cnt > 0) {
1517 		tp->t_flags |= TF_CLOSING;
1518 		return NULL;
1519 	}
1520 
1521 	TCP_LOG_CONNECTION_SUMMARY(tp);
1522 
1523 	DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1524 	    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1525 
1526 	ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route);
1527 	rt = ro->ro_rt;
1528 	if (rt != NULL) {
1529 		RT_LOCK_SPIN(rt);
1530 	}
1531 
1532 	/*
1533 	 * If we got enough samples through the srtt filter,
1534 	 * save the rtt and rttvar in the routing entry.
1535 	 * 'Enough' is arbitrarily defined as 16 samples.
1536 	 * 16 samples is enough for the srtt filter to converge
1537 	 * to within 5% of the correct value; fewer samples and
1538 	 * we could save a very bogus rtt.
1539 	 *
1540 	 * Don't update the default route's characteristics and don't
1541 	 * update anything that the user "locked".
1542 	 */
1543 	if (tp->t_rttupdated >= 16) {
1544 		u_int32_t i = 0;
1545 		bool log_rtt = false;
1546 
1547 		if (isipv6) {
1548 			struct sockaddr_in6 *sin6;
1549 
1550 			if (rt == NULL) {
1551 				goto no_valid_rt;
1552 			}
1553 			sin6 = SIN6(rt_key(rt));
1554 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1555 				goto no_valid_rt;
1556 			}
1557 		} else if (ROUTE_UNUSABLE(ro) ||
1558 		    SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) {
1559 			DTRACE_TCP4(state__change, void, NULL,
1560 			    struct inpcb *, inp, struct tcpcb *, tp,
1561 			    int32_t, TCPS_CLOSED);
1562 			TCP_LOG_STATE(tp, TCPS_CLOSED);
1563 			tp->t_state = TCPS_CLOSED;
1564 			goto no_valid_rt;
1565 		}
1566 
1567 		RT_LOCK_ASSERT_HELD(rt);
1568 		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1569 			i = tp->t_srtt *
1570 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1571 			if (rt->rt_rmx.rmx_rtt && i) {
1572 				/*
1573 				 * filter this update to half the old & half
1574 				 * the new values, converting scale.
1575 				 * See route.h and tcp_var.h for a
1576 				 * description of the scaling constants.
1577 				 */
1578 				rt->rt_rmx.rmx_rtt =
1579 				    (rt->rt_rmx.rmx_rtt + i) / 2;
1580 			} else {
1581 				rt->rt_rmx.rmx_rtt = i;
1582 			}
1583 			tcpstat.tcps_cachedrtt++;
1584 			log_rtt = true;
1585 		}
1586 		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1587 			i = tp->t_rttvar *
1588 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1589 			if (rt->rt_rmx.rmx_rttvar && i) {
1590 				rt->rt_rmx.rmx_rttvar =
1591 				    (rt->rt_rmx.rmx_rttvar + i) / 2;
1592 			} else {
1593 				rt->rt_rmx.rmx_rttvar = i;
1594 			}
1595 			tcpstat.tcps_cachedrttvar++;
1596 			log_rtt = true;
1597 		}
1598 		if (log_rtt) {
1599 			TCP_LOG_RTM_RTT(tp, rt);
1600 			TCP_LOG_RTT_INFO(tp);
1601 		}
1602 		/*
1603 		 * The old comment here said:
1604 		 * update the pipelimit (ssthresh) if it has been updated
1605 	 * already or if a pipesize was specified & the threshold
1606 		 * got below half the pipesize.  I.e., wait for bad news
1607 		 * before we start updating, then update on both good
1608 		 * and bad news.
1609 		 *
1610 		 * But we want to save the ssthresh even if no pipesize is
1611 		 * specified explicitly in the route, because such
1612 		 * connections still have an implicit pipesize specified
1613 		 * by the global tcp_sendspace.  In the absence of a reliable
1614 		 * way to calculate the pipesize, it will have to do.
1615 		 */
1616 		i = tp->snd_ssthresh;
1617 		if (rt->rt_rmx.rmx_sendpipe != 0) {
1618 			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
1619 		} else {
1620 			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
1621 		}
1622 		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1623 		    i != 0 && rt->rt_rmx.rmx_ssthresh != 0) ||
1624 		    dosavessthresh) {
1625 			/*
1626 			 * convert the limit from user data bytes to
1627 			 * packets then to packet data bytes.
1628 			 */
1629 			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
1630 			if (i < 2) {
1631 				i = 2;
1632 			}
1633 			i *= (u_int32_t)(tp->t_maxseg +
1634 			    (isipv6 ? sizeof(struct ip6_hdr) +
1635 			    sizeof(struct tcphdr) :
1636 			    sizeof(struct tcpiphdr)));
1637 			if (rt->rt_rmx.rmx_ssthresh) {
1638 				rt->rt_rmx.rmx_ssthresh =
1639 				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
1640 			} else {
1641 				rt->rt_rmx.rmx_ssthresh = i;
1642 			}
1643 			tcpstat.tcps_cachedssthresh++;
1644 		}
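		/*
		 * Editor's note -- worked example of the bytes->packets->
		 * wire-bytes conversion above (hypothetical numbers): with
		 * snd_ssthresh = 32768 user bytes and t_maxseg = 1448,
		 *
		 *   packets = (32768 + 724) / 1448 = 23
		 *   i       = 23 * (1448 + 40)     = 34224 wire bytes
		 *             (40 == sizeof(struct tcpiphdr) for IPv4)
		 *
		 * so the cached rmx_ssthresh includes IP+TCP header overhead
		 * for each full-sized segment.
		 */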
1645 	}
1646 
1647 	/*
1648 	 * Mark route for deletion if no information is cached.
1649 	 */
1650 	if (rt != NULL && (so->so_flags & SOF_OVERFLOW)) {
1651 		if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
1652 		    rt->rt_rmx.rmx_rtt == 0) {
1653 			rt->rt_flags |= RTF_DELCLONE;
1654 		}
1655 	}
1656 
1657 no_valid_rt:
1658 	if (rt != NULL) {
1659 		RT_UNLOCK(rt);
1660 	}
1661 
1662 	/* free the reassembly queue, if any */
1663 	(void) tcp_freeq(tp);
1664 
1665 	/* performance stats per interface */
1666 	tcp_create_ifnet_stats_per_flow(tp, &ifs);
1667 	tcp_update_stats_per_flow(&ifs, inp->inp_last_outifp);
1668 
1669 	tcp_free_sackholes(tp);
1670 	tcp_notify_ack_free(tp);
1671 
1672 	inp_decr_sndbytes_allunsent(so, tp->snd_una);
1673 
1674 	if (tp->t_bwmeas != NULL) {
1675 		tcp_bwmeas_free(tp);
1676 	}
1677 	tcp_rxtseg_clean(tp);
1678 	tcp_segs_sent_clean(tp, true);
1679 
1680 	/* Free the packet list */
1681 	if (tp->t_pktlist_head != NULL) {
1682 		m_freem_list(tp->t_pktlist_head);
1683 	}
1684 	TCP_PKTLIST_CLEAR(tp);
1685 
1686 	if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) {
1687 		inp->inp_saved_ppcb = (caddr_t) tp;
1688 	}
1689 
1690 	TCP_LOG_STATE(tp, TCPS_CLOSED);
1691 	tp->t_state = TCPS_CLOSED;
1692 
1693 	/*
1694 	 * Issue a wakeup before detach so that we don't miss
1695 	 * a wakeup
1696 	 */
1697 	sodisconnectwakeup(so);
1698 
1699 	/*
1700 	 * Make sure to clear the TCP Keep Alive Offload as it is
1701 	 * ref counted on the interface
1702 	 */
1703 	tcp_clear_keep_alive_offload(so);
1704 
1705 	/*
1706 	 * If this is a socket that does not want to wake up the device
1707 	 * for its traffic, the application might need to know that the
1708 	 * socket is closed; send a notification.
1709 	 */
1710 	if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
1711 	    inp->inp_state != INPCB_STATE_DEAD &&
1712 	    !(inp->inp_flags2 & INP2_TIMEWAIT)) {
1713 		socket_post_kev_msg_closed(so);
1714 	}
1715 
1716 	if (CC_ALGO(tp)->cleanup != NULL) {
1717 		CC_ALGO(tp)->cleanup(tp);
1718 	}
1719 
1720 	tp->tcp_cc_index = TCP_CC_ALGO_NONE;
1721 
1722 	if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.cleanup != NULL) {
1723 		tcp_cc_rledbat.cleanup(tp);
1724 	}
1725 
1726 	/* Can happen if we close the socket before receiving the third ACK */
1727 	if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
1728 		OSDecrementAtomic(&tcp_tfo_halfcnt);
1729 
1730 		/* Panic if something has gone terribly wrong. */
1731 		VERIFY(tcp_tfo_halfcnt >= 0);
1732 
1733 		tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
1734 	}
1735 
1736 	if (SOCK_CHECK_DOM(so, PF_INET6)) {
1737 		in6_pcbdetach(inp);
1738 	} else {
1739 		in_pcbdetach(inp);
1740 	}
1741 
1742 	/*
1743 	 * Call soisdisconnected after detach because it might unlock the socket
1744 	 */
1745 	soisdisconnected(so);
1746 	tcpstat.tcps_closed++;
1747 	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END,
1748 	    tcpstat.tcps_closed, 0, 0, 0, 0);
1749 	return NULL;
1750 }
1751 
1752 int
1753 tcp_freeq(struct tcpcb *tp)
1754 {
1755 	struct tseg_qent *q;
1756 	int rv = 0;
1757 	int count = 0;
1758 
1759 	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
1760 		LIST_REMOVE(q, tqe_q);
1761 		tp->t_reassq_mbcnt -= _MSIZE + ((q->tqe_m->m_flags & M_EXT) ?
1762 		    q->tqe_m->m_ext.ext_size : 0);
1763 		m_freem(q->tqe_m);
1764 		zfree(tcp_reass_zone, q);
1765 		rv = 1;
1766 		count++;
1767 	}
1768 	tp->t_reassqlen = 0;
1769 	if (count > 0) {
1770 		OSAddAtomic(-count, &tcp_reass_total_qlen);
1771 	}
1772 	return rv;
1773 }
1774 
1775 
1776 void
1777 tcp_drain(void)
1778 {
1779 	struct inpcb *inp;
1780 	struct tcpcb *tp;
1781 
1782 	if (!lck_rw_try_lock_exclusive(&tcbinfo.ipi_lock)) {
1783 		return;
1784 	}
1785 
1786 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
1787 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
1788 		    WNT_STOPUSING) {
1789 			socket_lock(inp->inp_socket, 1);
1790 			if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
1791 			    == WNT_STOPUSING) {
1792 				/* lost a race, try the next one */
1793 				socket_unlock(inp->inp_socket, 1);
1794 				continue;
1795 			}
1796 			tp = intotcpcb(inp);
1797 
1798 			so_drain_extended_bk_idle(inp->inp_socket);
1799 
1800 			socket_unlock(inp->inp_socket, 1);
1801 		}
1802 	}
1803 	lck_rw_done(&tcbinfo.ipi_lock);
1804 }
1805 
1806 /*
1807  * Notify a tcp user of an asynchronous error:
1808  * store the error as a soft error on the connection.
1809  *
1810  * The original BSD code also woke the user up, but we do not,
1811  * since there currently is no mechanism for reporting soft errors
1812  * (yet - a kqueue filter may be added).
1813  */
1814 static void
1815 tcp_notify(struct inpcb *inp, int error)
1816 {
1817 	struct tcpcb *tp;
1818 
1819 	if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) {
1820 		return; /* pcb is gone already */
1821 	}
1822 	tp = (struct tcpcb *)inp->inp_ppcb;
1823 
1824 	VERIFY(tp != NULL);
1825 	/*
1826 	 * Ignore some errors if we are hooked up.
1827 	 * If connection hasn't completed, has retransmitted several times,
1828 	 * and receives a second error, give up now.  This is better
1829 	 * than waiting a long time to establish a connection that
1830 	 * can never complete.
1831 	 */
1832 	if (tp->t_state == TCPS_ESTABLISHED &&
1833 	    (error == EHOSTUNREACH || error == ENETUNREACH ||
1834 	    error == EHOSTDOWN)) {
1835 		if (inp->inp_route.ro_rt) {
1836 			rtfree(inp->inp_route.ro_rt);
1837 			inp->inp_route.ro_rt = (struct rtentry *)NULL;
1838 		}
1839 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
1840 	    tp->t_softerror) {
1841 		tcp_drop(tp, error);
1842 	} else {
1843 		tp->t_softerror = error;
1844 	}
1845 }
1846 
1847 struct bwmeas *
1848 tcp_bwmeas_alloc(struct tcpcb *tp)
1849 {
1850 	struct bwmeas *elm;
1851 	elm = zalloc_flags(tcp_bwmeas_zone, Z_ZERO | Z_WAITOK);
1852 	elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE;
1853 	elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg;
1854 	return elm;
1855 }
1856 
1857 void
1858 tcp_bwmeas_free(struct tcpcb *tp)
1859 {
1860 	zfree(tcp_bwmeas_zone, tp->t_bwmeas);
1861 	tp->t_bwmeas = NULL;
1862 	tp->t_flagsext &= ~(TF_MEASURESNDBW);
1863 }
1864 
1865 int
1866 get_tcp_inp_list(struct inpcb * __single *inp_list __counted_by(n), size_t n, inp_gen_t gencnt)
1867 {
1868 	struct tcpcb *tp;
1869 	struct inpcb *inp;
1870 	int i = 0;
1871 
1872 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
1873 		if (i >= n) {
1874 			break;
1875 		}
1876 		if (inp->inp_gencnt <= gencnt &&
1877 		    inp->inp_state != INPCB_STATE_DEAD) {
1878 			inp_list[i++] = inp;
1879 		}
1880 	}
1881 
1882 	TAILQ_FOREACH(tp, &tcp_tw_tailq, t_twentry) {
1883 		if (i >= n) {
1884 			break;
1885 		}
1886 		inp = tp->t_inpcb;
1887 		if (inp->inp_gencnt <= gencnt &&
1888 		    inp->inp_state != INPCB_STATE_DEAD) {
1889 			inp_list[i++] = inp;
1890 		}
1891 	}
1892 	return i;
1893 }
1894 
1895 /*
1896  * tcpcb_to_otcpcb copies specific bits of a tcpcb to a otcpcb format.
1897  * The otcpcb data structure is passed to user space and must not change.
1898  */
1899 static void
1900 tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
1901 {
1902 	otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
1903 	otp->t_dupacks = tp->t_dupacks;
1904 	otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
1905 	otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
1906 	otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
1907 	otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
1908 	otp->t_inpcb =
1909 	    (_TCPCB_PTR(struct inpcb *))VM_KERNEL_ADDRHASH(tp->t_inpcb);
1910 	otp->t_state = tp->t_state;
1911 	otp->t_flags = tp->t_flags;
1912 	otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
1913 	otp->snd_una = tp->snd_una;
1914 	otp->snd_max = tp->snd_max;
1915 	otp->snd_nxt = tp->snd_nxt;
1916 	otp->snd_up = tp->snd_up;
1917 	otp->snd_wl1 = tp->snd_wl1;
1918 	otp->snd_wl2 = tp->snd_wl2;
1919 	otp->iss = tp->iss;
1920 	otp->irs = tp->irs;
1921 	otp->rcv_nxt = tp->rcv_nxt;
1922 	otp->rcv_adv = tp->rcv_adv;
1923 	otp->rcv_wnd = tp->rcv_wnd;
1924 	otp->rcv_up = tp->rcv_up;
1925 	otp->snd_wnd = tp->snd_wnd;
1926 	otp->snd_cwnd = tp->snd_cwnd;
1927 	otp->snd_ssthresh = tp->snd_ssthresh;
1928 	otp->t_maxopd = tp->t_maxopd;
1929 	otp->t_rcvtime = tp->t_rcvtime;
1930 	otp->t_starttime = tp->t_starttime;
1931 	otp->t_rtttime = tp->t_rtttime;
1932 	otp->t_rtseq = tp->t_rtseq;
1933 	otp->t_rxtcur = tp->t_rxtcur;
1934 	otp->t_maxseg = tp->t_maxseg;
1935 	otp->t_srtt = tp->t_srtt;
1936 	otp->t_rttvar = tp->t_rttvar;
1937 	otp->t_rxtshift = tp->t_rxtshift;
1938 	otp->t_rttmin = tp->t_rttmin;
1939 	otp->t_rttupdated = tp->t_rttupdated;
1940 	otp->max_sndwnd = tp->max_sndwnd;
1941 	otp->t_softerror = tp->t_softerror;
1942 	otp->t_oobflags = tp->t_oobflags;
1943 	otp->t_iobc = tp->t_iobc;
1944 	otp->snd_scale = tp->snd_scale;
1945 	otp->rcv_scale = tp->rcv_scale;
1946 	otp->request_r_scale = tp->request_r_scale;
1947 	otp->requested_s_scale = tp->requested_s_scale;
1948 	otp->ts_recent = tp->ts_recent;
1949 	otp->ts_recent_age = tp->ts_recent_age;
1950 	otp->last_ack_sent = tp->last_ack_sent;
1951 	otp->cc_send = 0;
1952 	otp->cc_recv = 0;
1953 	otp->snd_recover = tp->snd_recover;
1954 	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
1955 	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
1956 	otp->t_badrxtwin = 0;
1957 }
1958 
1959 static int
1960 tcp_pcblist SYSCTL_HANDLER_ARGS
1961 {
1962 #pragma unused(oidp, arg1, arg2)
1963 	int error, i = 0, n, sz;
1964 	struct inpcb **inp_list;
1965 	inp_gen_t gencnt;
1966 	struct xinpgen xig;
1967 
1968 	/*
1969 	 * The process of preparing the TCB list is too time-consuming and
1970 	 * resource-intensive to repeat twice on every request.
1971 	 */
1972 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
1973 	if (req->oldptr == USER_ADDR_NULL) {
1974 		n = tcbinfo.ipi_count;
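		/*
		 * Editor's note: the sizing estimate below deliberately
		 * over-provisions by ~12.5% (the n/8 term) plus two
		 * generation headers, so the caller's buffer remains large
		 * enough if connections are created between this sizing
		 * probe and the actual copy-out.
		 */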
1975 		req->oldidx = 2 * (sizeof(xig))
1976 		    + (n + n / 8) * sizeof(struct xtcpcb);
1977 		lck_rw_done(&tcbinfo.ipi_lock);
1978 		return 0;
1979 	}
1980 
1981 	if (req->newptr != USER_ADDR_NULL) {
1982 		lck_rw_done(&tcbinfo.ipi_lock);
1983 		return EPERM;
1984 	}
1985 
1986 	/*
1987 	 * OK, now we're committed to doing something.
1988 	 */
1989 	gencnt = tcbinfo.ipi_gencnt;
1990 	sz = n = tcbinfo.ipi_count;
1991 
1992 	bzero(&xig, sizeof(xig));
1993 	xig.xig_len = sizeof(xig);
1994 	xig.xig_count = n;
1995 	xig.xig_gen = gencnt;
1996 	xig.xig_sogen = so_gencnt;
1997 	error = SYSCTL_OUT(req, &xig, sizeof(xig));
1998 	if (error) {
1999 		lck_rw_done(&tcbinfo.ipi_lock);
2000 		return error;
2001 	}
2002 	/*
2003 	 * We are done if there is no pcb
2004 	 */
2005 	if (n == 0) {
2006 		lck_rw_done(&tcbinfo.ipi_lock);
2007 		return 0;
2008 	}
2009 
2010 	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2011 	if (inp_list == NULL) {
2012 		lck_rw_done(&tcbinfo.ipi_lock);
2013 		return ENOMEM;
2014 	}
2015 
2016 	n = get_tcp_inp_list(inp_list, n, gencnt);
2017 
2018 	error = 0;
2019 	for (i = 0; i < n; i++) {
2020 		struct xtcpcb xt;
2021 		caddr_t inp_ppcb;
2022 		struct inpcb *inp;
2023 
2024 		inp = inp_list[i];
2025 
2026 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2027 			continue;
2028 		}
2029 		socket_lock(inp->inp_socket, 1);
2030 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2031 			socket_unlock(inp->inp_socket, 1);
2032 			continue;
2033 		}
2034 		if (inp->inp_gencnt > gencnt) {
2035 			socket_unlock(inp->inp_socket, 1);
2036 			continue;
2037 		}
2038 
2039 		bzero(&xt, sizeof(xt));
2040 		xt.xt_len = sizeof(xt);
2041 		/* XXX should avoid extra copy */
2042 		inpcb_to_compat(inp, &xt.xt_inp);
2043 		inp_ppcb = inp->inp_ppcb;
2044 		if (inp_ppcb != NULL) {
2045 			tcpcb_to_otcpcb((struct tcpcb *)(void *)inp_ppcb,
2046 			    &xt.xt_tp);
2047 		} else {
2048 			bzero((char *) &xt.xt_tp, sizeof(xt.xt_tp));
2049 		}
2050 		if (inp->inp_socket) {
2051 			sotoxsocket(inp->inp_socket, &xt.xt_socket);
2052 		}
2053 
2054 		socket_unlock(inp->inp_socket, 1);
2055 
2056 		error = SYSCTL_OUT(req, &xt, sizeof(xt));
2057 	}
2058 	if (!error) {
2059 		/*
2060 		 * Give the user an updated idea of our state.
2061 		 * If the generation differs from what we told
2062 		 * her before, she knows that something happened
2063 		 * while we were processing this request, and it
2064 		 * might be necessary to retry.
2065 		 */
2066 		bzero(&xig, sizeof(xig));
2067 		xig.xig_len = sizeof(xig);
2068 		xig.xig_gen = tcbinfo.ipi_gencnt;
2069 		xig.xig_sogen = so_gencnt;
2070 		xig.xig_count = tcbinfo.ipi_count;
2071 		error = SYSCTL_OUT(req, &xig, sizeof(xig));
2072 	}
2073 
2074 	lck_rw_done(&tcbinfo.ipi_lock);
2075 	kfree_type(struct inpcb *, sz, inp_list);
2076 	return error;
2077 }
2078 
2079 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2080     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2081     tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
2082 
2083 #if XNU_TARGET_OS_OSX
2084 
2085 static void
2086 tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
2087 {
2088 	otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
2089 	otp->t_dupacks = tp->t_dupacks;
2090 	otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2091 	otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2092 	otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2093 	otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2094 	otp->t_state = tp->t_state;
2095 	otp->t_flags = tp->t_flags;
2096 	otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2097 	otp->snd_una = tp->snd_una;
2098 	otp->snd_max = tp->snd_max;
2099 	otp->snd_nxt = tp->snd_nxt;
2100 	otp->snd_up = tp->snd_up;
2101 	otp->snd_wl1 = tp->snd_wl1;
2102 	otp->snd_wl2 = tp->snd_wl2;
2103 	otp->iss = tp->iss;
2104 	otp->irs = tp->irs;
2105 	otp->rcv_nxt = tp->rcv_nxt;
2106 	otp->rcv_adv = tp->rcv_adv;
2107 	otp->rcv_wnd = tp->rcv_wnd;
2108 	otp->rcv_up = tp->rcv_up;
2109 	otp->snd_wnd = tp->snd_wnd;
2110 	otp->snd_cwnd = tp->snd_cwnd;
2111 	otp->snd_ssthresh = tp->snd_ssthresh;
2112 	otp->t_maxopd = tp->t_maxopd;
2113 	otp->t_rcvtime = tp->t_rcvtime;
2114 	otp->t_starttime = tp->t_starttime;
2115 	otp->t_rtttime = tp->t_rtttime;
2116 	otp->t_rtseq = tp->t_rtseq;
2117 	otp->t_rxtcur = tp->t_rxtcur;
2118 	otp->t_maxseg = tp->t_maxseg;
2119 	otp->t_srtt = tp->t_srtt;
2120 	otp->t_rttvar = tp->t_rttvar;
2121 	otp->t_rxtshift = tp->t_rxtshift;
2122 	otp->t_rttmin = tp->t_rttmin;
2123 	otp->t_rttupdated = tp->t_rttupdated;
2124 	otp->max_sndwnd = tp->max_sndwnd;
2125 	otp->t_softerror = tp->t_softerror;
2126 	otp->t_oobflags = tp->t_oobflags;
2127 	otp->t_iobc = tp->t_iobc;
2128 	otp->snd_scale = tp->snd_scale;
2129 	otp->rcv_scale = tp->rcv_scale;
2130 	otp->request_r_scale = tp->request_r_scale;
2131 	otp->requested_s_scale = tp->requested_s_scale;
2132 	otp->ts_recent = tp->ts_recent;
2133 	otp->ts_recent_age = tp->ts_recent_age;
2134 	otp->last_ack_sent = tp->last_ack_sent;
2135 	otp->cc_send = 0;
2136 	otp->cc_recv = 0;
2137 	otp->snd_recover = tp->snd_recover;
2138 	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2139 	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2140 	otp->t_badrxtwin = 0;
2141 }
2142 
2143 
2144 static int
2145 tcp_pcblist64 SYSCTL_HANDLER_ARGS
2146 {
2147 #pragma unused(oidp, arg1, arg2)
2148 	int error, i = 0, n, sz;
2149 	struct inpcb **inp_list;
2150 	inp_gen_t gencnt;
2151 	struct xinpgen xig;
2152 
2153 	/*
2154 	 * The process of preparing the TCB list is too time-consuming and
2155 	 * resource-intensive to repeat twice on every request.
2156 	 */
2157 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
2158 	if (req->oldptr == USER_ADDR_NULL) {
2159 		n = tcbinfo.ipi_count;
2160 		req->oldidx = 2 * (sizeof(xig))
2161 		    + (n + n / 8) * sizeof(struct xtcpcb64);
2162 		lck_rw_done(&tcbinfo.ipi_lock);
2163 		return 0;
2164 	}
2165 
2166 	if (req->newptr != USER_ADDR_NULL) {
2167 		lck_rw_done(&tcbinfo.ipi_lock);
2168 		return EPERM;
2169 	}
2170 
2171 	/*
2172 	 * OK, now we're committed to doing something.
2173 	 */
2174 	gencnt = tcbinfo.ipi_gencnt;
2175 	sz = n = tcbinfo.ipi_count;
2176 
2177 	bzero(&xig, sizeof(xig));
2178 	xig.xig_len = sizeof(xig);
2179 	xig.xig_count = n;
2180 	xig.xig_gen = gencnt;
2181 	xig.xig_sogen = so_gencnt;
2182 	error = SYSCTL_OUT(req, &xig, sizeof(xig));
2183 	if (error) {
2184 		lck_rw_done(&tcbinfo.ipi_lock);
2185 		return error;
2186 	}
2187 	/*
2188 	 * We are done if there is no pcb
2189 	 */
2190 	if (n == 0) {
2191 		lck_rw_done(&tcbinfo.ipi_lock);
2192 		return 0;
2193 	}
2194 
2195 	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2196 	if (inp_list == NULL) {
2197 		lck_rw_done(&tcbinfo.ipi_lock);
2198 		return ENOMEM;
2199 	}
2200 
2201 	n = get_tcp_inp_list(inp_list, n, gencnt);
2202 
2203 	error = 0;
2204 	for (i = 0; i < n; i++) {
2205 		struct xtcpcb64 xt;
2206 		struct inpcb *inp;
2207 
2208 		inp = inp_list[i];
2209 
2210 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2211 			continue;
2212 		}
2213 		socket_lock(inp->inp_socket, 1);
2214 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2215 			socket_unlock(inp->inp_socket, 1);
2216 			continue;
2217 		}
2218 		if (inp->inp_gencnt > gencnt) {
2219 			socket_unlock(inp->inp_socket, 1);
2220 			continue;
2221 		}
2222 
2223 		bzero(&xt, sizeof(xt));
2224 		xt.xt_len = sizeof(xt);
2225 		inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
2226 		xt.xt_inpcb.inp_ppcb =
2227 		    (uint64_t)VM_KERNEL_ADDRHASH(inp->inp_ppcb);
2228 		if (inp->inp_ppcb != NULL) {
2229 			tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb,
2230 			    &xt);
2231 		}
2232 		if (inp->inp_socket) {
2233 			sotoxsocket64(inp->inp_socket,
2234 			    &xt.xt_inpcb.xi_socket);
2235 		}
2236 
2237 		socket_unlock(inp->inp_socket, 1);
2238 
2239 		error = SYSCTL_OUT(req, &xt, sizeof(xt));
2240 	}
2241 	if (!error) {
2242 		/*
2243 		 * Give the user an updated idea of our state.
2244 		 * If the generation differs from what we told
2245 		 * her before, she knows that something happened
2246 		 * while we were processing this request, and it
2247 		 * might be necessary to retry.
2248 		 */
2249 		bzero(&xig, sizeof(xig));
2250 		xig.xig_len = sizeof(xig);
2251 		xig.xig_gen = tcbinfo.ipi_gencnt;
2252 		xig.xig_sogen = so_gencnt;
2253 		xig.xig_count = tcbinfo.ipi_count;
2254 		error = SYSCTL_OUT(req, &xig, sizeof(xig));
2255 	}
2256 
2257 	lck_rw_done(&tcbinfo.ipi_lock);
2258 	kfree_type(struct inpcb *, sz, inp_list);
2259 	return error;
2260 }
2261 
2262 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64,
2263     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2264     tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
2265 
2266 #endif /* XNU_TARGET_OS_OSX */
2267 
2268 static int
2269 tcp_pcblist_n SYSCTL_HANDLER_ARGS
2270 {
2271 #pragma unused(oidp, arg1, arg2)
2272 	int error = 0;
2273 
2274 	error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo);
2275 
2276 	return error;
2277 }
2278 
2279 
2280 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n,
2281     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2282     tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
2283 
2284 static int
2285 tcp_progress_probe_enable SYSCTL_HANDLER_ARGS
2286 {
2287 #pragma unused(oidp, arg1, arg2)
2288 
2289 	return ntstat_tcp_progress_enable(req);
2290 }
2291 
2292 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, progress_enable,
2293     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
2294     tcp_progress_probe_enable, "S", "Enable/disable TCP keepalive probing on the specified link(s)");
2295 
2296 
2297 __private_extern__ void
2298 tcp_get_ports_used(ifnet_t ifp, int protocol, uint32_t flags,
2299     bitstr_t *bitfield)
2300 {
2301 	inpcb_get_ports_used(ifp, protocol, flags, bitfield,
2302 	    &tcbinfo);
2303 }
2304 
2305 __private_extern__ uint32_t
2306 tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
2307 {
2308 	return inpcb_count_opportunistic(ifindex, &tcbinfo, flags);
2309 }
2310 
2311 __private_extern__ uint32_t
2312 tcp_find_anypcb_byaddr(struct ifaddr *ifa)
2313 {
2314 #if SKYWALK
2315 	if (netns_is_enabled()) {
2316 		return netns_find_anyres_byaddr(ifa, IPPROTO_TCP);
2317 	} else
2318 #endif /* SKYWALK */
2319 	return inpcb_find_anypcb_byaddr(ifa, &tcbinfo);
2320 }
2321 
2322 static void
2323 tcp_handle_msgsize(struct ip *ip, struct inpcb *inp)
2324 {
2325 	struct rtentry *rt = NULL;
2326 	u_short ifscope = IFSCOPE_NONE;
2327 	int mtu;
2328 	struct sockaddr_in icmpsrc = {
2329 		.sin_len = sizeof(struct sockaddr_in),
2330 		.sin_family = AF_INET, .sin_port = 0, .sin_addr = { .s_addr = 0 },
2331 		.sin_zero = { 0, 0, 0, 0, 0, 0, 0, 0 }
2332 	};
2333 	struct icmp *icp = NULL;
2334 
2335 	icp = (struct icmp *)(void *)
2336 	    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));
2337 
2338 	icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
2339 
2340 	/*
2341 	 * MTU discovery:
2342 	 * If we got a needfrag and there is a host route to the
2343 	 * original destination, and the MTU is not locked, then
2344 	 * set the MTU in the route to the suggested new value
2345 	 * (if given) and then notify as usual.  The ULPs will
2346 	 * notice that the MTU has changed and adapt accordingly.
2347 	 * If no new MTU was suggested, then we guess a new one
2348 	 * less than the current value.  If the new MTU is
2349 	 * unreasonably small (defined by sysctl tcp_minmss), then
2350 	 * we reset the MTU to the interface value and enable the
2351 	 * lock bit, indicating that we are no longer doing MTU
2352 	 * discovery.
2353 	 */
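	/*
	 * Editor's note -- worked example of the policy described above
	 * (hypothetical numbers): a "frag needed" ICMP carrying
	 * icmp_nextmtu = 1400 against a host route with rmx_mtu = 1500
	 * lowers rmx_mtu to 1400.  Had the suggested MTU been below
	 * max(296, tcp_minmss + sizeof(struct tcpiphdr)), RTV_MTU would
	 * be locked instead and PMTU discovery stops for this route.
	 */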
2354 	if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) {
2355 		rt = inp->inp_route.ro_rt;
2356 	}
2357 
2358 	/*
2359 	 * icmp6_mtudisc_update scopes the routing lookup
2360 	 * to the incoming interface (delivered from the mbuf
2361 	 * packet header).
2362 	 * That is mostly OK, but for asymmetric networks
2363 	 * it may be an issue.
2364 	 * Frag needed OR Packet too big really communicates the
2365 	 * MTU for the outbound data path.
2366 	 * Take the interface scope from the cached route or
2367 	 * the last outgoing interface from the inp.
2368 	 */
2369 	if (rt != NULL) {
2370 		ifscope = (rt->rt_ifp != NULL) ?
2371 		    rt->rt_ifp->if_index : IFSCOPE_NONE;
2372 	} else {
2373 		ifscope = (inp->inp_last_outifp != NULL) ?
2374 		    inp->inp_last_outifp->if_index : IFSCOPE_NONE;
2375 	}
2376 
2377 	if ((rt == NULL) ||
2378 	    !(rt->rt_flags & RTF_HOST) ||
2379 	    (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING))) {
2380 		rt = rtalloc1_scoped(SA(&icmpsrc), 0, RTF_CLONING | RTF_PRCLONING, ifscope);
2381 	} else if (rt) {
2382 		RT_LOCK(rt);
2383 		rtref(rt);
2384 		RT_UNLOCK(rt);
2385 	}
2386 
2387 	if (rt != NULL) {
2388 		RT_LOCK(rt);
2389 		if ((rt->rt_flags & RTF_HOST) &&
2390 		    !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
2391 			mtu = ntohs(icp->icmp_nextmtu);
2392 			/*
2393 			 * XXX Stock BSD has changed the following
2394 			 * to compare with icp->icmp_ip.ip_len
2395 			 * to converge faster when sent packet
2396 			 * < route's MTU. We may want to adopt
2397 			 * that change.
2398 			 */
2399 			if (mtu == 0) {
2400 				mtu = ip_next_mtu(rt->rt_rmx.
2401 				    rmx_mtu, 1);
2402 			}
2403 #if DEBUG_MTUDISC
2404 			printf("MTU for %s reduced to %d\n",
2405 			    inet_ntop(AF_INET,
2406 			    &icmpsrc.sin_addr, ipv4str,
2407 			    sizeof(ipv4str)), mtu);
2408 #endif
2409 			if (mtu < max(296, (tcp_minmss +
2410 			    sizeof(struct tcpiphdr)))) {
2411 				rt->rt_rmx.rmx_locks |= RTV_MTU;
2412 			} else if (rt->rt_rmx.rmx_mtu > mtu) {
2413 				rt->rt_rmx.rmx_mtu = mtu;
2414 			}
2415 		}
2416 		RT_UNLOCK(rt);
2417 		rtfree(rt);
2418 	}
2419 }
2420 
2421 void
2422 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp)
2423 {
2424 	tcp_seq icmp_tcp_seq;
2425 	struct ipctlparam *ctl_param = vip;
2426 	struct ip *ip = NULL;
2427 	struct mbuf *m = NULL;
2428 	struct in_addr faddr;
2429 	struct inpcb *inp;
2430 	struct tcpcb *tp;
2431 	struct tcphdr *th;
2432 	struct icmp *icp;
2433 	size_t off;
2434 #if SKYWALK
2435 	union sockaddr_in_4_6 sock_laddr;
2436 	struct protoctl_ev_val prctl_ev_val;
2437 #endif /* SKYWALK */
2438 	void (*notify)(struct inpcb *, int) = tcp_notify;
2439 
2440 	if (ctl_param != NULL) {
2441 		ip = ctl_param->ipc_icmp_ip;
2442 		icp = ctl_param->ipc_icmp;
2443 		m = ctl_param->ipc_m;
2444 		off = ctl_param->ipc_off;
2445 	} else {
2446 		ip = NULL;
2447 		icp = NULL;
2448 		m = NULL;
2449 		off = 0;
2450 	}
2451 
2452 	faddr = SIN(sa)->sin_addr;
2453 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
2454 		return;
2455 	}
2456 
2457 	if ((unsigned)cmd >= PRC_NCMDS) {
2458 		return;
2459 	}
2460 
2461 	/* Source quench is deprecated */
2462 	if (cmd == PRC_QUENCH) {
2463 		return;
2464 	}
2465 
2466 	if (cmd == PRC_MSGSIZE) {
2467 		notify = tcp_mtudisc;
2468 	} else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2469 	    cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2470 	    cmd == PRC_TIMXCEED_INTRANS) && ip) {
2471 		notify = tcp_drop_syn_sent;
2472 	}
2473 	/*
2474 	 * Hostdead is ugly because it goes linearly through all PCBs.
2475 	 * XXX: We never get this from ICMP, otherwise it makes an
2476 	 * excellent DoS attack on machines with many connections.
2477 	 */
2478 	else if (cmd == PRC_HOSTDEAD) {
2479 		ip = NULL;
2480 	} else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2481 		return;
2482 	}
2483 
2484 #if SKYWALK
2485 	bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2486 	bzero(&sock_laddr, sizeof(sock_laddr));
2487 #endif /* SKYWALK */
2488 
2489 	if (ip == NULL) {
2490 		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
2491 #if SKYWALK
2492 		protoctl_event_enqueue_nwk_wq_entry(ifp, NULL,
2493 		    sa, 0, 0, IPPROTO_TCP, cmd, NULL);
2494 #endif /* SKYWALK */
2495 		return;
2496 	}
2497 
2498 	/* Check if we can safely get the sport, dport and the sequence number from the tcp header. */
2499 	if (m == NULL ||
2500 	    (m->m_len < off + (sizeof(unsigned short) + sizeof(unsigned short) + sizeof(tcp_seq)))) {
2501 		/* Insufficient length */
2502 		return;
2503 	}
2504 
2505 	th = (struct tcphdr*)(void*)(mtod(m, uint8_t*) + off);
2506 	icmp_tcp_seq = ntohl(th->th_seq);
2507 
2508 	inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
2509 	    ip->ip_src, th->th_sport, 0, NULL);
2510 
2511 	if (inp == NULL ||
2512 	    inp->inp_socket == NULL) {
2513 #if SKYWALK
2514 		if (cmd == PRC_MSGSIZE) {
2515 			prctl_ev_val.val = ntohs(icp->icmp_nextmtu);
2516 		}
2517 		prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2518 
2519 		sock_laddr.sin.sin_family = AF_INET;
2520 		sock_laddr.sin.sin_len = sizeof(sock_laddr.sin);
2521 		sock_laddr.sin.sin_addr = ip->ip_src;
2522 
2523 		protoctl_event_enqueue_nwk_wq_entry(ifp,
2524 		    SA(&sock_laddr), sa,
2525 		    th->th_sport, th->th_dport, IPPROTO_TCP,
2526 		    cmd, &prctl_ev_val);
2527 #endif /* SKYWALK */
2528 		return;
2529 	}
2530 
2531 	socket_lock(inp->inp_socket, 1);
2532 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2533 	    WNT_STOPUSING) {
2534 		socket_unlock(inp->inp_socket, 1);
2535 		return;
2536 	}
2537 
2538 	if (PRC_IS_REDIRECT(cmd)) {
2539 		/* signal EHOSTDOWN, as it flushes the cached route */
2540 		(*notify)(inp, EHOSTDOWN);
2541 	} else {
2542 		tp = intotcpcb(inp);
2543 		if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2544 		    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2545 			if (cmd == PRC_MSGSIZE) {
2546 				tcp_handle_msgsize(ip, inp);
2547 			}
2548 
2549 			(*notify)(inp, inetctlerrmap[cmd]);
2550 		}
2551 	}
2552 	socket_unlock(inp->inp_socket, 1);
2553 }
2554 
2555 void
2556 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
2557 {
2558 	tcp_seq icmp_tcp_seq;
2559 	struct in6_addr *dst;
2560 	void (*notify)(struct inpcb *, int) = tcp_notify;
2561 	struct ip6_hdr *ip6;
2562 	struct mbuf *m;
2563 	struct inpcb *inp;
2564 	struct tcpcb *tp;
2565 	struct icmp6_hdr *icmp6;
2566 	struct ip6ctlparam *ip6cp = NULL;
2567 	const struct sockaddr_in6 *sa6_src = NULL;
2568 	unsigned int mtu;
2569 	unsigned int off;
2570 
2571 	struct tcp_ports {
2572 		uint16_t th_sport;
2573 		uint16_t th_dport;
2574 	} t_ports;
2575 #if SKYWALK
2576 	union sockaddr_in_4_6 sock_laddr;
2577 	struct protoctl_ev_val prctl_ev_val;
2578 #endif /* SKYWALK */
2579 
2580 	if (sa->sa_family != AF_INET6 ||
2581 	    sa->sa_len != sizeof(struct sockaddr_in6)) {
2582 		return;
2583 	}
2584 
2585 	/* Source quench is deprecated */
2586 	if (cmd == PRC_QUENCH) {
2587 		return;
2588 	}
2589 
2590 	if ((unsigned)cmd >= PRC_NCMDS) {
2591 		return;
2592 	}
2593 
2594 	/* if the parameter is from icmp6, decode it. */
2595 	if (d != NULL) {
2596 		ip6cp = (struct ip6ctlparam *)d;
2597 		icmp6 = ip6cp->ip6c_icmp6;
2598 		m = ip6cp->ip6c_m;
2599 		ip6 = ip6cp->ip6c_ip6;
2600 		off = ip6cp->ip6c_off;
2601 		sa6_src = ip6cp->ip6c_src;
2602 		dst = ip6cp->ip6c_finaldst;
2603 	} else {
2604 		m = NULL;
2605 		ip6 = NULL;
2606 		off = 0;        /* fool gcc */
2607 		sa6_src = &sa6_any;
2608 		dst = NULL;
2609 	}
2610 
2611 	if (cmd == PRC_MSGSIZE) {
2612 		notify = tcp_mtudisc;
2613 	} else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2614 	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) &&
2615 	    ip6 != NULL) {
2616 		notify = tcp_drop_syn_sent;
2617 	}
2618 	/*
2619 	 * Hostdead is ugly because it goes linearly through all PCBs.
2620 	 * XXX: We never get this from ICMP, otherwise it makes an
2621 	 * excellent DoS attack on machines with many connections.
2622 	 */
2623 	else if (cmd == PRC_HOSTDEAD) {
2624 		ip6 = NULL;
2625 	} else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2626 		return;
2627 	}
2628 
2629 #if SKYWALK
2630 	bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2631 	bzero(&sock_laddr, sizeof(sock_laddr));
2632 #endif /* SKYWALK */
2633 
2634 	if (ip6 == NULL) {
2635 		in6_pcbnotify(&tcbinfo, sa, 0, SA(sa6_src), 0, cmd, NULL, notify);
2636 #if SKYWALK
2637 		protoctl_event_enqueue_nwk_wq_entry(ifp, NULL, sa,
2638 		    0, 0, IPPROTO_TCP, cmd, NULL);
2639 #endif /* SKYWALK */
2640 		return;
2641 	}
2642 
2643 	/* Check if we can safely get the ports from the tcp hdr */
2644 	if (m == NULL ||
2645 	    (m->m_pkthdr.len <
2646 	    (int32_t) (off + sizeof(struct tcp_ports)))) {
2647 		return;
2648 	}
2649 	bzero(&t_ports, sizeof(struct tcp_ports));
2650 	m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
2651 
2652 	off += sizeof(struct tcp_ports);
2653 	if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
2654 		return;
2655 	}
2656 	m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
2657 	icmp_tcp_seq = ntohl(icmp_tcp_seq);
2658 
2659 	if (cmd == PRC_MSGSIZE) {
2660 		mtu = ntohl(icmp6->icmp6_mtu);
2661 		/*
2662 		 * If no alternative MTU was proposed, or the proposed
2663 		 * MTU was too small, set to the min.
2664 		 */
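		/*
		 * (Editor's note: IPV6_MMTU - 8, rather than IPV6_MMTU,
		 * appears to leave room for an 8-byte IPv6 fragment header,
		 * so packets can still be fragmented on a minimum-MTU path.)
		 */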
2665 		if (mtu < IPV6_MMTU) {
2666 			mtu = IPV6_MMTU - 8;
2667 		}
2668 	}
2669 
2670 	inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, t_ports.th_dport, ip6_input_getdstifscope(m),
2671 	    &ip6->ip6_src, t_ports.th_sport, ip6_input_getsrcifscope(m), 0, NULL);
2672 
2673 	if (inp == NULL ||
2674 	    inp->inp_socket == NULL) {
2675 #if SKYWALK
2676 		if (cmd == PRC_MSGSIZE) {
2677 			prctl_ev_val.val = mtu;
2678 		}
2679 		prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2680 
2681 		sock_laddr.sin6.sin6_family = AF_INET6;
2682 		sock_laddr.sin6.sin6_len = sizeof(sock_laddr.sin6);
2683 		sock_laddr.sin6.sin6_addr = ip6->ip6_src;
2684 
2685 		protoctl_event_enqueue_nwk_wq_entry(ifp,
2686 		    SA(&sock_laddr), sa,
2687 		    t_ports.th_sport, t_ports.th_dport, IPPROTO_TCP,
2688 		    cmd, &prctl_ev_val);
2689 #endif /* SKYWALK */
2690 		return;
2691 	}
2692 
2693 	socket_lock(inp->inp_socket, 1);
2694 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2695 	    WNT_STOPUSING) {
2696 		socket_unlock(inp->inp_socket, 1);
2697 		return;
2698 	}
2699 
2700 	if (PRC_IS_REDIRECT(cmd)) {
2701 		/* signal EHOSTDOWN, as it flushes the cached route */
2702 		(*notify)(inp, EHOSTDOWN);
2703 	} else {
2704 		tp = intotcpcb(inp);
2705 		if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2706 		    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2707 			if (cmd == PRC_MSGSIZE) {
2708 				/*
2709 				 * Only process the offered MTU if it
2710 				 * is smaller than the current one.
2711 				 */
2712 				if (mtu < tp->t_maxseg +
2713 				    (sizeof(struct tcphdr) + sizeof(struct ip6_hdr))) {
2714 					(*notify)(inp, inetctlerrmap[cmd]);
2715 				}
2716 			} else {
2717 				(*notify)(inp, inetctlerrmap[cmd]);
2718 			}
2719 		}
2720 	}
2721 	socket_unlock(inp->inp_socket, 1);
2722 }
2723 
2724 
2725 /*
2726  * Following is where TCP initial sequence number generation occurs.
2727  *
2728  * There are two places where we must use initial sequence numbers:
2729  * 1.  In SYN-ACK packets.
2730  * 2.  In SYN packets.
2731  *
2732  * The ISNs in SYN-ACK packets have no monotonicity requirement,
2733  * and should be as unpredictable as possible to avoid the possibility
2734  * of spoofing and/or connection hijacking.  To satisfy this
2735  * requirement, SYN-ACK ISNs are generated via the arc4random()
2736  * function.  If exact RFC 1948 compliance is requested via sysctl,
2737  * these ISNs will be generated just like those in SYN packets.
2738  *
2739  * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
2740  * depends on this property.  In addition, these ISNs should be
2741  * unguessable so as to prevent connection hijacking.  To satisfy
2742  * the requirements of this situation, the algorithm outlined in
2743  * RFC 9293 is used to generate sequence numbers.
2744  *
2745  * For more information on the theory of operation, please see
2746  * RFC 9293.
2747  *
2748  * Implementation details:
2749  *
2750  * Time is based off the system timer, and is corrected so that it
2751  * increases by one megabyte per second.  This allows for proper
2752  * recycling on high speed LANs while still leaving over an hour
2753  * before rollover.
2754  *
2755  */
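/*
 * Editor's note -- a sketch of the resulting construction (not a
 * normative restatement; see tcp_new_isn() below for the actual code):
 *
 *   ISN = F(laddr, lport, faddr, fport, secret) + M(t)
 *
 * where F is an MD5 digest over the connection 4-tuple and a boot-time
 * secret, and M(t) is the monotonic component derived from the system
 * uptime clock.  F makes the sequence unguessable per connection, while
 * M(t) preserves the monotonicity needed for TIME_WAIT recycling.
 */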
2756 
2757 #define ISN_BYTES_PER_SECOND 1048576
2758 
2759 tcp_seq
2760 tcp_new_isn(struct tcpcb *tp)
2761 {
2762 	uint32_t md5_buffer[4];
2763 	tcp_seq new_isn;
2764 	struct timespec timenow;
2765 	MD5_CTX isn_ctx;
2766 
2767 	nanouptime(&timenow);
2768 
2769 	/* Compute the md5 hash and return the ISN. */
2770 	MD5Init(&isn_ctx);
2771 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport,
2772 	    sizeof(u_short));
2773 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport,
2774 	    sizeof(u_short));
2775 	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
2776 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
2777 		    sizeof(struct in6_addr));
2778 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
2779 		    sizeof(struct in6_addr));
2780 	} else {
2781 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
2782 		    sizeof(struct in_addr));
2783 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
2784 		    sizeof(struct in_addr));
2785 	}
2786 	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
2787 	MD5Final((u_char *) &md5_buffer, &isn_ctx);
2788 
2789 	new_isn = (tcp_seq) md5_buffer[0];
2790 
2791 	/*
2792 	 * We use a 128ns clock, which is equivalent to 600 Mbps and wraps at
2793 	 * 549 seconds, thus safe for 2 MSL lifetime of TIME-WAIT-state.
2794 	 */
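	/*
	 * Editor's note -- the arithmetic behind the comment above:
	 * the shift by 7 converts nanoseconds into 2^7 ns = 128 ns ticks,
	 * and the 32-bit sequence space therefore wraps after
	 * 2^32 * 128 ns ~= 549.8 seconds, comfortably more than 2 * MSL
	 * for TIME-WAIT purposes.
	 */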
2795 	new_isn += (timenow.tv_sec * NSEC_PER_SEC + timenow.tv_nsec) >> 7;
2796 
2797 	if (__probable(tcp_randomize_timestamps)) {
2798 		tp->t_ts_offset = md5_buffer[1];
2799 	}
2800 
2801 	return new_isn;
2802 }
2803 
2804 
2805 /*
2806  * When a specific ICMP unreachable message is received and the
2807  * connection state is SYN-SENT, drop the connection.  This behavior
2808  * is controlled by the icmp_may_rst sysctl.
2809  */
2810 void
2811 tcp_drop_syn_sent(struct inpcb *inp, int errno)
2812 {
2813 	struct tcpcb *tp = intotcpcb(inp);
2814 
2815 	if (tp && tp->t_state == TCPS_SYN_SENT) {
2816 		tcp_drop(tp, errno);
2817 	}
2818 }
2819 
2820 /*
2821  * Get effective MTU for redirect virtual interface. Redirect
2822  * virtual interface switches between multiple delegated interfaces.
2823  * For cases, where redirect forwards packets to an ipsec interface,
2824  * MTU should be adjusted to consider ESP encapsulation overhead.
2825  */
2826 uint32_t
2827 tcp_get_effective_mtu(struct rtentry *rt, uint32_t current_mtu)
2828 {
2829 	ifnet_t ifp = NULL;
2830 	ifnet_t delegated_ifp = NULL;
2831 	ifnet_t outgoing_ifp = NULL;
2832 	uint32_t min_mtu = 0;
2833 	uint32_t outgoing_mtu = 0;
2834 	uint32_t tunnel_overhead = 0;
2835 
2836 	if (rt == NULL || rt->rt_ifp == NULL) {
2837 		return current_mtu;
2838 	}
2839 
2840 	ifp = rt->rt_ifp;
2841 	if (ifp->if_subfamily != IFNET_SUBFAMILY_REDIRECT) {
2842 		return current_mtu;
2843 	}
2844 
2845 	delegated_ifp = ifp->if_delegated.ifp;
2846 	if (delegated_ifp == NULL || delegated_ifp->if_family != IFNET_FAMILY_IPSEC) {
2847 		return current_mtu;
2848 	}
2849 
2850 	min_mtu = MIN(delegated_ifp->if_mtu, current_mtu);
2851 
2852 	outgoing_ifp = delegated_ifp->if_delegated.ifp;
2853 	if (outgoing_ifp == NULL) {
2854 		return min_mtu;
2855 	}
2856 
2857 	outgoing_mtu = outgoing_ifp->if_mtu;
2858 	if (outgoing_mtu > 0) {
2859 		tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
2860 		if (outgoing_mtu > tunnel_overhead) {
2861 			outgoing_mtu -= tunnel_overhead;
2862 		}
2863 		if (outgoing_mtu < min_mtu) {
2864 			return outgoing_mtu;
2865 		}
2866 	}
2867 
2868 	return min_mtu;
2869 }
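/*
 * Editor's note -- worked example for the function above (hypothetical
 * numbers, since esp_hdrsiz() depends on the negotiated SA): with a
 * redirect interface delegating to an ipsec interface, current_mtu =
 * 1500 and a delegated if_mtu of 1500, min_mtu stays 1500; if the
 * outgoing interface also reports 1500 and the ESP + outer-IPv6
 * overhead comes to, say, 113 bytes, the effective MTU returned is
 * 1500 - 113 = 1387.
 */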
2870 
2871 /*
2872  * When `need fragmentation' ICMP is received, update our idea of the MSS
2873  * based on the new value in the route.  Also nudge TCP to send something,
2874  * since we know the packet we just sent was dropped.
2875  * This duplicates some code in the tcp_mss() function in tcp_input.c.
2876  */
2877 void
2878 tcp_mtudisc(struct inpcb *inp, __unused int errno)
2879 {
2880 	struct tcpcb *tp = intotcpcb(inp);
2881 	struct rtentry *rt;
2882 	struct socket *so = inp->inp_socket;
2883 	int mss;
2884 	u_int32_t mtu;
2885 	u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr);
2886 	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
2887 
2888 	/*
2889 	 * Nothing left to send after the socket is defunct or TCP is in the closed state
2890 	 */
2891 	if ((so->so_state & SS_DEFUNCT) || (tp != NULL && tp->t_state == TCPS_CLOSED)) {
2892 		return;
2893 	}
2894 
2895 	if (isipv6) {
2896 		protoHdrOverhead = sizeof(struct ip6_hdr) +
2897 		    sizeof(struct tcphdr);
2898 	}
2899 
2900 	if (tp != NULL) {
2901 		if (isipv6) {
2902 			rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
2903 		} else {
2904 			rt = tcp_rtlookup(inp, IFSCOPE_NONE);
2905 		}
2906 		if (!rt || !rt->rt_rmx.rmx_mtu) {
2907 			tp->t_maxopd = tp->t_maxseg =
2908 			    isipv6 ? tcp_v6mssdflt :
2909 			    tcp_mssdflt;
2910 
2911 			/* Route locked during lookup above */
2912 			if (rt != NULL) {
2913 				RT_UNLOCK(rt);
2914 			}
2915 			return;
2916 		}
2917 		mtu = rt->rt_rmx.rmx_mtu;
2918 
2919 		mtu = tcp_get_effective_mtu(rt, mtu);
2920 
2921 		/* Route locked during lookup above */
2922 		RT_UNLOCK(rt);
2923 
2924 #if NECP
2925 		// Adjust MTU if necessary.
2926 		mtu = necp_socket_get_effective_mtu(inp, mtu);
2927 #endif /* NECP */
2928 		mss = mtu - protoHdrOverhead;
2929 
2930 		if (tp->t_maxopd) {
2931 			mss = min(mss, tp->t_maxopd);
2932 		}
2933 		/*
2934 		 * XXX - The above conditional probably violates the TCP
2935 		 * spec.  The problem is that, since we don't know the
2936 		 * other end's MSS, we are supposed to use a conservative
2937 		 * default.  But, if we do that, then MTU discovery will
2938 		 * never actually take place, because the conservative
2939 		 * default is much less than the MTUs typically seen
2940 		 * on the Internet today.  For the moment, we'll sweep
2941 		 * this under the carpet.
2942 		 *
2943 		 * The conservative default might not actually be a problem
2944 		 * if the only case this occurs is when sending an initial
2945 		 * SYN with options and data to a host we've never talked
2946 		 * to before.  Then, they will reply with an MSS value which
2947 		 * will get recorded and the new parameters should get
2948 		 * recomputed.  For Further Study.
2949 		 */
2950 		if (tp->t_maxopd <= mss) {
2951 			return;
2952 		}
2953 		tp->t_maxopd = mss;
2954 
2955 		if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
2956 		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) {
2957 			mss -= TCPOLEN_TSTAMP_APPA;
2958 		}
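		/*
		 * Editor's note -- worked example (assuming the common
		 * values: IPv4 MTU 1500, protoHdrOverhead 40 and
		 * TCPOLEN_TSTAMP_APPA 12): mss starts at 1500 - 40 = 1460;
		 * with timestamps in use it drops to 1460 - 12 = 1448,
		 * the familiar per-segment payload on Ethernet.
		 */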
2959 
2960 #if MPTCP
2961 		mss -= mptcp_adj_mss(tp, TRUE);
2962 #endif
2963 		if (so->so_snd.sb_hiwat < mss) {
2964 			mss = so->so_snd.sb_hiwat;
2965 		}
2966 
2967 		tp->t_maxseg = mss;
2968 
2969 		ASSERT(tp->t_maxseg);
2970 
2971 		/*
2972 		 * Reset the slow-start flight size, as it may depend on the
2973 		 * new MSS.
2974 		 */
2975 		if (CC_ALGO(tp)->cwnd_init != NULL) {
2976 			CC_ALGO(tp)->cwnd_init(tp);
2977 		}
2978 
2979 		if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.rwnd_init != NULL) {
2980 			tcp_cc_rledbat.rwnd_init(tp);
2981 		}
2982 
2983 		tcpstat.tcps_mturesent++;
2984 		tp->t_rtttime = 0;
2985 		tp->snd_nxt = tp->snd_una;
2986 		tcp_output(tp);
2987 	}
2988 }
2989 
2990 /*
2991  * Look-up the routing entry to the peer of this inpcb.  If no route
2992  * is found and one cannot be allocated, NULL is returned.  This routine
2993  * is called by TCP routines that access the rmx structure and by tcp_mss
2994  * to get the interface MTU.  If a route is found, this routine will
2995  * hold the rtentry lock; the caller is responsible for unlocking.
2996  */
2997 struct rtentry *
2998 tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope)
2999 {
3000 	struct route *ro;
3001 	struct rtentry *rt;
3002 	struct tcpcb *tp;
3003 
3004 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3005 
3006 	ro = &inp->inp_route;
3007 	if ((rt = ro->ro_rt) != NULL) {
3008 		RT_LOCK(rt);
3009 	}
3010 
3011 	if (ROUTE_UNUSABLE(ro)) {
3012 		if (rt != NULL) {
3013 			RT_UNLOCK(rt);
3014 			rt = NULL;
3015 		}
3016 		ROUTE_RELEASE(ro);
3017 		/* No route yet, so try to acquire one */
3018 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
3019 			unsigned int ifscope;
3020 
3021 			ro->ro_dst.sa_family = AF_INET;
3022 			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
3023 			SIN(&ro->ro_dst)->sin_addr = inp->inp_faddr;
3024 
3025 			/*
3026 			 * If the socket was bound to an interface, then
3027 			 * the bound-to-interface takes precedence over
3028 			 * the inbound interface passed in by the caller
3029 			 * (if we get here as part of the output path then
3030 			 * input_ifscope is IFSCOPE_NONE).
3031 			 */
3032 			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3033 			    inp->inp_boundifp->if_index : input_ifscope;
3034 
3035 			rtalloc_scoped(ro, ifscope);
3036 			if ((rt = ro->ro_rt) != NULL) {
3037 				RT_LOCK(rt);
3038 			}
3039 		}
3040 	}
3041 	if (rt != NULL) {
3042 		RT_LOCK_ASSERT_HELD(rt);
3043 	}
3044 
3045 	/*
3046 	 * Update MTU discovery determination. Don't do it if:
3047 	 *	1) it is disabled via the sysctl
3048 	 *	2) the route isn't up
3049 	 *	3) the MTU is locked (if it is, then discovery has been
3050 	 *	   disabled)
3051 	 */
3052 
3053 	tp = intotcpcb(inp);
3054 
3055 	if (!path_mtu_discovery || ((rt != NULL) &&
3056 	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3057 		tp->t_flags &= ~TF_PMTUD;
3058 	} else {
3059 		tp->t_flags |= TF_PMTUD;
3060 	}
3061 
3062 	if (rt != NULL && rt->rt_ifp != NULL) {
3063 		somultipages(inp->inp_socket,
3064 		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3065 		tcp_set_tso(tp, rt->rt_ifp);
3066 		soif2kcl(inp->inp_socket,
3067 		    (rt->rt_ifp->if_eflags & IFEF_2KCL));
3068 		tcp_set_ecn(tp, rt->rt_ifp);
3069 		if (inp->inp_last_outifp == NULL) {
3070 			inp->inp_last_outifp = rt->rt_ifp;
3071 #if SKYWALK
3072 			if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3073 				netns_set_ifnet(&inp->inp_netns_token,
3074 				    inp->inp_last_outifp);
3075 			}
3076 #endif /* SKYWALK */
3077 		}
3078 	}
3079 
3080 	/* Note if the peer is local */
3081 	if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3082 	    (rt->rt_gateway->sa_family == AF_LINK ||
3083 	    rt->rt_ifp->if_flags & IFF_LOOPBACK ||
3084 	    in_localaddr(inp->inp_faddr))) {
3085 		tp->t_flags |= TF_LOCAL;
3086 	}
3087 
3088 	/*
3089 	 * Caller needs to call RT_UNLOCK(rt).
3090 	 */
3091 	return rt;
3092 }
3093 
3094 struct rtentry *
3095 tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope)
3096 {
3097 	struct route_in6 *ro6;
3098 	struct rtentry *rt;
3099 	struct tcpcb *tp;
3100 
3101 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3102 
3103 	ro6 = &inp->in6p_route;
3104 	if ((rt = ro6->ro_rt) != NULL) {
3105 		RT_LOCK(rt);
3106 	}
3107 
3108 	if (ROUTE_UNUSABLE(ro6)) {
3109 		if (rt != NULL) {
3110 			RT_UNLOCK(rt);
3111 			rt = NULL;
3112 		}
3113 		ROUTE_RELEASE(ro6);
3114 		/* No route yet, so try to acquire one */
3115 		if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
3116 			struct sockaddr_in6 *dst6;
3117 			unsigned int ifscope;
3118 
3119 			dst6 = SIN6(&ro6->ro_dst);
3120 			dst6->sin6_family = AF_INET6;
3121 			dst6->sin6_len = sizeof(*dst6);
3122 			dst6->sin6_addr = inp->in6p_faddr;
3123 
3124 			/*
3125 			 * If the socket was bound to an interface, then
3126 			 * the bound-to-interface takes precedence over
3127 			 * the inbound interface passed in by the caller
3128 			 * (if we get here as part of the output path then
3129 			 * input_ifscope is IFSCOPE_NONE).
3130 			 */
3131 			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3132 			    inp->inp_boundifp->if_index : input_ifscope;
3133 
3134 			rtalloc_scoped((struct route *)ro6, ifscope);
3135 			if ((rt = ro6->ro_rt) != NULL) {
3136 				RT_LOCK(rt);
3137 			}
3138 		}
3139 	}
3140 	if (rt != NULL) {
3141 		RT_LOCK_ASSERT_HELD(rt);
3142 	}
3143 
3153 	tp = intotcpcb(inp);
3154 
3155 	/*
3156 	 * Update MTU discovery determination. Don't do it if:
3157 	 *	1) it is disabled via the sysctl
3158 	 *	2) the route isn't up
3159 	 *	3) the MTU is locked (if it is, then discovery has been
3160 	 *	   disabled)
3161 	 */
3162 
3163 	if (!path_mtu_discovery || ((rt != NULL) &&
3164 	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3165 		tp->t_flags &= ~TF_PMTUD;
3166 	} else {
3167 		tp->t_flags |= TF_PMTUD;
3168 	}
3169 
3170 	if (rt != NULL && rt->rt_ifp != NULL) {
3171 		somultipages(inp->inp_socket,
3172 		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3173 		tcp_set_tso(tp, rt->rt_ifp);
3174 		soif2kcl(inp->inp_socket,
3175 		    (rt->rt_ifp->if_eflags & IFEF_2KCL));
3176 		tcp_set_ecn(tp, rt->rt_ifp);
3177 		if (inp->inp_last_outifp == NULL) {
3178 			inp->inp_last_outifp = rt->rt_ifp;
3179 #if SKYWALK
3180 			if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3181 				netns_set_ifnet(&inp->inp_netns_token,
3182 				    inp->inp_last_outifp);
3183 			}
3184 #endif /* SKYWALK */
3185 		}
3186 
3187 		/* Note if the peer is local */
3188 		if (!(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3189 		    (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
3190 		    IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
3191 		    rt->rt_gateway->sa_family == AF_LINK ||
3192 		    in6_localaddr(&inp->in6p_faddr))) {
3193 			tp->t_flags |= TF_LOCAL;
3194 		}
3195 	}
3196 
3197 	/*
3198 	 * Caller needs to call RT_UNLOCK(rt).
3199 	 */
3200 	return rt;
3201 }
3202 
3203 #if IPSEC
3204 /* compute ESP/AH header size for TCP, including outer IP header. */
3205 size_t
3206 ipsec_hdrsiz_tcp(struct tcpcb *tp)
3207 {
3208 	struct inpcb *inp;
3209 	struct mbuf *m;
3210 	size_t hdrsiz;
3211 	struct ip *ip;
3212 	struct ip6_hdr *ip6 = NULL;
3213 	struct tcphdr *th;
3214 
3215 	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) {
3216 		return 0;
3217 	}
3218 	MGETHDR(m, M_DONTWAIT, MT_DATA);        /* MAC-OK */
3219 	if (!m) {
3220 		return 0;
3221 	}
3222 
3223 	if ((inp->inp_vflag & INP_IPV6) != 0) {
3224 		ip6 = mtod(m, struct ip6_hdr *);
3225 		th = (struct tcphdr *)(void *)(ip6 + 1);
3226 		m->m_pkthdr.len = m->m_len =
3227 		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3228 		tcp_fillheaders(m, tp, ip6, th);
3229 		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3230 	} else {
3231 		ip = mtod(m, struct ip *);
3232 		th = (struct tcphdr *)(ip + 1);
3233 		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
3234 		tcp_fillheaders(m, tp, ip, th);
3235 		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3236 	}
3237 	m_free(m);
3238 	return hdrsiz;
3239 }
3240 #endif /* IPSEC */
3241 
3242 int
3243 tcp_lock(struct socket *so, int refcount, void *lr)
3244 {
3245 	void *lr_saved;
3246 
3247 	if (lr == NULL) {
3248 		lr_saved = __builtin_return_address(0);
3249 	} else {
3250 		lr_saved = lr;
3251 	}
3252 
3253 retry:
3254 	if (so->so_pcb != NULL) {
3255 		if (so->so_flags & SOF_MP_SUBFLOW) {
3256 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3257 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3258 
3259 			socket_lock(mp_so, refcount);
3260 
3261 			/*
3262 			 * Check if we became non-MPTCP while waiting for the lock.
3263 			 * If yes, we have to retry to grab the right lock.
3264 			 */
3265 			if (!(so->so_flags & SOF_MP_SUBFLOW)) {
3266 				socket_unlock(mp_so, refcount);
3267 				goto retry;
3268 			}
3269 		} else {
3270 			lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3271 
3272 			if (so->so_flags & SOF_MP_SUBFLOW) {
3273 				/*
3274 				 * While waiting for the lock, we might have
3275 				 * become MPTCP-enabled (see mptcp_subflow_socreate).
3276 				 */
3277 				lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3278 				goto retry;
3279 			}
3280 		}
3281 	} else {
3282 		panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s",
3283 		    so, lr_saved, solockhistory_nr(so));
3284 		/* NOTREACHED */
3285 	}
3286 
3287 	if (so->so_usecount < 0) {
3288 		panic("tcp_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s",
3289 		    so, so->so_pcb, lr_saved, so->so_usecount,
3290 		    solockhistory_nr(so));
3291 		/* NOTREACHED */
3292 	}
3293 	if (refcount) {
3294 		so->so_usecount++;
3295 	}
3296 	so->lock_lr[so->next_lock_lr] = lr_saved;
3297 	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
3298 	return 0;
3299 }
3300 
3301 int
3302 tcp_unlock(struct socket *so, int refcount, void *lr)
3303 {
3304 	void *lr_saved;
3305 
3306 	if (lr == NULL) {
3307 		lr_saved = __builtin_return_address(0);
3308 	} else {
3309 		lr_saved = lr;
3310 	}
3311 
3312 #ifdef MORE_TCPLOCK_DEBUG
3313 	printf("tcp_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x "
3314 	    "lr=0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so),
3315 	    (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
3316 	    (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)),
3317 	    so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
3318 #endif
3319 	if (refcount) {
3320 		so->so_usecount--;
3321 	}
3322 
3323 	if (so->so_usecount < 0) {
3324 		panic("tcp_unlock: so=%p usecount=%x lrh= %s",
3325 		    so, so->so_usecount, solockhistory_nr(so));
3326 		/* NOTREACHED */
3327 	}
3328 	if (so->so_pcb == NULL) {
3329 		panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s",
3330 		    so, so->so_usecount, lr_saved, solockhistory_nr(so));
3331 		/* NOTREACHED */
3332 	} else {
3333 		so->unlock_lr[so->next_unlock_lr] = lr_saved;
3334 		so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
3335 
3336 		if (so->so_flags & SOF_MP_SUBFLOW) {
3337 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3338 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3339 
3340 			socket_lock_assert_owned(mp_so);
3341 
3342 			socket_unlock(mp_so, refcount);
3343 		} else {
3344 			LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
3345 			    LCK_MTX_ASSERT_OWNED);
3346 			lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3347 		}
3348 	}
3349 	return 0;
3350 }
3351 
3352 lck_mtx_t *
tcp_getlock(struct socket * so,int flags)3353 tcp_getlock(struct socket *so, int flags)
3354 {
3355 	struct inpcb *inp = sotoinpcb(so);
3356 
3357 	if (so->so_pcb) {
3358 		if (so->so_usecount < 0) {
3359 			panic("tcp_getlock: so=%p usecount=%x lrh= %s",
3360 			    so, so->so_usecount, solockhistory_nr(so));
3361 		}
3362 
3363 		if (so->so_flags & SOF_MP_SUBFLOW) {
3364 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3365 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3366 
3367 			return mp_so->so_proto->pr_getlock(mp_so, flags);
3368 		} else {
3369 			return &inp->inpcb_mtx;
3370 		}
3371 	} else {
3372 		panic("tcp_getlock: so=%p NULL so_pcb %s",
3373 		    so, solockhistory_nr(so));
3374 		return so->so_proto->pr_domain->dom_mtx;
3375 	}
3376 }
3377 
3378 /*
3379  * Determine if we can grow the receive socket buffer to avoid sending
3380  * a zero window update to the peer. We allow even socket buffers that
3381  * have fixed size (set by the application) to grow if the resource
3382  * constraints are met. They will also be trimmed after the application
3383  * reads data.
3384  */
3385 static void
3386 tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb)
3387 {
3388 	u_int32_t rcvbufinc = tp->t_maxseg << 4;
3389 	u_int32_t rcvbuf = sb->sb_hiwat;
3390 	struct socket *so = tp->t_inpcb->inp_socket;
3391 
3392 	if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
3393 		return;
3394 	}
3395 
3396 	if (tcp_do_autorcvbuf == 1 &&
3397 	    (tp->t_flags & TF_SLOWLINK) == 0 &&
3398 	    (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
3399 	    (rcvbuf - sb->sb_cc) < rcvbufinc &&
3400 	    rcvbuf < tcp_autorcvbuf_max &&
3401 	    (sb->sb_idealsize > 0 &&
3402 	    sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
3403 		sbreserve(sb,
3404 		    min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max));
3405 	}
3406 }
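/*
 * Editor's note -- worked example for the growth test above
 * (hypothetical numbers): with t_maxseg = 1448, rcvbufinc is
 * 1448 << 4 = 23168 bytes.  If fewer than 23168 bytes remain free in
 * the buffer and sb_hiwat is still below both tcp_autorcvbuf_max and
 * sb_idealsize + 23168, the high-water mark is raised by one such
 * increment (capped at tcp_autorcvbuf_max).
 */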
3407 
3408 int32_t
3409 tcp_sbspace(struct tcpcb *tp)
3410 {
3411 	struct socket *so = tp->t_inpcb->inp_socket;
3412 	struct sockbuf *sb = &so->so_rcv;
3413 	u_int32_t rcvbuf;
3414 	int32_t space;
3415 	int32_t pending = 0;
3416 
3417 	if (so->so_flags & SOF_MP_SUBFLOW) {
3418 		/* We still need to grow TCP's buffer to have a BDP-estimate */
3419 		tcp_sbrcv_grow_rwin(tp, sb);
3420 
3421 		return mptcp_sbspace(tptomptp(tp));
3422 	}
3423 
3424 	tcp_sbrcv_grow_rwin(tp, sb);
3425 
3426 	/* hiwat might have changed */
3427 	rcvbuf = sb->sb_hiwat;
3428 
3429 	space =  ((int32_t) imin((rcvbuf - sb->sb_cc),
3430 	    (sb->sb_mbmax - sb->sb_mbcnt)));
3431 	if (space < 0) {
3432 		space = 0;
3433 	}
3434 
3435 #if CONTENT_FILTER
3436 	/* Compensate for data being processed by content filters */
3437 	pending = cfil_sock_data_space(sb);
3438 #endif /* CONTENT_FILTER */
3439 	if (pending > space) {
3440 		space = 0;
3441 	} else {
3442 		space -= pending;
3443 	}
3444 
3445 	/*
3446 	 * Avoid increasing window size if the current window
3447 	 * is already very low, we could be in "persist" mode and
3448 	 * we could break some apps (see rdar://5409343)
3449 	 */
3450 
3451 	if (space < tp->t_maxseg) {
3452 		return space;
3453 	}
3454 
3455 	/* Clip window size for slower link */
3456 
3457 	if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) {
3458 		return imin(space, slowlink_wsize);
3459 	}
3460 
3461 	return space;
3462 }
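/*
 * Worked example (illustrative numbers only): with sb_hiwat = 131072,
 * sb_cc = 65536, and 4096 bytes still held by a content filter, the
 * usable space is min(65536, sb_mbmax - sb_mbcnt) - 4096; the result is
 * clamped at 0 and, on a slow link, further clipped to slowlink_wsize.
 */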
3463 /*
3464  * Checks TCP Segment Offloading capability for a given connection
3465  * and interface pair.
3466  */
3467 void
3468 tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
3469 {
3470 	struct inpcb *inp;
3471 	int isipv6;
3472 	struct ifnet *tunnel_ifp = NULL;
3473 #define IFNET_TSO_MASK (IFNET_TSO_IPV6 | IFNET_TSO_IPV4)
3474 
3475 	tp->t_flags &= ~TF_TSO;
3476 
3477 	/*
3478 	 * Bail if there's a non-TSO-capable filter on the interface.
3479 	 */
3480 	if (ifp == NULL || ifp->if_flt_no_tso_count > 0) {
3481 		return;
3482 	}
3483 
3484 	inp = tp->t_inpcb;
3485 	isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
3486 
3487 #if MPTCP
3488 	/*
3489 	 * We can't use TSO if this tcpcb belongs to an MPTCP session.
3490 	 */
3491 	if (inp->inp_socket->so_flags & SOF_MP_SUBFLOW) {
3492 		return;
3493 	}
3494 #endif
3495 	/*
3496 	 * We can't use TSO if the TSO capability of the tunnel interface does
3497 	 * not match the capability of another interface known by TCP
3498 	 */
3499 	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
3500 		u_int tunnel_if_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
3501 
3502 		if (tunnel_if_index != 0) {
3503 			ifnet_head_lock_shared();
3504 			tunnel_ifp = ifindex2ifnet[tunnel_if_index];
3505 			ifnet_head_done();
3506 		}
3507 
3508 		if (tunnel_ifp == NULL) {
3509 			return;
3510 		}
3511 
3512 		if ((ifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3513 			if (tso_debug > 0) {
3514 				os_log(OS_LOG_DEFAULT,
3515 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with ifp %s",
3516 				    __func__,
3517 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3518 				    tunnel_ifp->if_xname, ifp->if_xname);
3519 			}
3520 			return;
3521 		}
3522 		if (inp->inp_last_outifp != NULL &&
3523 		    (inp->inp_last_outifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3524 			if (tso_debug > 0) {
3525 				os_log(OS_LOG_DEFAULT,
3526 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_last_outifp %s",
3527 				    __func__,
3528 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3529 				    tunnel_ifp->if_xname, inp->inp_last_outifp->if_xname);
3530 			}
3531 			return;
3532 		}
3533 		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp != NULL &&
3534 		    (inp->inp_boundifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3535 			if (tso_debug > 0) {
3536 				os_log(OS_LOG_DEFAULT,
3537 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_boundifp %s",
3538 				    __func__,
3539 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3540 				    tunnel_ifp->if_xname, inp->inp_boundifp->if_xname);
3541 			}
3542 			return;
3543 		}
3544 	}
3545 
3546 	if (isipv6) {
3547 		if (ifp->if_hwassist & IFNET_TSO_IPV6) {
3548 			tp->t_flags |= TF_TSO;
3549 			if (ifp->if_tso_v6_mtu != 0) {
3550 				tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
3551 			} else {
3552 				tp->tso_max_segment_size = TCP_MAXWIN;
3553 			}
3554 		}
3555 	} else {
3556 		if (ifp->if_hwassist & IFNET_TSO_IPV4) {
3557 			tp->t_flags |= TF_TSO;
3558 			if (ifp->if_tso_v4_mtu != 0) {
3559 				tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
3560 			} else {
3561 				tp->tso_max_segment_size = TCP_MAXWIN;
3562 			}
3563 			if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
3564 				tp->tso_max_segment_size -=
3565 				    CLAT46_HDR_EXPANSION_OVERHD;
3566 			}
3567 		}
3568 	}
3569 
3570 	if (tso_debug > 1) {
3571 		os_log(OS_LOG_DEFAULT, "%s: %u > %u TSO %d ifp %s",
3572 		    __func__,
3573 		    ntohs(tp->t_inpcb->inp_lport),
3574 		    ntohs(tp->t_inpcb->inp_fport),
3575 		    (tp->t_flags & TF_TSO) != 0,
3576 		    ifp != NULL ? ifp->if_xname : "<NULL>");
3577 	}
3578 }
3579 
3580 #define TIMEVAL_TO_TCPHZ(_tv_) ((uint32_t)((_tv_).tv_sec * TCP_RETRANSHZ + \
3581 	(_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC))
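/*
 * Worked example (a sketch; assumes the usual values TCP_RETRANSHZ == 100
 * and TCP_RETRANSHZ_TO_USEC == 10000): a timeval of 1 sec and 500000 usec
 * converts to 1 * 100 + 500000 / 10000 = 150 ticks of the TCP clock.
 */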
3582 
3583 /*
3584  * Function to calculate the tcp clock. The tcp clock will get updated
3585  * at the boundaries of the tcp layer. This is done at 3 places:
3586  * 1. Right before processing an input tcp packet
3587  * 2. Whenever a connection wants to access the network using tcp_usrreqs
3588  * 3. When a tcp timer fires or before tcp slow timeout
3589  *
3590  */
3591 
3592 void
3593 calculate_tcp_clock(void)
3594 {
3595 	struct timeval tv = tcp_uptime;
3596 	struct timeval interval = {.tv_sec = 0, .tv_usec = TCP_RETRANSHZ_TO_USEC};
3597 	struct timeval now, hold_now;
3598 	uint32_t incr = 0;
3599 
3600 	microuptime(&now);
3601 
3602 	/*
3603 	 * Update coarse-grained networking timestamp (in sec.); the idea
3604 	 * is to update the counter returnable via net_uptime() when
3605 	 * we read time.
3606 	 */
3607 	net_update_uptime_with_time(&now);
3608 
3609 	timevaladd(&tv, &interval);
3610 	if (timevalcmp(&now, &tv, >)) {
3611 		/* time to update the clock */
3612 		lck_spin_lock(&tcp_uptime_lock);
3613 		if (timevalcmp(&tcp_uptime, &now, >=)) {
3614 			/* clock got updated while waiting for the lock */
3615 			lck_spin_unlock(&tcp_uptime_lock);
3616 			return;
3617 		}
3618 
3619 		microuptime(&now);
3620 		hold_now = now;
3621 		tv = tcp_uptime;
3622 		timevalsub(&now, &tv);
3623 
3624 		incr = TIMEVAL_TO_TCPHZ(now);
3625 
3626 		/* Account for the previous remainder */
3627 		uint32_t remaining_us = (now.tv_usec % TCP_RETRANSHZ_TO_USEC) +
3628 		    tcp_now_remainder_us;
3629 		if (remaining_us >= TCP_RETRANSHZ_TO_USEC) {
3630 			incr += (remaining_us / TCP_RETRANSHZ_TO_USEC);
3631 		}
3632 
3633 		if (incr > 0) {
3634 			tcp_uptime = hold_now;
3635 			tcp_now_remainder_us = remaining_us % TCP_RETRANSHZ_TO_USEC;
3636 			tcp_now += incr;
3637 		}
3638 
3639 		lck_spin_unlock(&tcp_uptime_lock);
3640 	}
3641 }
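/*
 * Example of the remainder handling (a sketch; assumes
 * TCP_RETRANSHZ_TO_USEC == 10000): if 26500 usec elapsed, incr is 2 ticks
 * with 6500 usec left over; once accumulated remainders add up to a full
 * 10000-usec tick, they are folded into incr, so tcp_now does not drift
 * behind the uptime clock.
 */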
3642 
3643 uint64_t
3644 microuptime_ns(void)
3645 {
3646 	uint64_t abstime = mach_absolute_time();
3647 	uint64_t ns = 0;
3648 	absolutetime_to_nanoseconds(abstime, &ns);
3649 
3650 	return ns;
3651 }
3652 
3653 #define MAX_BURST_INTERVAL_KERNEL_PACING_NSEC                                  \
3654 	(10 * NSEC_PER_MSEC) // Don't delay more than 10ms between two bursts
3655 static uint64_t
3656 tcp_pacer_get_packet_interval(struct tcpcb *tp, uint32_t size)
3657 {
3658 	if (tp->t_pacer.rate == 0) {
3659 		os_log_error(OS_LOG_DEFAULT,
3660 		    "pacer rate shouldn't be 0, CCA is %s (cwnd=%u, smoothed rtt=%u ms)",
3661 		    CC_ALGO(tp)->name, tp->snd_cwnd, tp->t_srtt >> TCP_RTT_SHIFT);
3662 
3663 		return MAX_BURST_INTERVAL_KERNEL_PACING_NSEC;
3664 	}
3665 
3666 	uint64_t interval = (uint64_t)size * NSEC_PER_SEC / tp->t_pacer.rate;
3667 	if (interval > MAX_BURST_INTERVAL_KERNEL_PACING_NSEC) {
3668 		interval = MAX_BURST_INTERVAL_KERNEL_PACING_NSEC;
3669 	}
3670 
3671 	return interval;
3672 }
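/*
 * Worked example (illustrative numbers only; assumes t_pacer.rate is in
 * bytes per second): at a rate of 12500000 bytes/sec (100 Mbit/s), a
 * 9000-byte burst yields 9000 * NSEC_PER_SEC / 12500000 = 720000 ns
 * between bursts; anything above 10 ms is clamped to
 * MAX_BURST_INTERVAL_KERNEL_PACING_NSEC.
 */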
3673 
3674 /* Return packet tx_time in nanoseconds (absolute as well as continuous) */
3675 uint64_t
3676 tcp_pacer_get_packet_tx_time(struct tcpcb *tp, uint16_t pkt_len)
3677 {
3678 	/*
3679 	 * This function is called multiple times for mss-sized packets.
3680 	 * At high speeds, we want to send multiple packets that add up
3681 	 * to burst_size at the same time.
3682 	 */
3683 	uint64_t now = microuptime_ns();
3684 
3685 	if (pkt_len == 0 || now == 0) {
3686 		return now;
3687 	}
3688 
3689 	if (tp->t_pacer.packet_tx_time == 0) {
3690 		tp->t_pacer.packet_tx_time = now;
3691 		tp->t_pacer.current_size = pkt_len;
3692 	} else {
3693 		tp->t_pacer.current_size += pkt_len;
3694 		if (tp->t_pacer.current_size > tp->t_pacer.tso_burst_size) {
3695 			/*
3696 			 * Increment tx_time by the packet interval and
3697 			 * reset the accumulated size
3698 			 */
3699 			tp->t_pacer.packet_tx_time +=
3700 			    tcp_pacer_get_packet_interval(tp, tp->t_pacer.current_size);
3701 			tp->t_pacer.current_size = 0;
3702 			if (now > tp->t_pacer.packet_tx_time) {
3703 				/*
3704 				 * If the current time is bigger, then the application
3705 				 * has already paced the packet. Also, we can't
3706 				 * set tx_time in the past.
3707 				 */
3708 				tp->t_pacer.packet_tx_time = now;
3709 			}
3710 		}
3711 	}
3712 
3713 	return tp->t_pacer.packet_tx_time;
3714 }
3715 
3716 void
3717 tcp_set_mbuf_tx_time(struct mbuf *m, uint64_t tx_time)
3718 {
3719 	struct m_tag *tag = NULL;
3720 	tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_AQM,
3721 	    sizeof(uint64_t), M_WAITOK, m);
3722 	if (tag != NULL) {
3723 		m_tag_prepend(m, tag);
3724 		*(uint64_t *)tag->m_tag_data = tx_time;
3725 	}
3726 }
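/*
 * Usage sketch (not part of the build): a sender that paces an outgoing
 * mbuf would typically pair the two helpers above:
 *
 *	uint64_t tx_time = tcp_pacer_get_packet_tx_time(tp, pkt_len);
 *	tcp_set_mbuf_tx_time(m, tx_time);
 *
 * AQM can then read the departure time back from the KERNEL_TAG_TYPE_AQM
 * mbuf tag at dequeue time.
 */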
3727 
3728 /*
3729  * Compute receive window scaling that we are going to request
3730  * Compute the receive window scaling that we are going to request
3731  * for this connection based on sb_hiwat. Try to leave some
3732  * room to potentially increase the window size up to a maximum
3733  */
3734 void
3735 tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so)
3736 {
3737 	uint32_t maxsockbufsize;
3738 
3739 	tp->request_r_scale = MAX((uint8_t)tcp_win_scale, tp->request_r_scale);
3740 	maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
3741 	    so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
3742 
3743 	/*
3744 	 * Window scale should not exceed what is needed
3745 	 * to send the max receive window size; adding 1 to TCP_MAXWIN
3746 	 * ensures that.
3747 	 */
3748 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
3749 	    ((TCP_MAXWIN + 1) << tp->request_r_scale) < maxsockbufsize) {
3750 		tp->request_r_scale++;
3751 	}
3752 	tp->request_r_scale = MIN(tp->request_r_scale, TCP_MAX_WINSHIFT);
3753 }
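/*
 * Worked example (a sketch, assuming the starting scale is smaller): with
 * maxsockbufsize = 4194304 (4 MB) and TCP_MAXWIN = 65535, the loop stops
 * at the first scale where 65536 << scale >= 4194304, i.e.
 * request_r_scale = 6, since a 16-bit window shifted by 6 can advertise
 * roughly 4 MB.
 */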
3754 
3755 int
3756 tcp_notsent_lowat_check(struct socket *so)
3757 {
3758 	struct inpcb *inp = sotoinpcb(so);
3759 	struct tcpcb *tp = NULL;
3760 	int notsent = 0;
3761 
3762 	if (inp != NULL) {
3763 		tp = intotcpcb(inp);
3764 	}
3765 
3766 	if (tp == NULL) {
3767 		return 0;
3768 	}
3769 
3770 	notsent = so->so_snd.sb_cc -
3771 	    (tp->snd_nxt - tp->snd_una);
3772 
3773 	/*
3774 	 * When we send a FIN or SYN, notsent can be negative.
3775 	 * In that case, we also need to send a write event to the
3776 	 * process if it is waiting. In the FIN case, it will
3777 	 * get an error from send because cantsendmore will be set.
3778 	 */
3779 	if (notsent <= tp->t_notsent_lowat) {
3780 		return 1;
3781 	}
3782 
3783 	/*
3784 	 * When Nagle's algorithm is not disabled, it is better
3785 	 * to keep waking up the client until there is at least one
3786 	 * maxseg of data to write.
3787 	 */
3788 	if ((tp->t_flags & TF_NODELAY) == 0 &&
3789 	    notsent > 0 && notsent < tp->t_maxseg) {
3790 		return 1;
3791 	}
3792 	return 0;
3793 }
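/*
 * Worked example (illustrative numbers only): with t_notsent_lowat = 16384,
 * sb_cc = 20000, and 8000 bytes in flight (snd_nxt - snd_una),
 * notsent = 12000 <= 16384, so the socket is reported writable even though
 * the send buffer still holds data.
 */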
3794 
3795 void
3796 tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3797 {
3798 	struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
3799 	uint16_t rxcount = 0;
3800 
3801 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3802 		tp->t_dsack_lastuna = tp->snd_una;
3803 	}
3804 	/*
3805 	 * First check if there is a segment already existing for this
3806 	 * sequence space.
3807 	 */
3808 
3809 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3810 		if (SEQ_GT(rxseg->rx_start, start)) {
3811 			break;
3812 		}
3813 		prev = rxseg;
3814 	}
3815 	next = rxseg;
3816 
3817 	/* check if prev seg is for this sequence */
3818 	if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
3819 	    SEQ_GEQ(prev->rx_end, end)) {
3820 		prev->rx_count++;
3821 		return;
3822 	}
3823 
3824 	/*
3825 	 * There are a few possibilities at this point.
3826 	 * 1. prev overlaps with the beginning of this sequence
3827 	 * 2. next overlaps with the end of this sequence
3828 	 * 3. there is no overlap.
3829 	 */
3830 
3831 	if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
3832 		if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
3833 			start = prev->rx_end + 1;
3834 			prev->rx_count++;
3835 		} else {
3836 			prev->rx_end = (start - 1);
3837 			rxcount = prev->rx_count;
3838 		}
3839 	}
3840 
3841 	if (next != NULL && SEQ_LT(next->rx_start, end)) {
3842 		if (SEQ_LEQ(next->rx_end, end)) {
3843 			end = next->rx_start - 1;
3844 			next->rx_count++;
3845 		} else {
3846 			next->rx_start = end + 1;
3847 			rxcount = next->rx_count;
3848 		}
3849 	}
3850 	if (!SEQ_LT(start, end)) {
3851 		return;
3852 	}
3853 
3854 	if (tcp_rxt_seg_max > 0 && tp->t_rxt_seg_count >= tcp_rxt_seg_max) {
3855 		rxseg = SLIST_FIRST(&tp->t_rxt_segments);
3856 		if (prev == rxseg) {
3857 			prev = NULL;
3858 		}
3859 		SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3860 		    tcp_rxt_seg, rx_link);
3861 
3862 		tcp_rxt_seg_drop++;
3863 		tp->t_rxt_seg_drop++;
3864 		zfree(tcp_rxt_seg_zone, rxseg);
3865 
3866 		tp->t_rxt_seg_count -= 1;
3867 	}
3868 
3869 	rxseg = zalloc_flags(tcp_rxt_seg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3870 	rxseg->rx_start = start;
3871 	rxseg->rx_end = end;
3872 	rxseg->rx_count = rxcount + 1;
3873 
3874 	if (prev != NULL) {
3875 		SLIST_INSERT_AFTER(prev, rxseg, rx_link);
3876 	} else {
3877 		SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
3878 	}
3879 	tp->t_rxt_seg_count += 1;
3880 }
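/*
 * Worked example (a sketch): given an existing record [100, 199] and a new
 * retransmission of [150, 299], the existing entry is trimmed to [100, 149]
 * and a new entry [150, 299] is inserted after it, carrying the trimmed
 * entry's rx_count plus one.
 */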
3881 
3882 struct tcp_rxt_seg *
3883 tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3884 {
3885 	struct tcp_rxt_seg *rxseg;
3886 
3887 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3888 		return NULL;
3889 	}
3890 
3891 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3892 		if (SEQ_LEQ(rxseg->rx_start, start) &&
3893 		    SEQ_GEQ(rxseg->rx_end, end)) {
3894 			return rxseg;
3895 		}
3896 		if (SEQ_GT(rxseg->rx_start, start)) {
3897 			break;
3898 		}
3899 	}
3900 	return NULL;
3901 }
3902 
3903 void
3904 tcp_rxtseg_set_spurious(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3905 {
3906 	struct tcp_rxt_seg *rxseg;
3907 
3908 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3909 		return;
3910 	}
3911 
3912 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3913 		if (SEQ_GEQ(rxseg->rx_start, start) &&
3914 		    SEQ_LEQ(rxseg->rx_end, end)) {
3915 			/*
3916 			 * If the segment was retransmitted only once, mark it as
3917 			 * spurious.
3918 			 */
3919 			if (rxseg->rx_count == 1) {
3920 				rxseg->rx_flags |= TCP_RXT_SPURIOUS;
3921 			}
3922 		}
3923 
3924 		if (SEQ_GEQ(rxseg->rx_start, end)) {
3925 			break;
3926 		}
3927 	}
3928 	return;
3929 }
3930 
3931 void
3932 tcp_rxtseg_clean(struct tcpcb *tp)
3933 {
3934 	struct tcp_rxt_seg *rxseg, *next;
3935 
3936 	SLIST_FOREACH_SAFE(rxseg, &tp->t_rxt_segments, rx_link, next) {
3937 		SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3938 		    tcp_rxt_seg, rx_link);
3939 		zfree(tcp_rxt_seg_zone, rxseg);
3940 	}
3941 	tp->t_rxt_seg_count = 0;
3942 	tp->t_dsack_lastuna = tp->snd_max;
3943 }
3944 
3945 boolean_t
3946 tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack)
3947 {
3948 	boolean_t bad_rexmt;
3949 	struct tcp_rxt_seg *rxseg;
3950 
3951 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3952 		return FALSE;
3953 	}
3954 
3955 	/*
3956 	 * If not all of the segments in this window have been cumulatively
3957 	 * acknowledged, there can still be undetected packet loss.
3958 	 * Do not restore the congestion window in that case.
3959 	 */
3960 	if (SEQ_LT(th_ack, tp->snd_recover)) {
3961 		return FALSE;
3962 	}
3963 
3964 	bad_rexmt = TRUE;
3965 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3966 		if (!(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
3967 			bad_rexmt = FALSE;
3968 			break;
3969 		}
3970 	}
3971 	return bad_rexmt;
3972 }
3973 
3974 u_int32_t
3975 tcp_rxtseg_total_size(struct tcpcb *tp)
3976 {
3977 	struct tcp_rxt_seg *rxseg;
3978 	u_int32_t total_size = 0;
3979 
3980 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3981 		total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
3982 	}
3983 	return total_size;
3984 }
3985 
3986 int
3987 tcp_seg_cmp(const struct tcp_seg_sent *seg1, const struct tcp_seg_sent *seg2)
3988 {
3989 	return (int)(seg1->end_seq - seg2->end_seq);
3990 }
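/*
 * Note: the unsigned subtraction cast to int gives a wraparound-safe
 * ordering (the same trick as SEQ_LT/SEQ_GT), so the RB tree stays
 * correctly ordered across the 32-bit sequence-number wrap as long as the
 * compared end_seq values stay within 2^31 of each other.
 */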
3991 
3992 RB_GENERATE(tcp_seg_sent_tree_head, tcp_seg_sent, seg_link, tcp_seg_cmp)
3993 
3994 uint32_t
3995 tcp_seg_len(struct tcp_seg_sent *seg)
3996 {
3997 	if (SEQ_LT(seg->end_seq, seg->start_seq)) {
3998 		os_log_error(OS_LOG_DEFAULT, "segment end(%u) can't be smaller "
3999 		    "than segment start(%u)", seg->end_seq, seg->start_seq);
4000 	}
4001 
4002 	return seg->end_seq - seg->start_seq;
4003 }
4004 
4005 static struct tcp_seg_sent *
4006 tcp_seg_alloc_init(struct tcpcb *tp)
4007 {
4008 	struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->seg_pool.free_segs);
4009 	if (seg != NULL) {
4010 		TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4011 		tp->seg_pool.free_segs_count--;
4012 	} else {
4013 		// TODO: remove Z_WAITOK and Z_NOFAIL?
4014 		seg = zalloc_flags(tcp_seg_sent_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4015 		if (seg == NULL) {
4016 			return NULL;
4017 		}
4018 	}
4019 	bzero(seg, sizeof(*seg));
4020 
4021 	return seg;
4022 }
4023 
4024 static void
4025 tcp_update_seg_after_rto(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4026     uint32_t xmit_ts, uint8_t flags)
4027 {
4028 	tcp_rack_transmit_seg(tp, found_seg, found_seg->start_seq, found_seg->end_seq,
4029 	    xmit_ts, flags);
4030 	struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->t_segs_sent);
4031 	if (found_seg == seg) {
4032 		// Move this segment to the end of time-ordered list.
4033 		TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4034 		TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4035 	}
4036 }
4037 
4038 static void
4039 tcp_process_rxmt_segs_after_rto(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start,
4040     uint32_t xmit_ts, uint8_t flags)
4041 {
4042 	struct tcp_seg_sent segment = {};
4043 
4044 	while (seg != NULL) {
4045 		if (SEQ_LEQ(seg->start_seq, start)) {
4046 			tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4047 			break;
4048 		} else {
4049 			/* The segment is a part of the total RTO retransmission */
4050 			tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4051 
4052 			/* Find the next segment ending at the start of current segment */
4053 			segment.end_seq = seg->start_seq;
4054 			seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4055 		}
4056 	}
4057 }
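/*
 * Sketch of the backward walk: for tracked segments [100, 200), [200, 300)
 * and [300, 400), an RTO retransmission of [100, 400) enters with the node
 * keyed end_seq == 400; each iteration restamps that node, then hops to the
 * node whose end_seq equals the current start_seq, stopping once a node's
 * start_seq is <= the retransmission's start.
 */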
4058 
4059 static struct tcp_seg_sent *
4060 tcp_seg_sent_insert_before(struct tcpcb *tp, struct tcp_seg_sent *before, tcp_seq start, tcp_seq end,
4061     uint32_t xmit_ts, uint8_t flags)
4062 {
4063 	struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4064 	/* segment MUST be allocated; there is no other fail-safe here */
4065 	tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4066 	struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4067 	if (not_inserted) {
4068 		os_log(OS_LOG_DEFAULT, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4069 		    not_inserted->start_seq, not_inserted->end_seq);
4070 	}
4071 	TAILQ_INSERT_BEFORE(before, seg, tx_link);
4072 
4073 	return seg;
4074 }
4075 
4076 static struct tcp_seg_sent *
4077 tcp_seg_rto_insert_end(struct tcpcb *tp, tcp_seq start, tcp_seq end,
4078     uint32_t xmit_ts, uint8_t flags)
4079 {
4080 	struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4081 	/* segment MUST be allocated; there is no other fail-safe here */
4082 	tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4083 	struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4084 	if (not_inserted) {
4085 		os_log(OS_LOG_DEFAULT, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4086 		    not_inserted->start_seq, not_inserted->end_seq);
4087 	}
4088 	TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4089 
4090 	return seg;
4091 }
4092 
4093 void
4094 tcp_seg_sent_insert(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start, tcp_seq end,
4095     uint32_t xmit_ts, uint8_t flags)
4096 {
4097 	if (seg != NULL) {
4098 		uint8_t seg_flags = seg->flags | flags;
4099 		if (seg->end_seq == end) {
4100 			/* Entire seg retransmitted in RACK recovery; start and end sequences don't change */
4101 			if (seg->start_seq != start) {
4102 				os_log_error(OS_LOG_DEFAULT, "Segment start (%u) is not same as retransmitted "
4103 				    "start sequence number (%u)", seg->start_seq, start);
4104 			}
4105 			tcp_rack_transmit_seg(tp, seg, seg->start_seq, seg->end_seq, xmit_ts, seg_flags);
4106 			TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4107 			TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4108 		} else {
4109 			/*
4110 			 * The original segment is partially retransmitted; update start_seq
4111 			 * by the retransmitted length and create a new segment for the retransmitted part
4112 			 */
4113 			struct tcp_seg_sent *partial_seg = tcp_seg_alloc_init(tp);
4114 			if (partial_seg == NULL) {
4115 				return;
4116 			}
4117 			seg->start_seq += (end - start);
4118 			tcp_rack_transmit_seg(tp, partial_seg, start, end, xmit_ts, seg_flags);
4119 			struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head,
4120 			    &tp->t_segs_sent_tree, partial_seg);
4121 			if (not_inserted) {
4122 				os_log(OS_LOG_DEFAULT, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4123 				    not_inserted->start_seq, not_inserted->end_seq);
4124 			}
4125 			TAILQ_INSERT_TAIL(&tp->t_segs_sent, partial_seg, tx_link);
4126 		}
4127 
4128 		return;
4129 	}
4130 
4131 	if ((flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE) == 0) {
4132 		/* This is a new segment */
4133 		seg = tcp_seg_alloc_init(tp);
4134 		if (seg == NULL) {
4135 			return;
4136 		}
4137 
4138 		tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4139 		struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4140 		if (not_inserted) {
4141 			os_log(OS_LOG_DEFAULT, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4142 			    not_inserted->start_seq, not_inserted->end_seq);
4143 		}
4144 		TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4145 
4146 		return;
4147 	}
4148 	/*
4149 	 * Either retransmitted after an RTO or a PTO.
4150 	 * During an RTO, the time-ordered list may lose its order.
4151 	 * If retransmitted after an RTO, check if the segment
4152 	 * already exists in the RB tree and update its xmit_ts. Also,
4153 	 * if this seg is at the top of the ordered list, then move it
4154 	 * to the end.
4155 	 */
4156 	struct tcp_seg_sent segment = {};
4157 	struct tcp_seg_sent *found_seg = NULL, *rxmt_seg = NULL;
4158 
4159 	/* Set the end sequence to search for existing segment */
4160 	segment.end_seq = end;
4161 	found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4162 	if (found_seg != NULL) {
4163 		/* Found an exact match for retransmitted end sequence */
4164 		tcp_process_rxmt_segs_after_rto(tp, found_seg, start, xmit_ts, flags);
4165 		return;
4166 	}
4167 	/*
4168 	 * We come here when we don't find an exact match and the end of the
4169 	 * segment retransmitted after the RTO lies within an existing segment.
4170 	 */
4171 	RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4172 		if (SEQ_LT(end, found_seg->end_seq) && SEQ_GT(end, found_seg->start_seq)) {
4173 			/*
4174 			 * This segment is partially retransmitted. We split this segment at the boundary of end
4175 			 * sequence. First insert the part being retransmitted at the end of time-ordered list.
4176 			 */
4177 			tcp_seg_rto_insert_end(tp, found_seg->start_seq, end, xmit_ts,
4178 			    found_seg->flags | flags);
4179 
4180 			if (SEQ_LEQ(found_seg->start_seq, start)) {
4181 				/*
4182 				 * We are done with the retransmitted part.
4183 				 * Move the start of existing segment
4184 				 */
4185 				found_seg->start_seq = end;
4186 			} else {
4187 				/*
4188 				 * This retransmitted sequence covers more than one segment.
4189 				 * Look for segments covered by this retransmission below this segment.
4190 				 */
4191 				segment.end_seq = found_seg->start_seq;
4192 				rxmt_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4193 
4194 				if (rxmt_seg != NULL) {
4195 					/* rxmt_seg is just before the current segment */
4196 					tcp_process_rxmt_segs_after_rto(tp, rxmt_seg, start, xmit_ts, flags);
4197 				}
4198 
4199 				/* Move the start of existing segment */
4200 				found_seg->start_seq = end;
4201 			}
4202 			return;
4203 		}
4204 	}
4205 }
4206 
4207 static void
4208 tcp_seg_collect_acked_subtree(struct tcpcb *tp, struct tcp_seg_sent *seg,
4209     uint32_t acked_xmit_ts, uint32_t tsecr)
4210 {
4211 	if (seg != NULL) {
4212 		tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4213 		tcp_seg_collect_acked_subtree(tp, RB_RIGHT(seg, seg_link), acked_xmit_ts, tsecr);
4214 		TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4215 	}
4216 }
4217 
4218 /* Call this function with root of the rb tree */
4219 static void
4220 tcp_seg_collect_acked(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq th_ack,
4221     uint32_t acked_xmit_ts, uint32_t tsecr)
4222 {
4223 	if (seg == NULL) {
4224 		return;
4225 	}
4226 
4227 	if (SEQ_GEQ(th_ack, seg->end_seq)) {
4228 		/* Delete the entire left sub-tree */
4229 		tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4230 		/* Evaluate the right sub-tree */
4231 		tcp_seg_collect_acked(tp, RB_RIGHT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4232 		TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4233 	} else {
4234 		/*
4235 		 * This ACK doesn't acknowledge the current root and its right sub-tree.
4236 		 * Evaluate the left sub-tree
4237 		 */
4238 		tcp_seg_collect_acked(tp, RB_LEFT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4239 	}
4240 }
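/*
 * Shape of the traversal: because the tree is keyed on end_seq, once
 * th_ack >= seg->end_seq every node in the left subtree is fully ACKed as
 * well and can be collected wholesale; only the right subtree needs further
 * comparison. A full cumulative ACK therefore costs roughly O(number of
 * ACKed nodes) rather than a tree search per node.
 */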
4241 
4242 static void
4243 tcp_seg_delete_acked(struct tcpcb *tp, uint32_t acked_xmit_ts, uint32_t tsecr)
4244 {
4245 	struct tcp_seg_sent *acked_seg = NULL, *next = NULL;
4246 
4247 	TAILQ_FOREACH_SAFE(acked_seg, &tp->t_segs_acked, ack_link, next) {
4248 		/* Advance RACK state if applicable */
4249 		if (acked_seg->xmit_ts > acked_xmit_ts) {
4250 			tcp_rack_update_segment_acked(tp, tsecr, acked_seg->xmit_ts, acked_seg->end_seq,
4251 			    !!(acked_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4252 		}
4253 		/* Check for reordering */
4254 		tcp_rack_detect_reordering_acked(tp, acked_seg);
4255 
4256 		const uint32_t seg_len = tcp_seg_len(acked_seg);
4257 		if (acked_seg->flags & TCP_SEGMENT_LOST) {
4258 			if (tp->bytes_lost < seg_len) {
4259 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller than already "
4260 				    "lost segment length (%u)", tp->bytes_lost, seg_len);
4261 			}
4262 			tp->bytes_lost -= seg_len;
4263 		}
4264 		if (acked_seg->flags & TCP_RACK_RETRANSMITTED) {
4265 			if (tp->bytes_retransmitted < seg_len) {
4266 				os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be smaller "
4267 				    "than already retransmitted segment length (%u)",
4268 				    tp->bytes_retransmitted, seg_len);
4269 			}
4270 			tp->bytes_retransmitted -= seg_len;
4271 		}
4272 		if (acked_seg->flags & TCP_SEGMENT_SACKED) {
4273 			if (tp->bytes_sacked < seg_len) {
4274 				os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller than already "
4275 				    "SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4276 			}
4277 			tp->bytes_sacked -= seg_len;
4278 		}
4279 		TAILQ_REMOVE(&tp->t_segs_acked, acked_seg, ack_link);
4280 		TAILQ_REMOVE(&tp->t_segs_sent, acked_seg, tx_link);
4281 		RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, acked_seg);
4282 		tcp_seg_delete(tp, acked_seg);
4283 	}
4284 }
4285 
4286 void
4287 tcp_segs_doack(struct tcpcb *tp, tcp_seq th_ack, struct tcpopt *to)
4288 {
4289 	uint32_t tsecr = 0, acked_xmit_ts = 0;
4290 	tcp_seq acked_seq = th_ack;
4291 	bool was_retransmitted = false;
4292 
4293 	if (TAILQ_EMPTY(&tp->t_segs_sent)) {
4294 		return;
4295 	}
4296 
4297 	if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0)) {
4298 		tsecr = to->to_tsecr;
4299 	}
4300 
4301 	struct tcp_seg_sent seg = {};
4302 	struct tcp_seg_sent *found_seg = NULL, *next = NULL;
4303 
4304 	found_seg = TAILQ_LAST(&tp->t_segs_sent, tcp_seg_sent_head);
4305 
4306 	if (tp->rack.segs_retransmitted == false) {
4307 		if (SEQ_GEQ(th_ack, found_seg->end_seq)) {
4308 			/*
4309 			 * ACK acknowledges the last sent segment completely (snd_max),
4310 			 * so we can remove all segments from the time-ordered list.
4311 			 */
4312 			acked_seq = found_seg->end_seq;
4313 			acked_xmit_ts = found_seg->xmit_ts;
4314 			was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4315 			tcp_segs_sent_clean(tp, false);
4316 
4317 			/* Advance RACK state */
4318 			tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4319 			return;
4320 		}
4321 	}
4322 	/*
4323 	 * If either not all segments are ACKed OR the time-ordered list contains retransmitted
4324 	 * segments, do an RB tree search for the largest (completely) ACKed segment and remove the ACKed
4325 	 * segment and all segments left of it from both RB tree and time-ordered list.
4326 	 *
4327 	 * Set the end sequence to search for ACKed segment.
4328 	 */
4329 	seg.end_seq = th_ack;
4330 
4331 	if ((found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg)) != NULL) {
4332 		acked_seq = found_seg->end_seq;
4333 		acked_xmit_ts = found_seg->xmit_ts;
4334 		was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4335 
4336 		/*
4337 		 * Remove all segments that are ACKed by this ACK.
4338 		 * We defer self-balancing of RB tree to the end
4339 		 * by calling RB_REMOVE after collecting all ACKed segments.
4340 		 */
4341 		tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4342 		tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4343 
4344 		/* Advance RACK state */
4345 		tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4346 
4347 		return;
4348 	}
4349 	/*
4350 	 * When TSO is enabled, it is possible that th_ack is less
4351 	 * than a segment's end_seq, hence we search the tree
4352 	 * until we find the largest (partially) ACKed segment.
4353 	 */
4354 	RB_FOREACH_SAFE(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, next) {
4355 		if (SEQ_LT(th_ack, found_seg->end_seq) && SEQ_GT(th_ack, found_seg->start_seq)) {
4356 			acked_seq = th_ack;
4357 			acked_xmit_ts = found_seg->xmit_ts;
4358 			was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4359 
4360 			/* Remove all segments completely ACKed by this ack */
4361 			tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4362 			tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4363 			found_seg->start_seq = th_ack;
4364 
4365 			/* Advance RACK state */
4366 			tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4367 			break;
4368 		}
4369 	}
4370 }
4371 
4372 static bool
4373 tcp_seg_mark_sacked(struct tcpcb *tp, struct tcp_seg_sent *seg, uint32_t *newbytes_sacked)
4374 {
4375 	if (seg->flags & TCP_SEGMENT_SACKED) {
4376 		return false;
4377 	}
4378 
4379 	const uint32_t seg_len = tcp_seg_len(seg);
4380 
4381 	/* Check for reordering */
4382 	tcp_rack_detect_reordering_acked(tp, seg);
4383 
4384 	if (seg->flags & TCP_RACK_RETRANSMITTED) {
4385 		if (seg->flags & TCP_SEGMENT_LOST) {
4386 			/*
4387 			 * If the segment is not considered lost, we don't clear
4388 			 * retransmitted as it might still be in flight. The ONLY time
4389 			 * this can happen is when an RTO happens and the segment is retransmitted
4390 			 * and SACKed before RACK detects that the segment was lost.
4391 			 */
4392 			seg->flags &= ~(TCP_SEGMENT_LOST | TCP_RACK_RETRANSMITTED);
4393 			if (tp->bytes_lost < seg_len || tp->bytes_retransmitted < seg_len) {
4394 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) and/or bytes_retransmitted (%u) "
4395 				    "can't be smaller than already lost/retransmitted segment length (%u)", tp->bytes_lost,
4396 				    tp->bytes_retransmitted, seg_len);
4397 			}
4398 			tp->bytes_lost -= seg_len;
4399 			tp->bytes_retransmitted -= seg_len;
4400 		}
4401 	} else {
4402 		if (seg->flags & TCP_SEGMENT_LOST) {
4403 			seg->flags &= ~(TCP_SEGMENT_LOST);
4404 			if (tp->bytes_lost < seg_len) {
4405 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller "
4406 				    "than already lost segment length (%u)", tp->bytes_lost, seg_len);
4407 			}
4408 			tp->bytes_lost -= seg_len;
4409 		}
4410 	}
4411 	*newbytes_sacked += seg_len;
4412 	seg->flags |= TCP_SEGMENT_SACKED;
4413 	tp->bytes_sacked += seg_len;
4414 
4415 	return true;
4416 }
4417 
4418 static void
4419 tcp_segs_dosack_matched(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4420     tcp_seq sblk_start, uint32_t tsecr,
4421     uint32_t *newbytes_sacked)
4422 {
4423 	struct tcp_seg_sent seg = {};
4424 
4425 	while (found_seg != NULL) {
4426 		if (sblk_start == found_seg->start_seq) {
4427 			/*
4428 			 * Covered the entire SACK block.
4429 			 * Record segment flags before they get erased.
4430 			 */
4431 			uint8_t seg_flags = found_seg->flags;
4432 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4433 			if (newly_marked) {
4434 				/* Advance RACK state */
4435 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4436 				    found_seg->end_seq,
4437 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4438 			}
4439 			break;
4440 		} else if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4441 			if ((found_seg->flags & TCP_SEGMENT_SACKED) != 0) {
4442 				/* No need to process an already SACKED segment */
4443 				break;
4444 			}
4445 			/*
4446 			 * This segment is partially ACKed by SACK block
4447 			 * as sblk_start > segment start. Since it is
4448 			 * partially SACKed, we should split the unSACKed and
4449 			 * SACKed parts.
4450 			 */
4451 			/* First create a new segment for unSACKed part */
4452 			tcp_seg_sent_insert_before(tp, found_seg, found_seg->start_seq, sblk_start,
4453 			    found_seg->xmit_ts, found_seg->flags);
4454 			/* Now, update the SACKed part */
4455 			found_seg->start_seq = sblk_start;
4456 			/* Record seg flags before they get erased. */
4457 			uint8_t seg_flags = found_seg->flags;
4458 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4459 			if (newly_marked) {
4460 				/* Advance RACK state */
4461 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4462 				    found_seg->end_seq,
4463 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4464 			}
4465 			break;
4466 		} else {
4467 			/*
4468 			 * This segment lies within the SACK block.
4469 			 * Record segment flags before they get erased.
4470 			 */
4471 			uint8_t seg_flags = found_seg->flags;
4472 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4473 			if (newly_marked) {
4474 				/* Advance RACK state */
4475 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4476 				    found_seg->end_seq,
4477 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4478 			}
4479 			/* Find the next segment ending at the start of current segment */
4480 			seg.end_seq = found_seg->start_seq;
4481 			found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4482 		}
4483 	}
4484 }
4485 
4486 void
4487 tcp_segs_dosack(struct tcpcb *tp, tcp_seq sblk_start, tcp_seq sblk_end,
4488     uint32_t tsecr, uint32_t *newbytes_sacked)
4489 {
4490 	/*
4491 	 * When we receive SACK, min RTT is computed after SACK processing which
4492 	 * means we are using min RTT from the previous ACK to advance RACK state.
4493 	 * This is ok as we track a windowed min-filtered estimate over a period.
4494 	 */
4495 	struct tcp_seg_sent seg = {};
4496 	struct tcp_seg_sent *found_seg = NULL, *sacked_seg = NULL;
4497 
4498 	/* Set the end sequence to search for SACKed segment */
4499 	seg.end_seq = sblk_end;
4500 	found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4501 
4502 	if (found_seg != NULL) {
4503 		/* We found an exact match for sblk_end */
4504 		tcp_segs_dosack_matched(tp, found_seg, sblk_start, tsecr, newbytes_sacked);
4505 		return;
4506 	}
4507 	/*
4508 	 * We come here when we don't find an exact match and sblk_end
4509 	 * lies within a segment. This would happen only when TSO is used.
4510 	 */
4511 	RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4512 		if (SEQ_LT(sblk_end, found_seg->end_seq) && SEQ_GT(sblk_end, found_seg->start_seq)) {
4513 			/*
4514 			 * This segment is partially SACKed. We split this segment at the boundary
4515 			 * of SACK block. First insert the newly SACKed part
4516 			 */
4517 			tcp_seq start = SEQ_LEQ(sblk_start, found_seg->start_seq) ? found_seg->start_seq : sblk_start;
4518 			struct tcp_seg_sent *inserted = tcp_seg_sent_insert_before(tp, found_seg, start,
4519 			    sblk_end, found_seg->xmit_ts, found_seg->flags);
4520 			/* Record seg flags before they get erased. */
4521 			uint8_t seg_flags = inserted->flags;
4522 			/* Mark the SACKed segment */
4523 			tcp_seg_mark_sacked(tp, inserted, newbytes_sacked);
4524 
4525 			/* Advance RACK state */
4526 			tcp_rack_update_segment_acked(tp, tsecr, inserted->xmit_ts,
4527 			    inserted->end_seq, !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4528 
4529 			if (sblk_start == found_seg->start_seq) {
4530 				/*
4531 				 * We are done with this SACK block.
4532 				 * Move the start of existing segment
4533 				 */
4534 				found_seg->start_seq = sblk_end;
4535 				break;
4536 			}
4537 
4538 			if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4539 				/* Insert the remaining unSACKed part before the SACKED segment inserted above */
4540 				tcp_seg_sent_insert_before(tp, inserted, found_seg->start_seq,
4541 				    sblk_start, found_seg->xmit_ts, found_seg->flags);
4542 				/* Move the start of existing segment */
4543 				found_seg->start_seq = sblk_end;
4544 				break;
4545 			} else {
4546 				/*
4547 				 * This SACK block covers more than one segment
4548 				 * Look for segments SACKed below this segment
4549 				 */
4550 				seg.end_seq = found_seg->start_seq;
4551 				sacked_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4552 
4553 				if (sacked_seg != NULL) {
4554 					/* We found an exact match for sblk_end */
4555 					tcp_segs_dosack_matched(tp, sacked_seg, sblk_start, tsecr, newbytes_sacked);
4556 				}
4557 
4558 				/* Move the start of existing segment */
4559 				found_seg->start_seq = sblk_end;
4560 			}
4561 			break;
4562 		}
4563 	}
4564 }
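/*
 * Worked example (a sketch): for a tracked segment [1000, 2000) and a SACK
 * block [1400, 1700), the code above splits the segment into [1000, 1400)
 * (still unSACKed), [1400, 1700) (marked SACKed), and the original node
 * trimmed to start at 1700.
 */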
4565 
4566 void
4567 tcp_segs_clear_sacked(struct tcpcb *tp)
4568 {
4569 	struct tcp_seg_sent *seg = NULL;
4570 
4571 	TAILQ_FOREACH(seg, &tp->t_segs_sent, tx_link) {
4573 		const uint32_t seg_len = tcp_seg_len(seg);
4574 
4575 		if (seg->flags & TCP_SEGMENT_SACKED) {
4576 			seg->flags &= ~(TCP_SEGMENT_SACKED);
4577 			if (tp->bytes_sacked < seg_len) {
4578 				os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller "
4579 				    "than already SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4580 			}
4581 			tp->bytes_sacked -= seg_len;
4582 		}
4583 	}
4584 }
4585 
4586 void
4587 tcp_mark_seg_lost(struct tcpcb *tp, struct tcp_seg_sent *seg)
4588 {
4589 	const uint32_t seg_len = tcp_seg_len(seg);
4590 
4591 	if (seg->flags & TCP_SEGMENT_LOST) {
4592 		if (seg->flags & TCP_RACK_RETRANSMITTED) {
4593 			/* Retransmission was lost */
4594 			seg->flags &= ~TCP_RACK_RETRANSMITTED;
4595 			if (tp->bytes_retransmitted < seg_len) {
4596 				os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be "
4597 				    "smaller than retransmitted segment length (%u)",
4598 				    tp->bytes_retransmitted, seg_len);
4599 				return;
4600 			}
4601 			tp->bytes_retransmitted -= seg_len;
4602 		}
4603 	} else {
4604 		seg->flags |= TCP_SEGMENT_LOST;
4605 		tp->bytes_lost += seg_len;
4606 	}
4607 }
4608 
4609 void
4610 tcp_seg_delete(struct tcpcb *tp, struct tcp_seg_sent *seg)
4611 {
4612 	if (tp->seg_pool.free_segs_count >= TCP_SEG_POOL_MAX_ITEM_COUNT) {
4613 		zfree(tcp_seg_sent_zone, seg);
4614 	} else {
4615 		bzero(seg, sizeof(*seg));
4616 		TAILQ_INSERT_TAIL(&tp->seg_pool.free_segs, seg, free_link);
4617 		tp->seg_pool.free_segs_count++;
4618 	}
4619 }
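/*
 * Design note: freed segments are cached on a per-connection free list
 * (up to TCP_SEG_POOL_MAX_ITEM_COUNT entries) so that steady-state ACK
 * processing can recycle them in tcp_seg_alloc_init() without hitting the
 * zone allocator for every segment.
 */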
4620 
4621 void
4622 tcp_segs_sent_clean(struct tcpcb *tp, bool free_segs)
4623 {
4624 	struct tcp_seg_sent *seg = NULL, *next = NULL;
4625 
4626 	TAILQ_FOREACH_SAFE(seg, &tp->t_segs_sent, tx_link, next) {
4627 		/* Check for reordering */
4628 		tcp_rack_detect_reordering_acked(tp, seg);
4629 
4630 		TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4631 		RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4632 		tcp_seg_delete(tp, seg);
4633 	}
4634 	if (__improbable(!RB_EMPTY(&tp->t_segs_sent_tree))) {
4635 		os_log_error(OS_LOG_DEFAULT, "RB tree still contains segments while "
4636 		    "time ordered list is already empty");
4637 	}
4638 	if (__improbable(!TAILQ_EMPTY(&tp->t_segs_acked))) {
4639 		os_log_error(OS_LOG_DEFAULT, "Segment ACKed list shouldn't contain "
4640 		    "any segments as they are removed immediately after being ACKed");
4641 	}
4642 	/* Reset seg_retransmitted as we emptied the list */
4643 	tcp_rack_reset_segs_retransmitted(tp);
4644 	tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
4645 
4646 	/* Empty the free segments pool */
4647 	if (free_segs) {
4648 		TAILQ_FOREACH_SAFE(seg, &tp->seg_pool.free_segs, free_link, next) {
4649 			TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4650 			zfree(tcp_seg_sent_zone, seg);
4651 		}
4652 		tp->seg_pool.free_segs_count = 0;
4653 	}
4654 }
4655 
4656 void
4657 tcp_get_connectivity_status(struct tcpcb *tp,
4658     struct tcp_conn_status *connstatus)
4659 {
4660 	if (tp == NULL || connstatus == NULL) {
4661 		return;
4662 	}
4663 	bzero(connstatus, sizeof(*connstatus));
4664 	if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
4665 		if (TCPS_HAVEESTABLISHED(tp->t_state)) {
4666 			connstatus->write_probe_failed = 1;
4667 		} else {
4668 			connstatus->conn_probe_failed = 1;
4669 		}
4670 	}
4671 	if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) {
4672 		connstatus->read_probe_failed = 1;
4673 	}
4674 	if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL &&
4675 	    (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) {
4676 		connstatus->probe_activated = 1;
4677 	}
4678 }
4679 
4680 void
4681 tcp_disable_tfo(struct tcpcb *tp)
4682 {
4683 	tp->t_flagsext &= ~TF_FASTOPEN;
4684 }
4685 
4686 static struct mbuf *
4687 tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp,
4688     boolean_t is_probe)
4689 {
4690 	struct inpcb *inp = tp->t_inpcb;
4691 	struct tcphdr *th;
4692 	u_int8_t *data;
4693 	int win = 0;
4694 	struct mbuf *m;
4695 
4696 	/*
4697 	 * The code assumes the IP + TCP headers fit in an mbuf packet header
4698 	 */
4699 	_CASSERT(sizeof(struct ip) + sizeof(struct tcphdr) <= _MHLEN);
4700 	_CASSERT(sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= _MHLEN);
4701 
4702 	MGETHDR(m, M_WAIT, MT_HEADER);
4703 	if (m == NULL) {
4704 		return NULL;
4705 	}
4706 	m->m_pkthdr.pkt_proto = IPPROTO_TCP;
4707 
4708 	data = mbuf_datastart(m);
4709 
4710 	if (inp->inp_vflag & INP_IPV4) {
4711 		bzero(data, sizeof(struct ip) + sizeof(struct tcphdr));
4712 		th = (struct tcphdr *)(void *) (data + sizeof(struct ip));
4713 		m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
4714 		m->m_pkthdr.len = m->m_len;
4715 	} else {
4716 		VERIFY(inp->inp_vflag & INP_IPV6);
4717 
4718 		bzero(data, sizeof(struct ip6_hdr)
4719 		    + sizeof(struct tcphdr));
4720 		th = (struct tcphdr *)(void *)(data + sizeof(struct ip6_hdr));
4721 		m->m_len = sizeof(struct ip6_hdr) +
4722 		    sizeof(struct tcphdr);
4723 		m->m_pkthdr.len = m->m_len;
4724 	}
4725 
4726 	tcp_fillheaders(m, tp, data, th);
4727 
4728 	if (inp->inp_vflag & INP_IPV4) {
4729 		struct ip *ip;
4730 
4731 		ip = (__typeof__(ip))(void *)data;
4732 
4733 		ip->ip_id = rfc6864 ? 0 : ip_randomid((uint64_t)m);
4734 		ip->ip_off = htons(IP_DF);
4735 		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
4736 		ip->ip_ttl = inp->inp_ip_ttl;
4737 		ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
4738 		ip->ip_sum = in_cksum_hdr(ip);
4739 	} else {
4740 		struct ip6_hdr *ip6;
4741 
4742 		ip6 = (__typeof__(ip6))(void *)data;
4743 
4744 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
4745 		ip6->ip6_hlim = in6_selecthlim(inp, ifp);
4746 		ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
4747 
4748 		if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
4749 			ip6->ip6_src.s6_addr16[1] = 0;
4750 		}
4751 		if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
4752 			ip6->ip6_dst.s6_addr16[1] = 0;
4753 		}
4754 	}
4755 	th->th_flags = TH_ACK;
4756 
4757 	win = tcp_sbspace(tp);
4758 	if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) {
4759 		win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
4760 	}
4761 	th->th_win = htons((u_short) (win >> tp->rcv_scale));
4762 
4763 	if (is_probe) {
4764 		th->th_seq = htonl(tp->snd_una - 1);
4765 	} else {
4766 		th->th_seq = htonl(tp->snd_una);
4767 	}
4768 	th->th_ack = htonl(tp->rcv_nxt);
4769 
4770 	/* Force recompute TCP checksum to be the final value */
4771 	th->th_sum = 0;
4772 	if (inp->inp_vflag & INP_IPV4) {
4773 		th->th_sum = inet_cksum(m, IPPROTO_TCP,
4774 		    sizeof(struct ip), sizeof(struct tcphdr));
4775 	} else {
4776 		th->th_sum = inet6_cksum(m, IPPROTO_TCP,
4777 		    sizeof(struct ip6_hdr), sizeof(struct tcphdr));
4778 	}
4779 
4780 	return m;
4781 }
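/*
 * Note: the probe variant uses SEG.SEQ = SND.UNA - 1, the classic keepalive
 * trick (cf. RFC 1122, 4.2.3.6): that byte is already ACKed, so the peer
 * discards the segment but replies with an ACK, confirming liveness without
 * consuming sequence space.
 */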
4782 
4783 void
4784 tcp_fill_keepalive_offload_frames(ifnet_t ifp,
4785     struct ifnet_keepalive_offload_frame *frames_array,
4786     u_int32_t frames_array_count, size_t frame_data_offset,
4787     u_int32_t *used_frames_count)
4788 {
4789 	struct inpcb *inp;
4790 	inp_gen_t gencnt;
4791 	u_int32_t frame_index = *used_frames_count;
4792 
4793 	/* Validation of the parameters */
4794 	if (ifp == NULL || frames_array == NULL ||
4795 	    frames_array_count == 0 ||
4796 	    frame_index >= frames_array_count ||
4797 	    frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4798 		return;
4799 	}
4800 
4801 	/* Fast exit when no process is using the socket option TCP_KEEPALIVE_OFFLOAD */
4802 	if (ifp->if_tcp_kao_cnt == 0) {
4803 		return;
4804 	}
4805 
4806 	/*
4807 	 * This function is called outside the regular TCP processing
4808 	 * so we need to update the TCP clock.
4809 	 */
4810 	calculate_tcp_clock();
4811 
4812 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
4813 	gencnt = tcbinfo.ipi_gencnt;
4814 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
4815 		struct socket *so;
4816 		struct ifnet_keepalive_offload_frame *frame;
4817 		struct mbuf *m = NULL;
4818 		struct tcpcb *tp = intotcpcb(inp);
4819 
4820 		if (frame_index >= frames_array_count) {
4821 			break;
4822 		}
4823 
4824 		if (inp->inp_gencnt > gencnt ||
4825 		    inp->inp_state == INPCB_STATE_DEAD) {
4826 			continue;
4827 		}
4828 
4829 		if ((so = inp->inp_socket) == NULL ||
4830 		    (so->so_state & SS_DEFUNCT)) {
4831 			continue;
4832 		}
4833 		/*
4834 		 * check for keepalive offload flag without socket
4835 		 * lock to avoid a deadlock
4836 		 */
4837 		if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
4838 			continue;
4839 		}
4840 
4841 		if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
4842 			continue;
4843 		}
4844 		if (inp->inp_ppcb == NULL ||
4845 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
4846 			continue;
4847 		}
4848 		socket_lock(so, 1);
4849 		/* Release the want count */
4850 		if (inp->inp_ppcb == NULL ||
4851 		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
4852 			socket_unlock(so, 1);
4853 			continue;
4854 		}
4855 		if ((inp->inp_vflag & INP_IPV4) &&
4856 		    (inp->inp_laddr.s_addr == INADDR_ANY ||
4857 		    inp->inp_faddr.s_addr == INADDR_ANY)) {
4858 			socket_unlock(so, 1);
4859 			continue;
4860 		}
4861 		if ((inp->inp_vflag & INP_IPV6) &&
4862 		    (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
4863 		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
4864 			socket_unlock(so, 1);
4865 			continue;
4866 		}
4867 		if (inp->inp_lport == 0 || inp->inp_fport == 0) {
4868 			socket_unlock(so, 1);
4869 			continue;
4870 		}
4871 		if (inp->inp_last_outifp == NULL ||
4872 		    inp->inp_last_outifp->if_index != ifp->if_index) {
4873 			socket_unlock(so, 1);
4874 			continue;
4875 		}
4876 		if ((inp->inp_vflag & INP_IPV4) && frame_data_offset +
4877 		    sizeof(struct ip) + sizeof(struct tcphdr) >
4878 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4879 			socket_unlock(so, 1);
4880 			continue;
4881 		} else if (!(inp->inp_vflag & INP_IPV4) && frame_data_offset +
4882 		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) >
4883 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4884 			socket_unlock(so, 1);
4885 			continue;
4886 		}
4887 		/*
4888 		 * There is no point in waking up the device for connections
4889 		 * that are not established. Long-lived connections are meant
4890 		 * for processes that will send and receive data.
4891 		 */
4892 		if (tp->t_state != TCPS_ESTABLISHED) {
4893 			socket_unlock(so, 1);
4894 			continue;
4895 		}
4896 		/*
4897 		 * This inp has all the information that is needed to
4898 		 * generate an offload frame.
4899 		 */
4900 		frame = &frames_array[frame_index];
4901 		frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP;
4902 		frame->ether_type = (inp->inp_vflag & INP_IPV4) ?
4903 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 :
4904 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
4905 		frame->interval = (uint16_t)(tp->t_keepidle > 0 ? tp->t_keepidle :
4906 		    tcp_keepidle);
4907 		frame->keep_cnt = (uint8_t)TCP_CONN_KEEPCNT(tp);
4908 		frame->keep_retry = (uint16_t)TCP_CONN_KEEPINTVL(tp);
4909 		if (so->so_options & SO_NOWAKEFROMSLEEP) {
4910 			frame->flags |=
4911 			    IFNET_KEEPALIVE_OFFLOAD_FLAG_NOWAKEFROMSLEEP;
4912 		}
4913 		frame->local_port = ntohs(inp->inp_lport);
4914 		frame->remote_port = ntohs(inp->inp_fport);
4915 		frame->local_seq = tp->snd_nxt;
4916 		frame->remote_seq = tp->rcv_nxt;
4917 		if (inp->inp_vflag & INP_IPV4) {
4918 			ASSERT(frame_data_offset + sizeof(struct ip) + sizeof(struct tcphdr) <= UINT8_MAX);
4919 			frame->length = (uint8_t)(frame_data_offset +
4920 			    sizeof(struct ip) + sizeof(struct tcphdr));
4921 			frame->reply_length = frame->length;
4922 
4923 			frame->addr_length = sizeof(struct in_addr);
4924 			bcopy(&inp->inp_laddr, frame->local_addr,
4925 			    sizeof(struct in_addr));
4926 			bcopy(&inp->inp_faddr, frame->remote_addr,
4927 			    sizeof(struct in_addr));
4928 		} else {
4929 			struct in6_addr *ip6;
4930 
4931 			ASSERT(frame_data_offset + sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= UINT8_MAX);
4932 			frame->length = (uint8_t)(frame_data_offset +
4933 			    sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
4934 			frame->reply_length = frame->length;
4935 
4936 			frame->addr_length = sizeof(struct in6_addr);
4937 			ip6 = (struct in6_addr *)(void *)frame->local_addr;
4938 			bcopy(&inp->in6p_laddr, ip6, sizeof(struct in6_addr));
4939 			if (IN6_IS_SCOPE_EMBED(ip6)) {
4940 				ip6->s6_addr16[1] = 0;
4941 			}
4942 
4943 			ip6 = (struct in6_addr *)(void *)frame->remote_addr;
4944 			bcopy(&inp->in6p_faddr, ip6, sizeof(struct in6_addr));
4945 			if (IN6_IS_SCOPE_EMBED(ip6)) {
4946 				ip6->s6_addr16[1] = 0;
4947 			}
4948 		}
4949 
4950 		/*
4951 		 * First the probe
4952 		 */
4953 		m = tcp_make_keepalive_frame(tp, ifp, TRUE);
4954 		if (m == NULL) {
4955 			socket_unlock(so, 1);
4956 			continue;
4957 		}
4958 		bcopy(m_mtod_current(m), frame->data + frame_data_offset, m->m_len);
4959 		m_freem(m);
4960 
4961 		/*
4962 		 * Now the response packet to incoming probes
4963 		 */
4964 		m = tcp_make_keepalive_frame(tp, ifp, FALSE);
4965 		if (m == NULL) {
4966 			socket_unlock(so, 1);
4967 			continue;
4968 		}
4969 		bcopy(m_mtod_current(m), frame->reply_data + frame_data_offset,
4970 		    m->m_len);
4971 		m_freem(m);
4972 
4973 		frame_index++;
4974 		socket_unlock(so, 1);
4975 	}
4976 	lck_rw_done(&tcbinfo.ipi_lock);
4977 	*used_frames_count = frame_index;
4978 }
4979 
4980 static bool
4981 inp_matches_kao_frame(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frame,
4982     struct inpcb *inp)
4983 {
4984 	if (inp->inp_ppcb == NULL) {
4985 		return false;
4986 	}
4987 	/* Release the want count */
4988 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
4989 		return false;
4990 	}
4991 	if (inp->inp_last_outifp == NULL ||
4992 	    inp->inp_last_outifp->if_index != ifp->if_index) {
4993 		return false;
4994 	}
4995 	if (frame->local_port != ntohs(inp->inp_lport) ||
4996 	    frame->remote_port != ntohs(inp->inp_fport)) {
4997 		return false;
4998 	}
4999 	if (inp->inp_vflag & INP_IPV4) {
5000 		if (memcmp(&inp->inp_laddr, frame->local_addr,
5001 		    sizeof(struct in_addr)) != 0 ||
5002 		    memcmp(&inp->inp_faddr, frame->remote_addr,
5003 		    sizeof(struct in_addr)) != 0) {
5004 			return false;
5005 		}
5006 	} else if (inp->inp_vflag & INP_IPV6) {
5007 		if (memcmp(&inp->inp_laddr, frame->local_addr,
5008 		    sizeof(struct in6_addr)) != 0 ||
5009 		    memcmp(&inp->inp_faddr, frame->remote_addr,
5010 		    sizeof(struct in6_addr)) != 0) {
5011 			return false;
5012 		}
5013 	} else {
5014 		return false;
5015 	}
5016 	return true;
5017 }
5018 
5019 int
5020 tcp_notify_kao_timeout(ifnet_t ifp,
5021     struct ifnet_keepalive_offload_frame *frame)
5022 {
5023 	struct inpcb *inp = NULL;
5024 	struct socket *so = NULL;
5025 	bool found = false;
5026 
5027 	/*
5028 	 * We unlock the list before posting the event on the matching socket
5029 	 */
5030 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
5031 
5032 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
5033 		if ((so = inp->inp_socket) == NULL ||
5034 		    (so->so_state & SS_DEFUNCT)) {
5035 			continue;
5036 		}
5037 		if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
5038 			continue;
5039 		}
5040 		if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
5041 			continue;
5042 		}
5043 		if (inp->inp_ppcb == NULL ||
5044 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
5045 			continue;
5046 		}
5047 		socket_lock(so, 1);
5048 		if (inp_matches_kao_frame(ifp, frame, inp)) {
5049 			/*
5050 			 * Keep the matching socket locked
5051 			 */
5052 			found = true;
5053 			break;
5054 		}
5055 		socket_unlock(so, 1);
5056 	}
5057 	lck_rw_done(&tcbinfo.ipi_lock);
5058 
5059 	if (found) {
5060 		ASSERT(inp != NULL);
5061 		ASSERT(so != NULL);
5062 		ASSERT(so == inp->inp_socket);
5063 		/*
5064 		 * Drop the TCP connection like tcptimers() does
5065 		 */
5066 		struct tcpcb *tp = inp->inp_ppcb;
5067 
5068 		tcpstat.tcps_keepdrops++;
5069 		soevent(so,
5070 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
5071 		tp = tcp_drop(tp, ETIMEDOUT);
5072 
5073 		tcpstat.tcps_ka_offload_drops++;
5074 		os_log_info(OS_LOG_DEFAULT, "%s: dropped lport %u fport %u\n",
5075 		    __func__, frame->local_port, frame->remote_port);
5076 
5077 		socket_unlock(so, 1);
5078 	}
5079 
5080 	return 0;
5081 }
5082 
5083 errno_t
tcp_notify_ack_id_valid(struct tcpcb * tp,struct socket * so,u_int32_t notify_id)5084 tcp_notify_ack_id_valid(struct tcpcb *tp, struct socket *so,
5085     u_int32_t notify_id)
5086 {
5087 	struct tcp_notify_ack_marker *elm;
5088 
5089 	if (so->so_snd.sb_cc == 0) {
5090 		return ENOBUFS;
5091 	}
5092 
5093 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5094 		/* Duplicate id is not allowed */
5095 		if (elm->notify_id == notify_id) {
5096 			return EINVAL;
5097 		}
5098 		/* Duplicate position is not allowed */
5099 		if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) {
5100 			return EINVAL;
5101 		}
5102 	}
5103 	return 0;
5104 }
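
/*
 * Worked example: with snd_una == 1000 and 500 bytes queued in the send
 * buffer, a new marker would sit at sequence 1500 (snd_una + sb_cc).
 * Acking data advances snd_una by exactly the amount sb_cc shrinks, so
 * snd_una + sb_cc never moves backwards; a second marker at the same
 * position would complete indistinguishably from the first, which is why
 * both duplicate ids and duplicate positions are rejected.
 */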

errno_t
tcp_add_notify_ack_marker(struct tcpcb *tp, u_int32_t notify_id)
{
	struct tcp_notify_ack_marker *nm, *elm = NULL;
	struct socket *so = tp->t_inpcb->inp_socket;

	nm = kalloc_type(struct tcp_notify_ack_marker, Z_WAITOK | Z_ZERO);
	if (nm == NULL) {
		return ENOMEM;
	}
	nm->notify_id = notify_id;
	nm->notify_snd_una = tp->snd_una + so->so_snd.sb_cc;

	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
		if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una)) {
			break;
		}
	}

	if (elm == NULL) {
		VERIFY(SLIST_EMPTY(&tp->t_notify_ack));
		SLIST_INSERT_HEAD(&tp->t_notify_ack, nm, notify_next);
	} else {
		SLIST_INSERT_AFTER(elm, nm, notify_next);
	}
	tp->t_notify_ack_count++;
	return 0;
}
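
/*
 * Minimal userspace sketch of arming a marker.  It assumes the private
 * TCP_NOTIFY_ACKNOWLEDGEMENT socket option (<netinet/tcp.h>, PRIVATE) is
 * the entry point that reaches tcp_add_notify_ack_marker(); the option
 * name and the exact setsockopt contract are assumptions here, not
 * something this file establishes.
 */
#if 0
static void
example_arm_notify_ack(int s, const void *buf, size_t len)
{
	uint32_t notify_id = 1;		/* caller-chosen id; must be unique */

	/* queue data first: a marker needs sb_cc > 0 to have a position */
	(void)send(s, buf, len, 0);
	if (setsockopt(s, IPPROTO_TCP, TCP_NOTIFY_ACKNOWLEDGEMENT,
	    &notify_id, sizeof(notify_id)) == -1) {
		/* EINVAL: duplicate id or position; ENOBUFS: empty buffer */
	}
}
#endif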

void
tcp_notify_ack_free(struct tcpcb *tp)
{
	struct tcp_notify_ack_marker *elm, *next;
	if (SLIST_EMPTY(&tp->t_notify_ack)) {
		return;
	}

	SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
		SLIST_REMOVE(&tp->t_notify_ack, elm, tcp_notify_ack_marker,
		    notify_next);
		kfree_type(struct tcp_notify_ack_marker, elm);
	}
	SLIST_INIT(&tp->t_notify_ack);
	tp->t_notify_ack_count = 0;
}

inline void
tcp_notify_acknowledgement(struct tcpcb *tp, struct socket *so)
{
	struct tcp_notify_ack_marker *elm;

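	/* callers are expected to check SLIST_EMPTY() before calling */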
	elm = SLIST_FIRST(&tp->t_notify_ack);
	if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
		soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOTIFY_ACK);
	}
}

void
tcp_get_notify_ack_count(struct tcpcb *tp,
    struct tcp_notify_ack_complete *retid)
{
	struct tcp_notify_ack_marker *elm;
	uint32_t complete = 0;

	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
		if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
			ASSERT(complete < UINT32_MAX);
			complete++;
		} else {
			break;
		}
	}
	retid->notify_pending = tp->t_notify_ack_count - complete;
	retid->notify_complete_count = min(TCP_MAX_NOTIFY_ACK, complete);
}

void
tcp_get_notify_ack_ids(struct tcpcb *tp,
    struct tcp_notify_ack_complete *retid)
{
	size_t i = 0;
	struct tcp_notify_ack_marker *elm, *next;

	SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
		if (i >= retid->notify_complete_count) {
			break;
		}
		if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
			retid->notify_complete_id[i++] = elm->notify_id;
			SLIST_REMOVE(&tp->t_notify_ack, elm,
			    tcp_notify_ack_marker, notify_next);
			kfree_type(struct tcp_notify_ack_marker, elm);
			tp->t_notify_ack_count--;
		} else {
			break;
		}
	}
}

bool
tcp_notify_ack_active(struct socket *so)
{
	if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
	    SOCK_TYPE(so) == SOCK_STREAM) {
		struct tcpcb *tp = intotcpcb(sotoinpcb(so));

		if (!SLIST_EMPTY(&tp->t_notify_ack)) {
			struct tcp_notify_ack_marker *elm;
			elm = SLIST_FIRST(&tp->t_notify_ack);
			if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
				return true;
			}
		}
	}
	return false;
}
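
/*
 * Companion sketch to the one above: draining completed markers.  It
 * assumes getsockopt() on the same private option returns a struct
 * tcp_notify_ack_complete filled by tcp_get_notify_ack_count() and
 * tcp_get_notify_ack_ids(); at most TCP_MAX_NOTIFY_ACK ids are reported
 * per call, so a caller would loop while notify_pending stays non-zero.
 */
#if 0
static void
example_drain_notify_ack(int s)
{
	struct tcp_notify_ack_complete retid;
	socklen_t len = sizeof(retid);

	if (getsockopt(s, IPPROTO_TCP, TCP_NOTIFY_ACKNOWLEDGEMENT,
	    &retid, &len) == 0) {
		for (uint32_t i = 0; i < retid.notify_complete_count; i++) {
			/* retid.notify_complete_id[i] is fully acked */
		}
		/* retid.notify_pending markers remain outstanding */
	}
}
#endif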

inline int32_t
inp_get_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);

	if ((so->so_snd.sb_flags & SB_SNDBYTE_CNT) &&
	    so->so_snd.sb_cc > 0) {
		int32_t unsent, sent;
		sent = tp->snd_max - th_ack;
		if (tp->t_flags & TF_SENTFIN) {
			sent--;
		}
		unsent = so->so_snd.sb_cc - sent;
		return unsent;
	}
	return 0;
}
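
/*
 * Worked example: with snd_max == 5000, th_ack == 4000 and 3000 bytes in
 * the send buffer, sent == 1000 bytes are in flight beyond the ACK and
 * unsent == 2000 remain unsent.  A FIN occupies one sequence number but no
 * buffer byte, so TF_SENTFIN backs it out of the in-flight count first.
 */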

uint8_t
tcp_get_ace(struct tcphdr *th)
{
	uint8_t ace = 0;
	if (th->th_flags & TH_ECE) {
		ace += 1;
	}
	if (th->th_flags & TH_CWR) {
		ace += 2;
	}
	if (th->th_x2 & (TH_AE >> 8)) {
		ace += 4;
	}

	return ace;
}
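
/*
 * Usage sketch for tcp_get_ace(): the Accurate ECN (AccECN) ACE field is a
 * 3-bit counter built from ECE (1), CWR (2) and AE (4).  TH_AE sits above
 * the classic 8-bit flag byte, hence the test against th_x2 shifted down.
 */
#if 0
static void
example_ace_decode(void)
{
	struct tcphdr th = { 0 };

	th.th_flags = TH_ECE;		/* contributes 1 */
	th.th_x2 = TH_AE >> 8;		/* contributes 4 */
	ASSERT(tcp_get_ace(&th) == 5);
}
#endif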

#define IFP_PER_FLOW_STAT(_ipv4_, _stat_) { \
	if (_ipv4_) { \
	        ifp->if_ipv4_stat->_stat_++; \
	} else { \
	        ifp->if_ipv6_stat->_stat_++; \
	} \
}

#define FLOW_ECN_ENABLED(_flags_) \
    ((_flags_ & (TE_ECN_ON)) == (TE_ECN_ON))

void
tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
    struct ifnet *ifp)
{
	if (ifp == NULL || !IF_FULLY_ATTACHED(ifp)) {
		return;
	}

	ifnet_lock_shared(ifp);
	if (ifs->ecn_flags & TE_SETUPSENT) {
		if (ifs->ecn_flags & TE_CLIENT_SETUP) {
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_client_setup);
			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_client_success);
			} else if (ifs->ecn_flags & TE_LOST_SYN) {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_syn_lost);
			} else {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_peer_nosupport);
			}
		} else {
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_server_setup);
			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_server_success);
			} else if (ifs->ecn_flags & TE_LOST_SYN) {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_synack_lost);
			} else {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_peer_nosupport);
			}
		}
	} else {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off_conn);
	}
	if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
		if (ifs->ecn_flags & TE_RECV_ECN_CE) {
			tcpstat.tcps_ecn_conn_recv_ce++;
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ce);
		}
		if (ifs->ecn_flags & TE_RECV_ECN_ECE) {
			tcpstat.tcps_ecn_conn_recv_ece++;
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ece);
		}
		if (ifs->ecn_flags & (TE_RECV_ECN_CE | TE_RECV_ECN_ECE)) {
			if (ifs->txretransmitbytes > 0 ||
			    ifs->rxoutoforderbytes > 0) {
				tcpstat.tcps_ecn_conn_pl_ce++;
				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plce);
			} else {
				tcpstat.tcps_ecn_conn_nopl_ce++;
				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_noplce);
			}
		} else {
			if (ifs->txretransmitbytes > 0 ||
			    ifs->rxoutoforderbytes > 0) {
				tcpstat.tcps_ecn_conn_plnoce++;
				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plnoce);
			}
		}
	}

	/* Other stats are interesting for non-local connections only */
	if (ifs->local) {
		ifnet_lock_done(ifp);
		return;
	}

	if (ifs->ipv4) {
		ifp->if_ipv4_stat->timestamp = net_uptime();
		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_on);
		} else {
			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_off);
		}
	} else {
		ifp->if_ipv6_stat->timestamp = net_uptime();
		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_on);
		} else {
			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_off);
		}
	}

	if (ifs->rxmit_drop) {
		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_on.rxmit_drop);
		} else {
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop);
		}
	}
	if (ifs->ecn_fallback_synloss) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss);
	}
	if (ifs->ecn_fallback_droprst) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst);
	}
	if (ifs->ecn_fallback_droprxmt) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt);
	}
	if (ifs->ecn_fallback_ce) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce);
	}
	if (ifs->ecn_fallback_reorder) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder);
	}
	if (ifs->ecn_recv_ce > 0) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce);
	}
	if (ifs->ecn_recv_ece > 0) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece);
	}

	tcp_flow_lim_stats(ifs, &ifp->if_lim_stat);
	ifnet_lock_done(ifp);
}

#if SKYWALK

#include <skywalk/core/skywalk_var.h>
#include <skywalk/nexus/flowswitch/nx_flowswitch.h>

void
tcp_add_fsw_flow(struct tcpcb *tp, struct ifnet *ifp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	uuid_t fsw_uuid;
	struct nx_flow_req nfr;
	int err;

	if (!NX_FSW_TCP_RX_AGG_ENABLED()) {
		return;
	}

	if (ifp == NULL || kern_nexus_get_flowswitch_instance(ifp, fsw_uuid)) {
		TCP_LOG_FSW_FLOW(tp, "skip ifp no fsw");
		return;
	}

	memset(&nfr, 0, sizeof(nfr));

	if (inp->inp_vflag & INP_IPV4) {
		ASSERT(!(inp->inp_laddr.s_addr == INADDR_ANY ||
		    inp->inp_faddr.s_addr == INADDR_ANY ||
		    IN_MULTICAST(ntohl(inp->inp_laddr.s_addr)) ||
		    IN_MULTICAST(ntohl(inp->inp_faddr.s_addr))));
		nfr.nfr_saddr.sin.sin_len = sizeof(struct sockaddr_in);
		nfr.nfr_saddr.sin.sin_family = AF_INET;
		nfr.nfr_saddr.sin.sin_port = inp->inp_lport;
		memcpy(&nfr.nfr_saddr.sin.sin_addr, &inp->inp_laddr,
		    sizeof(struct in_addr));
		nfr.nfr_daddr.sin.sin_len = sizeof(struct sockaddr_in);
		nfr.nfr_daddr.sin.sin_family = AF_INET;
		nfr.nfr_daddr.sin.sin_port = inp->inp_fport;
		memcpy(&nfr.nfr_daddr.sin.sin_addr, &inp->inp_faddr,
		    sizeof(struct in_addr));
	} else {
		ASSERT(!(IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) ||
		    IN6_IS_ADDR_MULTICAST(&inp->in6p_laddr) ||
		    IN6_IS_ADDR_MULTICAST(&inp->in6p_faddr)));
		nfr.nfr_saddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		nfr.nfr_saddr.sin6.sin6_family = AF_INET6;
		nfr.nfr_saddr.sin6.sin6_port = inp->inp_lport;
		memcpy(&nfr.nfr_saddr.sin6.sin6_addr, &inp->in6p_laddr,
		    sizeof(struct in6_addr));
		nfr.nfr_daddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		nfr.nfr_daddr.sin6.sin6_family = AF_INET6;
		nfr.nfr_daddr.sin6.sin6_port = inp->inp_fport;
		memcpy(&nfr.nfr_daddr.sin6.sin6_addr, &inp->in6p_faddr,
		    sizeof(struct in6_addr));
		/* clear embedded scope ID */
		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_saddr.sin6.sin6_addr)) {
			nfr.nfr_saddr.sin6.sin6_addr.s6_addr16[1] = 0;
		}
		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_daddr.sin6.sin6_addr)) {
			nfr.nfr_daddr.sin6.sin6_addr.s6_addr16[1] = 0;
		}
	}

	nfr.nfr_nx_port = 1;
	nfr.nfr_ip_protocol = IPPROTO_TCP;
	nfr.nfr_transport_protocol = IPPROTO_TCP;
	nfr.nfr_flags = NXFLOWREQF_ASIS;
	nfr.nfr_epid = (so != NULL ? so->last_pid : 0);
	if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
		nfr.nfr_port_reservation = inp->inp_netns_token;
		nfr.nfr_flags |= NXFLOWREQF_EXT_PORT_RSV;
	}
	ASSERT(inp->inp_flowhash != 0);
	nfr.nfr_inp_flowhash = inp->inp_flowhash;

	uuid_generate_random(nfr.nfr_flow_uuid);
	err = kern_nexus_flow_add(kern_nexus_shared_controller(), fsw_uuid,
	    &nfr, sizeof(nfr));

	if (err == 0) {
		uuid_copy(tp->t_fsw_uuid, fsw_uuid);
		uuid_copy(tp->t_flow_uuid, nfr.nfr_flow_uuid);
	}

	TCP_LOG_FSW_FLOW(tp, "add err %d\n", err);
}
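
/*
 * The registration above pairs with tcp_del_fsw_flow() below: a successful
 * kern_nexus_flow_add() records both UUIDs in the tcpcb, and the delete
 * path is a no-op until both are set.  The ASSERTs require a fully formed
 * connection, so the callers are presumably the connection-established and
 * teardown paths; the exact call sites are not visible in this file.
 */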

void
tcp_del_fsw_flow(struct tcpcb *tp)
{
	if (uuid_is_null(tp->t_fsw_uuid) || uuid_is_null(tp->t_flow_uuid)) {
		return;
	}

	struct nx_flow_req nfr;
	uuid_copy(nfr.nfr_flow_uuid, tp->t_flow_uuid);

	/* It's possible for this call to fail if the nexus has detached */
	int err = kern_nexus_flow_del(kern_nexus_shared_controller(),
	    tp->t_fsw_uuid, &nfr, sizeof(nfr));
	VERIFY(err == 0 || err == ENOENT || err == ENXIO);

	uuid_clear(tp->t_fsw_uuid);
	uuid_clear(tp->t_flow_uuid);

	TCP_LOG_FSW_FLOW(tp, "del err %d\n", err);
}

#endif /* SKYWALK */