xref: /xnu-11215.1.10/bsd/netinet/tcp_subr.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
61  */
62 /*
63  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64  * support for mandatory and extensible security protections.  This notice
65  * is included in support of clause 2.2 (b) of the Apple Public License,
66  * Version 2.0.
67  */
68 
69 #include "tcp_includes.h"
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/domain.h>
78 #include <sys/proc.h>
79 #include <sys/kauth.h>
80 #include <sys/socket.h>
81 #include <sys/socketvar.h>
82 #include <sys/protosw.h>
83 #include <sys/random.h>
84 #include <sys/syslog.h>
85 #include <sys/mcache.h>
86 #include <kern/locks.h>
87 #include <kern/zalloc.h>
88 
89 #include <dev/random/randomdev.h>
90 
91 #include <net/route.h>
92 #include <net/if.h>
93 #include <net/content_filter.h>
94 #include <net/ntstat.h>
95 #include <net/multi_layer_pkt_log.h>
96 
97 #define tcp_minmssoverload fring
98 #define _IP_VHL
99 #include <netinet/in.h>
100 #include <netinet/in_systm.h>
101 #include <netinet/ip.h>
102 #include <netinet/ip_icmp.h>
103 #include <netinet/ip6.h>
104 #include <netinet/icmp6.h>
105 #include <netinet/in_pcb.h>
106 #include <netinet6/in6_pcb.h>
107 #include <netinet/in_var.h>
108 #include <netinet/ip_var.h>
109 #include <netinet/icmp_var.h>
110 #include <netinet6/ip6_var.h>
111 #include <netinet/mptcp_var.h>
112 #include <netinet/tcp.h>
113 #include <netinet/tcp_fsm.h>
114 #include <netinet/tcp_seq.h>
115 #include <netinet/tcp_timer.h>
116 #include <netinet/tcp_var.h>
117 #include <netinet/tcp_cc.h>
118 #include <netinet/tcp_cache.h>
119 #include <kern/thread_call.h>
120 
121 #include <netinet6/tcp6_var.h>
122 #include <netinet/tcpip.h>
123 #include <netinet/tcp_log.h>
124 
125 #include <netinet6/ip6protosw.h>
126 
127 #if IPSEC
128 #include <netinet6/ipsec.h>
129 #include <netinet6/ipsec6.h>
130 #endif /* IPSEC */
131 
132 #if NECP
133 #include <net/necp.h>
134 #endif /* NECP */
135 
136 #undef tcp_minmssoverload
137 
138 #include <net/sockaddr_utils.h>
139 
140 #include <corecrypto/ccaes.h>
141 #include <libkern/crypto/aes.h>
142 #include <libkern/crypto/md5.h>
143 #include <sys/kdebug.h>
144 #include <mach/sdt.h>
145 #include <pexpert/pexpert.h>
146 #include <mach/mach_time.h>
147 
148 #define DBG_FNC_TCP_CLOSE       NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
149 
150 static tcp_cc tcp_ccgen;
151 
152 extern struct tcptimerlist tcp_timer_list;
153 extern struct tcptailq tcp_tw_tailq;
154 
155 extern int tcp_awdl_rtobase;
156 
157 SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED,
158     int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size");
159 
160 SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt,
161     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS,
162     "Default TCP Maximum Segment Size for IPv6");
163 
164 int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int,
165     struct sysctl_req *);
166 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key, CTLTYPE_STRING | CTLFLAG_WR,
167     0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
168 
169 /* Current count of half-open TFO connections */
170 int     tcp_tfo_halfcnt = 0;
171 
172 /* Maximum backlog of half-open TFO connections */
173 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen_backlog,
174     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10,
175     "Backlog queue for half-open TFO connections");
176 
177 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED,
178     int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER,
179     "Enable TCP Fastopen (RFC 7413)");
180 
181 SYSCTL_SKMEM_TCP_INT(OID_AUTO, now_init, CTLFLAG_RD | CTLFLAG_LOCKED,
182     uint32_t, tcp_now_init, 0, "Initial tcp now value");
183 
184 SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED,
185     uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in microseconds");
186 
187 /*
188  * Minimum MSS we accept and use. This prevents DoS attacks where
189  * we are forced to a ridiculously low MSS like 20 and send hundreds
190  * of packets instead of one. The effect scales with the available
191  * bandwidth and quickly saturates the CPU and network interface
192  * with packet generation and sending. Set to zero to disable MINMSS
193  * checking. This setting keeps us from sending overly small packets.
194  */
195 SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
196     int, tcp_minmss, TCP_MINMSS, "Minimum TCP Maximum Segment Size");
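/*
 * Editor's note (illustrative, not part of the original source): a worked
 * example of why this floor matters. Pushing 10 Mbit/s (1,250,000 bytes/s)
 * with an attacker-chosen MSS of 20 costs ~62,500 packets/s of header and
 * per-packet processing overhead, versus ~856 packets/s at a normal MSS
 * of 1460.
 */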
197 
198 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
199     &tcbinfo.ipi_count, 0, "Number of active PCBs");
200 
201 SYSCTL_SKMEM_TCP_INT(OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED,
202     static int, icmp_may_rst, 1,
203     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
204 
205 int             tcp_do_timestamps = 1;
206 #if (DEVELOPMENT || DEBUG)
207 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_timestamps,
208     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_timestamps, 0, "enable TCP timestamps");
209 #endif /* (DEVELOPMENT || DEBUG) */
210 
211 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
212     int, tcp_TCPTV_MIN, 100, "min rtt value allowed");
213 
214 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rexmt_slop, CTLFLAG_RW,
215     int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout");
216 
217 SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
218     __private_extern__ int, tcp_use_randomport, 0,
219     "Randomize TCP port numbers");
220 
221 SYSCTL_SKMEM_TCP_INT(OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
222     __private_extern__ int, tcp_win_scale, 3, "Window scaling factor");
223 
224 #if (DEVELOPMENT || DEBUG)
225 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
226     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
227     "Initalize RTT from route cache");
228 #else
229 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
230     CTLFLAG_RD | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
231     "Initalize RTT from route cache");
232 #endif /* (DEVELOPMENT || DEBUG) */
233 
234 static int tso_debug = 0;
235 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
236     &tso_debug, 0, "TSO verbosity");
237 
238 static int tcp_rxt_seg_max = 1024;
239 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rxt_seg_max, CTLFLAG_RW | CTLFLAG_LOCKED,
240     &tcp_rxt_seg_max, 0, "");
241 
242 static unsigned long tcp_rxt_seg_drop = 0;
243 SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, rxt_seg_drop, CTLFLAG_RD | CTLFLAG_LOCKED,
244     &tcp_rxt_seg_drop, "");
245 
246 static void     tcp_notify(struct inpcb *, int);
247 
248 static KALLOC_TYPE_DEFINE(tcp_bwmeas_zone, struct bwmeas, NET_KT_DEFAULT);
249 KALLOC_TYPE_DEFINE(tcp_reass_zone, struct tseg_qent, NET_KT_DEFAULT);
250 KALLOC_TYPE_DEFINE(tcp_rxt_seg_zone, struct tcp_rxt_seg, NET_KT_DEFAULT);
251 KALLOC_TYPE_DEFINE(tcp_seg_sent_zone, struct tcp_seg_sent, NET_KT_DEFAULT);
252 
253 extern int slowlink_wsize;      /* window correction for slow links */
254 extern int path_mtu_discovery;
255 
256 uint32_t tcp_now_remainder_us = 0;  /* remaining microseconds for tcp_now */
257 
258 static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb);
259 
260 #define TCP_BWMEAS_BURST_MINSIZE 6
261 #define TCP_BWMEAS_BURST_MAXSIZE 25
262 
263 /*
264  * Target size of TCP PCB hash tables. Must be a power of two.
265  *
266  * Note that this can be overridden by the kernel environment
267  * variable net.inet.tcp.tcbhashsize
268  */
269 #ifndef TCBHASHSIZE
270 #define TCBHASHSIZE     CONFIG_TCBHASHSIZE
271 #endif
272 
273 __private_extern__ int  tcp_tcbhashsize = TCBHASHSIZE;
274 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED,
275     &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
276 
277 /*
278  * This is the actual shape of what we allocate using the zone
279  * allocator.  Doing it this way allows us to protect both structures
280  * using the same generation count, and also eliminates the overhead
281  * of allocating tcpcbs separately.  By hiding the structure here,
282  * we avoid changing most of the rest of the code (although it needs
283  * to be changed, eventually, for greater efficiency).
284  */
285 #define ALIGNMENT       32
286 struct  inp_tp {
287 	struct  inpcb   inp;
288 	struct  tcpcb   tcb __attribute__((aligned(ALIGNMENT)));
289 };
290 #undef ALIGNMENT
291 
292 static KALLOC_TYPE_DEFINE(tcpcbzone, struct inp_tp, NET_KT_DEFAULT);
293 
294 int  get_inpcb_str_size(void);
295 int  get_tcp_str_size(void);
296 
297 os_log_t tcp_mpkl_log_object = NULL;
298 
299 static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
300 
301 int tcp_notsent_lowat_check(struct socket *so);
302 static void tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
303     struct if_lim_perf_stat *stat);
304 static void tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
305     struct if_tcp_ecn_perf_stat *stat);
306 
307 static aes_encrypt_ctx tfo_ctx; /* Crypto-context for TFO */
308 
309 void
310 tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out, size_t blk_size)
311 {
312 	u_char in[CCAES_BLOCK_SIZE];
313 	int isipv6 = inp->inp_vflag & INP_IPV6;
314 
315 	VERIFY(blk_size == CCAES_BLOCK_SIZE);
316 
317 	bzero(&in[0], CCAES_BLOCK_SIZE);
318 	bzero(&out[0], CCAES_BLOCK_SIZE);
319 
320 	if (isipv6) {
321 		memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr));
322 	} else {
323 		memcpy(in, &inp->inp_faddr, sizeof(struct in_addr));
324 	}
325 
326 	aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx);
327 }
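/*
 * Editor's sketch (assumption, not in the original source): the cookie is
 * effectively a single AES-128 block encryption of the zero-padded peer
 * address under the boot-time key in tfo_ctx, since aes_encrypt_cbc() is
 * called with a NULL IV and exactly one CCAES_BLOCK_SIZE (16-byte) block:
 *
 *     in  = peer IPv4/IPv6 address, zero-padded to 16 bytes
 *     out = AES128-Encrypt(key, in)    // 16-byte result is the cookie
 *
 * The same peer address therefore always yields the same cookie until the
 * key is rotated via the fastopen_key sysctl below.
 */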
328 
329 __private_extern__ int
330 tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1,
331     __unused int arg2, struct sysctl_req *req)
332 {
333 	int error = 0;
334 	/*
335 	 * TFO-key is expressed as a string in hex format
336 	 *  +1 to account for the \0 char
337 	 *  +1 because sysctl_io_string() expects a string length but the sysctl command
338 	 *     now includes the terminating \0 in newlen -- see rdar://77205344
339 	 */
340 	char keystring[TCP_FASTOPEN_KEYLEN * 2 + 2];
341 	u_int32_t key[TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)];
342 	int i;
343 
344 	/*
345 	 * sysctl_io_string copies keystring into the oldptr of the sysctl_req.
346 	 * Make sure everything is zero, to avoid putting garbage in there or
347 	 * leaking the stack.
348 	 */
349 	bzero(keystring, sizeof(keystring));
350 
351 	error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL);
352 	if (error) {
353 		os_log(OS_LOG_DEFAULT,
354 		    "%s: sysctl_io_string() error %d, req->newlen %lu, sizeof(keystring) %lu",
355 		    __func__, error, req->newlen, sizeof(keystring));
356 		goto exit;
357 	}
358 	if (req->newptr == USER_ADDR_NULL) {
359 		goto exit;
360 	}
361 
362 	if (strlen(keystring) != TCP_FASTOPEN_KEYLEN * 2) {
363 		os_log(OS_LOG_DEFAULT,
364 		    "%s: strlen(keystring) %lu != TCP_FASTOPEN_KEYLEN * 2 %u, newlen %lu",
365 		    __func__, strlen(keystring), TCP_FASTOPEN_KEYLEN * 2, req->newlen);
366 		error = EINVAL;
367 		goto exit;
368 	}
369 
370 	for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) {
371 		/*
372 		 * We step through the keystring in 8-character chunks;
373 		 * each chunk encodes 4 bytes of the key in hex
374 		 */
375 		if (sscanf(&keystring[i * 8], "%8x", &key[i]) != 1) {
376 			error = EINVAL;
377 			os_log(OS_LOG_DEFAULT,
378 			    "%s: sscanf() != 1, error EINVAL", __func__);
379 			goto exit;
380 		}
381 	}
382 
383 	aes_encrypt_key128((u_char *)key, &tfo_ctx);
384 
385 exit:
386 	return error;
387 }
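/*
 * Illustrative usage (assumes TCP_FASTOPEN_KEYLEN == 16, i.e. 32 hex
 * characters; the key value below is hypothetical, not from the source):
 *
 *     sysctl net.inet.tcp.fastopen_key=000102030405060708090a0b0c0d0e0f
 *
 * The write-only handler parses the string in four 8-hex-digit chunks with
 * sscanf("%8x") and re-keys tfo_ctx via aes_encrypt_key128().
 */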
388 
389 int
390 get_inpcb_str_size(void)
391 {
392 	return sizeof(struct inpcb);
393 }
394 
395 int
396 get_tcp_str_size(void)
397 {
398 	return sizeof(struct tcpcb);
399 }
400 
401 static int scale_to_powerof2(int size);
402 
403 /*
404  * This helper routine returns one of the following scaled values of size:
405  * 1. The power of two rounded down from size, if size is not a power of
406  *    two and rounding up would overflow.
407  * OR
408  * 2. The power of two rounded up from size, if size is not a power of
409  *    two and rounding up does not overflow.
410  * OR
411  * 3. size itself, if it is already a power of two.
412  */
413 static int
414 scale_to_powerof2(int size)
415 {
416 	/* Handle special case of size = 0 */
417 	int ret = size ? size : 1;
418 
419 	if (!powerof2(ret)) {
420 		while (!powerof2(size)) {
421 			/*
422 			 * Clear out least significant
423 			 * set bit till size is left with
424 			 * its highest set bit at which point
425 			 * it is rounded down power of two.
426 			 */
427 			size = size & (size - 1);
428 		}
429 
430 		/* Check for overflow when rounding up */
431 		if (0 == (size << 1)) {
432 			ret = size;
433 		} else {
434 			ret = size << 1;
435 		}
436 	}
437 
438 	return ret;
439 }
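/*
 * Worked trace (editor's example): size = 1000 (0x3E8), not a power of two.
 * Repeated size &= (size - 1) clears the lowest set bit each pass:
 * 0x3E8 -> 0x3E0 -> 0x3C0 -> 0x380 -> 0x300 -> 0x200 (= 512, rounded down).
 * 512 << 1 == 1024 does not overflow, so the function returns 1024.
 */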
440 
441 /*
442  * Round the floating point to the next integer
443  * Eg. 1.3 will round up to 2.
444  */
445 uint32_t
446 tcp_ceil(double a)
447 {
448 	double res = (uint32_t) a;
449 	return (uint32_t)(res + (res < a));
450 }
451 
452 uint32_t
453 tcp_round_to(uint32_t val, uint32_t round)
454 {
455 	/*
456 	 * Round up or down to the nearest multiple, splitting at the midpoint.
457 	 * E.g., rounding to a multiple of 10, 16 rounds to 20 and 14 to 10.
458 	 */
459 	return ((val + (round / 2)) / round) * round;
460 }
461 
462 /*
463  * Round up to the next multiple of base.
464  * Eg. for a base of 64, 65 will become 128,
465  * 2896 will become 2944.
466  */
467 uint32_t
468 tcp_round_up(uint32_t val, uint32_t base)
469 {
470 	if (base == 1 || val % base == 0) {
471 		return val;
472 	}
473 
474 	return ((val + base) / base) * base;
475 }
476 
477 uint32_t
478 ntoh24(u_char *p)
479 {
480 	uint32_t v;
481 
482 	v  = (uint32_t)(p[0] << 16);
483 	v |= (uint32_t)(p[1] << 8);
484 	v |= (uint32_t)(p[2] << 0);
485 	return v;
486 }
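/*
 * Editor's example: ntoh24() decodes a 24-bit big-endian field, so
 * p = { 0x01, 0x02, 0x03 } yields (0x01 << 16) | (0x02 << 8) | 0x03
 * == 0x010203 == 66051.
 */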
487 
488 uint32_t
489 tcp_packets_this_ack(struct tcpcb *tp, uint32_t acked)
490 {
491 	return acked / tp->t_maxseg +
492 	       (((acked % tp->t_maxseg) != 0) ? 1 : 0);
493 }
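/*
 * Editor's example: this is ceiling division by the segment size. With
 * t_maxseg = 1448, an ACK covering 3000 bytes is 3000/1448 = 2 full
 * segments plus a nonzero remainder, i.e. 3 packets.
 */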
494 
495 static void
496 tcp_tfo_init(void)
497 {
498 	u_char key[TCP_FASTOPEN_KEYLEN];
499 
500 	read_frandom(key, sizeof(key));
501 	aes_encrypt_key128(key, &tfo_ctx);
502 }
503 
504 static u_char isn_secret[32];
505 
506 /*
507  * Tcp initialization
508  */
509 void
510 tcp_init(struct protosw *pp, struct domain *dp)
511 {
512 #pragma unused(dp)
513 	static int tcp_initialized = 0;
514 	struct inpcbinfo *pcbinfo;
515 
516 	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
517 
518 	if (tcp_initialized) {
519 		return;
520 	}
521 	tcp_initialized = 1;
522 
523 #if DEBUG || DEVELOPMENT
524 	(void) PE_parse_boot_argn("tcp_rxt_seg_max", &tcp_rxt_seg_max,
525 	    sizeof(tcp_rxt_seg_max));
526 #endif /* DEBUG || DEVELOPMENT */
527 
528 	tcp_ccgen = 1;
529 	tcp_keepinit = TCPTV_KEEP_INIT;
530 	tcp_keepidle = TCPTV_KEEP_IDLE;
531 	tcp_keepintvl = TCPTV_KEEPINTVL;
532 	tcp_keepcnt = TCPTV_KEEPCNT;
533 	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
534 	tcp_msl = TCPTV_MSL;
535 
536 	microuptime(&tcp_uptime);
537 	read_frandom(&tcp_now, sizeof(tcp_now));
538 
539 	/* Start the TCP internal clock at a random value */
540 	tcp_now = tcp_now & 0x3fffffff;
541 
542 	/* expose initial uptime/now via sysctl so utcp can keep time in sync */
543 	tcp_now_init = tcp_now;
544 	tcp_microuptime_init =
545 	    (uint32_t)(tcp_uptime.tv_usec + (tcp_uptime.tv_sec * USEC_PER_SEC));
546 	SYSCTL_SKMEM_UPDATE_FIELD(tcp.microuptime_init, tcp_microuptime_init);
547 	SYSCTL_SKMEM_UPDATE_FIELD(tcp.now_init, tcp_now_init);
548 
549 	tcp_tfo_init();
550 
551 	LIST_INIT(&tcb);
552 	tcbinfo.ipi_listhead = &tcb;
553 
554 	pcbinfo = &tcbinfo;
555 
556 	/*
557 	 * allocate group, lock attributes and lock for tcp pcb mutexes
558 	 */
559 	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("tcppcb",
560 	    LCK_GRP_ATTR_NULL);
561 	lck_attr_setdefault(&pcbinfo->ipi_lock_attr);
562 	lck_rw_init(&pcbinfo->ipi_lock, pcbinfo->ipi_lock_grp,
563 	    &pcbinfo->ipi_lock_attr);
564 
565 	if (tcp_tcbhashsize == 0) {
566 		/* Set to default */
567 		tcp_tcbhashsize = 512;
568 	}
569 
570 	if (!powerof2(tcp_tcbhashsize)) {
571 		int old_hash_size = tcp_tcbhashsize;
572 		tcp_tcbhashsize = scale_to_powerof2(tcp_tcbhashsize);
573 		/* Lower limit of 16  */
574 		if (tcp_tcbhashsize < 16) {
575 			tcp_tcbhashsize = 16;
576 		}
577 		printf("WARNING: TCB hash size not a power of 2, "
578 		    "scaled from %d to %d.\n",
579 		    old_hash_size,
580 		    tcp_tcbhashsize);
581 	}
582 
583 	hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_hashbase,
584 	    tcbinfo.ipi_hashbase_count);
585 	tcbinfo.ipi_hashmask = tcbinfo.ipi_hashbase_count - 1;
586 	hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_porthashbase,
587 	    tcbinfo.ipi_porthashbase_count);
588 	tcbinfo.ipi_porthashmask = tcbinfo.ipi_porthashbase_count - 1;
589 	tcbinfo.ipi_zone = tcpcbzone;
590 
591 	tcbinfo.ipi_gc = tcp_gc;
592 	tcbinfo.ipi_timer = tcp_itimer;
593 	in_pcbinfo_attach(&tcbinfo);
594 
595 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
596 	if (max_protohdr < TCP_MINPROTOHDR) {
597 		max_protohdr = (int)P2ROUNDUP(TCP_MINPROTOHDR, sizeof(uint32_t));
598 	}
599 	if (max_linkhdr + max_protohdr > MCLBYTES) {
600 		panic("tcp_init");
601 	}
602 #undef TCP_MINPROTOHDR
603 
604 	/* Initialize time wait and timer lists */
605 	TAILQ_INIT(&tcp_tw_tailq);
606 
607 	bzero(&tcp_timer_list, sizeof(tcp_timer_list));
608 	LIST_INIT(&tcp_timer_list.lhead);
609 	/*
610 	 * allocate group and attribute for the tcp timer list
611 	 */
612 	tcp_timer_list.mtx_grp = lck_grp_alloc_init("tcptimerlist",
613 	    LCK_GRP_ATTR_NULL);
614 	lck_mtx_init(&tcp_timer_list.mtx, tcp_timer_list.mtx_grp,
615 	    LCK_ATTR_NULL);
616 
617 	tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL);
618 	if (tcp_timer_list.call == NULL) {
619 		panic("failed to allocate call entry 1 in tcp_init");
620 	}
621 
622 	/* Initialize TCP Cache */
623 	tcp_cache_init();
624 
625 	tcp_mpkl_log_object = MPKL_CREATE_LOGOBJECT("com.apple.xnu.tcp");
626 	if (tcp_mpkl_log_object == NULL) {
627 		panic("MPKL_CREATE_LOGOBJECT failed");
628 	}
629 
630 	if (PE_parse_boot_argn("tcp_log", &tcp_log_enable_flags, sizeof(tcp_log_enable_flags))) {
631 		os_log(OS_LOG_DEFAULT, "tcp_init: set tcp_log_enable_flags to 0x%x", tcp_log_enable_flags);
632 	}
633 
634 	/*
635 	 * If more than 4GB of actual memory is available, increase the
636 	 * maximum allowed receive and send socket buffer size.
637 	 */
638 	if (mem_actual >= (1ULL << (GBSHIFT + 2))) {
639 		if (serverperfmode) {
640 			tcp_autorcvbuf_max = 8 * 1024 * 1024;
641 			tcp_autosndbuf_max = 8 * 1024 * 1024;
642 		} else {
643 			tcp_autorcvbuf_max = 4 * 1024 * 1024;
644 			tcp_autosndbuf_max = 4 * 1024 * 1024;
645 		}
646 
647 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.autorcvbufmax, tcp_autorcvbuf_max);
648 		SYSCTL_SKMEM_UPDATE_FIELD(tcp.autosndbufmax, tcp_autosndbuf_max);
649 	}
650 
651 	/* Initialize the TCP CCA array */
652 	tcp_cc_init();
653 
654 	read_frandom(&isn_secret, sizeof(isn_secret));
655 }
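/*
 * Editor's note (illustrative): the hash table size must be a power of two
 * because lookups mask rather than mod; e.g. with tcp_tcbhashsize == 512,
 * ipi_hashmask == 0x1FF and bucket = hash & 0x1FF. An override of
 * net.inet.tcp.tcbhashsize=100 would be scaled by scale_to_powerof2()
 * to 128.
 */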
656 
657 /*
658  * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
659  * tcp_template used to store this data in mbufs, but we now recopy it out
660  * of the tcpcb each time to conserve mbufs.
661  */
662 void
663 tcp_fillheaders(struct mbuf *m, struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
664 {
665 	struct inpcb *inp = tp->t_inpcb;
666 	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
667 
668 	if ((inp->inp_vflag & INP_IPV6) != 0) {
669 		struct ip6_hdr *ip6;
670 
671 		ip6 = (struct ip6_hdr *)ip_ptr;
672 		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
673 		    (inp->inp_flow & IPV6_FLOWINFO_MASK);
674 		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
675 		    (IPV6_VERSION & IPV6_VERSION_MASK);
676 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
677 		ip6->ip6_nxt = IPPROTO_TCP;
678 		ip6->ip6_hlim = 0;
679 		ip6->ip6_src = inp->in6p_laddr;
680 		ip6->ip6_dst = inp->in6p_faddr;
681 		if (m->m_flags & M_PKTHDR) {
682 			uint32_t lifscope = inp->inp_lifscope != 0 ? inp->inp_lifscope : inp->inp_fifscope;
683 			uint32_t fifscope = inp->inp_fifscope != 0 ? inp->inp_fifscope : inp->inp_lifscope;
684 			ip6_output_setsrcifscope(m, lifscope, NULL);
685 			ip6_output_setdstifscope(m, fifscope, NULL);
686 		}
687 		tcp_hdr->th_sum = in6_pseudo(&inp->in6p_laddr, &inp->in6p_faddr,
688 		    htonl(sizeof(struct tcphdr) + IPPROTO_TCP));
689 	} else {
690 		struct ip *ip = (struct ip *) ip_ptr;
691 
692 		ip->ip_vhl = IP_VHL_BORING;
693 		ip->ip_tos = 0;
694 		ip->ip_len = 0;
695 		ip->ip_id = 0;
696 		ip->ip_off = 0;
697 		ip->ip_ttl = 0;
698 		ip->ip_sum = 0;
699 		ip->ip_p = IPPROTO_TCP;
700 		ip->ip_src = inp->inp_laddr;
701 		ip->ip_dst = inp->inp_faddr;
702 		tcp_hdr->th_sum =
703 		    in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
704 		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
705 	}
706 
707 	tcp_hdr->th_sport = inp->inp_lport;
708 	tcp_hdr->th_dport = inp->inp_fport;
709 	tcp_hdr->th_seq = 0;
710 	tcp_hdr->th_ack = 0;
711 	tcp_hdr->th_x2 = 0;
712 	tcp_hdr->th_off = 5;
713 	tcp_hdr->th_flags = 0;
714 	tcp_hdr->th_win = 0;
715 	tcp_hdr->th_urp = 0;
716 }
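/*
 * Editor's note (illustrative): th_sum is deliberately seeded with the
 * checksum of the pseudo-header only (addresses, protocol, and a length of
 * sizeof(struct tcphdr)), not a finished checksum. Callers that append
 * options or payload, and checksum-offload engines, fold the remaining
 * bytes into this partial sum later.
 */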
717 
718 /*
719  * Create template to be used to send tcp packets on a connection.
720  * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
721  * use for this function is in keepalives, which use tcp_respond.
722  */
723 struct tcptemp *
724 tcp_maketemplate(struct tcpcb *tp, struct mbuf **mp)
725 {
726 	struct mbuf *m;
727 	struct tcptemp *n;
728 
729 	*mp = m = m_get(M_DONTWAIT, MT_HEADER);
730 	if (m == NULL) {
731 		return NULL;
732 	}
733 	m->m_len = sizeof(struct tcptemp);
734 	n = mtod(m, struct tcptemp *);
735 
736 	tcp_fillheaders(m, tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
737 	return n;
738 }
739 
740 /*
741  * Send a single message to the TCP at address specified by
742  * the given TCP/IP header.  If m == 0, then we make a copy
743  * of the tcpiphdr at ti and send directly to the addressed host.
744  * This is used to force keep alive messages out using the TCP
745  * template for a connection.  If flags are given then we send
746  * a message back to the TCP which originated the segment ti,
747  * and discard the mbuf containing it and any other attached mbufs.
748  *
749  * In any case the ack and sequence number of the transmitted
750  * segment are as specified by the parameters.
751  *
752  * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
753  */
754 void
755 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
756     tcp_seq ack, tcp_seq seq, uint8_t flags, struct tcp_respond_args *tra)
757 {
758 	uint16_t tlen;
759 	int win = 0;
760 	struct route *ro = 0;
761 	struct route sro;
762 	struct ip *ip;
763 	struct tcphdr *nth;
764 	struct route_in6 *ro6 = 0;
765 	struct route_in6 sro6;
766 	struct ip6_hdr *ip6;
767 	int isipv6;
768 	struct ifnet *outif;
769 	int sotc = SO_TC_UNSPEC;
770 	bool check_qos_marking_again = FALSE;
771 	uint32_t sifscope = IFSCOPE_NONE, fifscope = IFSCOPE_NONE;
772 
773 	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
774 	ip6 = ipgen;
775 	ip = ipgen;
776 
777 	if (tp) {
778 		check_qos_marking_again = tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE ? FALSE : TRUE;
779 		sifscope = tp->t_inpcb->inp_lifscope;
780 		fifscope = tp->t_inpcb->inp_fifscope;
781 		if (!(flags & TH_RST)) {
782 			win = tcp_sbspace(tp);
783 			if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) {
784 				win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
785 			}
786 		}
787 		if (isipv6) {
788 			ro6 = &tp->t_inpcb->in6p_route;
789 		} else {
790 			ro = &tp->t_inpcb->inp_route;
791 		}
792 	} else {
793 		if (isipv6) {
794 			ro6 = &sro6;
795 			bzero(ro6, sizeof(*ro6));
796 		} else {
797 			ro = &sro;
798 			bzero(ro, sizeof(*ro));
799 		}
800 	}
801 	if (m == 0) {
802 		m = m_gethdr(M_DONTWAIT, MT_HEADER);    /* MAC-OK */
803 		if (m == NULL) {
804 			return;
805 		}
806 		tlen = 0;
807 		m->m_data += max_linkhdr;
808 		if (isipv6) {
809 			VERIFY((MHLEN - max_linkhdr) >=
810 			    (sizeof(*ip6) + sizeof(*nth)));
811 			bcopy((caddr_t)ip6, mtod(m, caddr_t),
812 			    sizeof(struct ip6_hdr));
813 			ip6 = mtod(m, struct ip6_hdr *);
814 			nth = (struct tcphdr *)(void *)(ip6 + 1);
815 		} else {
816 			VERIFY((MHLEN - max_linkhdr) >=
817 			    (sizeof(*ip) + sizeof(*nth)));
818 			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
819 			ip = mtod(m, struct ip *);
820 			nth = (struct tcphdr *)(void *)(ip + 1);
821 		}
822 		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
823 #if MPTCP
824 		if ((tp) && (tp->t_mpflags & TMPF_RESET)) {
825 			flags = (TH_RST | TH_ACK);
826 		} else
827 #endif
828 		flags = TH_ACK;
829 	} else {
830 		m_freem(m->m_next);
831 		m->m_next = 0;
832 		m->m_data = (uintptr_t)ipgen;
833 		/* m_len is set later */
834 		tlen = 0;
835 #define xchg(a, b, type) { type t; t = a; a = b; b = t; }
836 		if (isipv6) {
837 			ip6_getsrcifaddr_info(m, &sifscope, NULL);
838 			ip6_getdstifaddr_info(m, &fifscope, NULL);
839 			if (!in6_embedded_scope) {
840 				m->m_pkthdr.pkt_flags &= ~PKTF_IFAINFO;
841 			}
842 			/* Expect 32-bit aligned IP on strict-align platforms */
843 			IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6);
844 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
845 			nth = (struct tcphdr *)(void *)(ip6 + 1);
846 		} else {
847 			/* Expect 32-bit aligned IP on strict-align platforms */
848 			IP_HDR_STRICT_ALIGNMENT_CHECK(ip);
849 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
850 			nth = (struct tcphdr *)(void *)(ip + 1);
851 		}
852 		if (th != nth) {
853 			/*
854 			 * this usually happens when an extension header
855 			 * exists between the IPv6 header and the
856 			 * TCP header.
857 			 */
858 			nth->th_sport = th->th_sport;
859 			nth->th_dport = th->th_dport;
860 		}
861 		xchg(nth->th_dport, nth->th_sport, n_short);
862 #undef xchg
863 	}
864 	if (isipv6) {
865 		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) +
866 		    tlen));
867 		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
868 		ip6_output_setsrcifscope(m, sifscope, NULL);
869 		ip6_output_setdstifscope(m, fifscope, NULL);
870 	} else {
871 		tlen += sizeof(struct tcpiphdr);
872 		ip->ip_len = tlen;
873 		ip->ip_ttl = (uint8_t)ip_defttl;
874 	}
875 	m->m_len = tlen;
876 	m->m_pkthdr.len = tlen;
877 	m->m_pkthdr.rcvif = 0;
878 	if (tra->keep_alive) {
879 		m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
880 	}
881 
882 	nth->th_seq = htonl(seq);
883 	nth->th_ack = htonl(ack);
884 	nth->th_x2 = 0;
885 	nth->th_off = sizeof(struct tcphdr) >> 2;
886 	nth->th_flags = flags;
887 	if (tp) {
888 		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
889 	} else {
890 		nth->th_win = htons((u_short)win);
891 	}
892 	nth->th_urp = 0;
893 	if (isipv6) {
894 		nth->th_sum = 0;
895 		nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
896 		    htonl((tlen - sizeof(struct ip6_hdr)) + IPPROTO_TCP));
897 		m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
898 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
899 		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
900 		    ro6 && ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
901 	} else {
902 		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
903 		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
904 		m->m_pkthdr.csum_flags = CSUM_TCP;
905 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
906 	}
907 #if NECP
908 	necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0, 0);
909 #endif /* NECP */
910 
911 #if IPSEC
912 	if (tp != NULL && tp->t_inpcb->inp_sp != NULL &&
913 	    ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
914 		m_freem(m);
915 		return;
916 	}
917 #endif
918 
919 	if (tp != NULL) {
920 		u_int32_t svc_flags = 0;
921 		if (isipv6) {
922 			svc_flags |= PKT_SCF_IPV6;
923 		}
924 		sotc = tp->t_inpcb->inp_socket->so_traffic_class;
925 		if ((flags & TH_RST) == 0) {
926 			set_packet_service_class(m, tp->t_inpcb->inp_socket,
927 			    sotc, svc_flags);
928 		} else {
929 			m_set_service_class(m, MBUF_SC_BK_SYS);
930 		}
931 
932 		/* Embed flowhash and flow control flags */
933 		m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
934 		m->m_pkthdr.pkt_flowid = tp->t_inpcb->inp_flowhash;
935 		m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
936 		m->m_pkthdr.pkt_proto = IPPROTO_TCP;
937 		m->m_pkthdr.tx_tcp_pid = tp->t_inpcb->inp_socket->last_pid;
938 		m->m_pkthdr.tx_tcp_e_pid = tp->t_inpcb->inp_socket->e_pid;
939 
940 		if (flags & TH_RST) {
941 			m->m_pkthdr.comp_gencnt = tp->t_comp_gencnt;
942 		}
943 	} else {
944 		if (flags & TH_RST) {
945 			m->m_pkthdr.comp_gencnt = TCP_ACK_COMPRESSION_DUMMY;
946 			m_set_service_class(m, MBUF_SC_BK_SYS);
947 		}
948 	}
949 
950 	if (isipv6) {
951 		struct ip6_out_args ip6oa;
952 		bzero(&ip6oa, sizeof(ip6oa));
953 		ip6oa.ip6oa_boundif = tra->ifscope;
954 		ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
955 		ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
956 		ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
957 
958 		if (tra->ifscope != IFSCOPE_NONE) {
959 			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
960 		}
961 		if (tra->nocell) {
962 			ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
963 		}
964 		if (tra->noexpensive) {
965 			ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
966 		}
967 		if (tra->noconstrained) {
968 			ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
969 		}
970 		if (tra->awdl_unrestricted) {
971 			ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;
972 		}
973 		if (tra->intcoproc_allowed) {
974 			ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED;
975 		}
976 		if (tra->management_allowed) {
977 			ip6oa.ip6oa_flags |= IP6OAF_MANAGEMENT_ALLOWED;
978 		}
979 		ip6oa.ip6oa_sotc = sotc;
980 		if (tp != NULL) {
981 			if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
982 				ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
983 			}
984 			ip6oa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
985 			if (check_qos_marking_again) {
986 				ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY;
987 			}
988 			ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
989 		}
990 		(void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
991 		    NULL, &ip6oa);
992 
993 		if (check_qos_marking_again) {
994 			struct inpcb *inp = tp->t_inpcb;
995 			inp->inp_policyresult.results.qos_marking_gencount = ip6oa.qos_marking_gencount;
996 			if (ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) {
997 				inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
998 			} else {
999 				inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1000 			}
1001 		}
1002 
1003 		if (tp != NULL && ro6 != NULL && ro6->ro_rt != NULL &&
1004 		    (outif = ro6->ro_rt->rt_ifp) !=
1005 		    tp->t_inpcb->in6p_last_outifp) {
1006 			tp->t_inpcb->in6p_last_outifp = outif;
1007 #if SKYWALK
1008 			if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1009 				netns_set_ifnet(&tp->t_inpcb->inp_netns_token,
1010 				    tp->t_inpcb->in6p_last_outifp);
1011 			}
1012 #endif /* SKYWALK */
1013 		}
1014 
1015 		if (ro6 == &sro6) {
1016 			ROUTE_RELEASE(ro6);
1017 		}
1018 	} else {
1019 		struct ip_out_args ipoa;
1020 		bzero(&ipoa, sizeof(ipoa));
1021 		ipoa.ipoa_boundif = tra->ifscope;
1022 		ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
1023 		ipoa.ipoa_sotc = SO_TC_UNSPEC;
1024 		ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1025 
1026 		if (tra->ifscope != IFSCOPE_NONE) {
1027 			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1028 		}
1029 		if (tra->nocell) {
1030 			ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
1031 		}
1032 		if (tra->noexpensive) {
1033 			ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
1034 		}
1035 		if (tra->noconstrained) {
1036 			ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
1037 		}
1038 		if (tra->awdl_unrestricted) {
1039 			ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
1040 		}
1041 		if (tra->management_allowed) {
1042 			ipoa.ipoa_flags |= IPOAF_MANAGEMENT_ALLOWED;
1043 		}
1044 		ipoa.ipoa_sotc = sotc;
1045 		if (tp != NULL) {
1046 			if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1047 				ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1048 			}
1049 			if (!(tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
1050 				ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
1051 			}
1052 			ipoa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
1053 			ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1054 		}
1055 		if (ro != &sro) {
1056 			/* Copy the cached route and take an extra reference */
1057 			inp_route_copyout(tp->t_inpcb, &sro);
1058 		}
1059 		/*
1060 		 * For consistency, pass a local route copy.
1061 		 */
1062 		(void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);
1063 
1064 		if (check_qos_marking_again) {
1065 			struct inpcb *inp = tp->t_inpcb;
1066 			inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount;
1067 			if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) {
1068 				inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1069 			} else {
1070 				inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1071 			}
1072 		}
1073 		if (tp != NULL && sro.ro_rt != NULL &&
1074 		    (outif = sro.ro_rt->rt_ifp) !=
1075 		    tp->t_inpcb->inp_last_outifp) {
1076 			tp->t_inpcb->inp_last_outifp = outif;
1077 #if SKYWALK
1078 			if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1079 				netns_set_ifnet(&tp->t_inpcb->inp_netns_token, outif);
1080 			}
1081 #endif /* SKYWALK */
1082 		}
1083 		if (ro != &sro) {
1084 			/* Synchronize cached PCB route */
1085 			inp_route_copyin(tp->t_inpcb, &sro);
1086 		} else {
1087 			ROUTE_RELEASE(&sro);
1088 		}
1089 	}
1090 }
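/*
 * Editor's sketch of a typical call (hedged; the exact call sites and
 * argument details live in tcp_timer.c and are assumptions here). A
 * keepalive probe is classically sent with a sequence number one below
 * snd_una so the peer is forced to ACK:
 *
 *     struct tcp_respond_args tra = { .ifscope = IFSCOPE_NONE };
 *     tcp_respond(tp, &t_template->tt_ipgen, &t_template->tt_t, NULL,
 *         tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
 *
 * With m == NULL and no TH_RST requested, the function builds a fresh mbuf
 * from the template and sends a bare ACK.
 */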
1091 
1092 /*
1093  * Create a new TCP control block, making an
1094  * empty reassembly queue and hooking it to the argument
1095  * protocol control block.  The `inp' parameter must have
1096  * come from the zone allocator set up in tcp_init().
1097  */
1098 struct tcpcb *
1099 tcp_newtcpcb(struct inpcb *inp)
1100 {
1101 	struct inp_tp *it;
1102 	struct tcpcb *tp;
1103 	struct socket *so = inp->inp_socket;
1104 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1105 	uint32_t random_32;
1106 
1107 	calculate_tcp_clock();
1108 
1109 	if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
1110 		it = (struct inp_tp *)(void *)inp;
1111 		tp = &it->tcb;
1112 	} else {
1113 		tp = (struct tcpcb *)(void *)inp->inp_saved_ppcb;
1114 	}
1115 
1116 	bzero((char *) tp, sizeof(struct tcpcb));
1117 	LIST_INIT(&tp->t_segq);
1118 	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
1119 
1120 	tp->t_flags = TF_REQ_SCALE | (tcp_do_timestamps ? TF_REQ_TSTMP : 0);
1121 	tp->t_flagsext |= TF_SACK_ENABLE;
1122 
1123 	if (tcp_rack) {
1124 		tp->t_flagsext |= TF_RACK_ENABLED;
1125 	}
1126 
1127 	TAILQ_INIT(&tp->snd_holes);
1128 	SLIST_INIT(&tp->t_rxt_segments);
1129 	TAILQ_INIT(&tp->t_segs_sent);
1130 	RB_INIT(&tp->t_segs_sent_tree);
1131 	TAILQ_INIT(&tp->t_segs_acked);
1132 	TAILQ_INIT(&tp->seg_pool.free_segs);
1133 	SLIST_INIT(&tp->t_notify_ack);
1134 	tp->t_inpcb = inp;
1135 	/*
1136 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1137 	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
1138 	 * a reasonable initial retransmit time.
1139 	 */
1140 	tp->t_srtt = TCPTV_SRTTBASE;
1141 	tp->t_rttvar =
1142 	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1143 	tp->t_rttmin = tcp_TCPTV_MIN;
1144 	tp->t_rxtcur = TCPTV_RTOBASE;
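	/*
	 * Editor's worked math (illustrative): t_rttvar is stored scaled by
	 * 2^TCP_RTTVAR_SHIFT, so the seed above makes the unscaled rttvar
	 * equal (TCPTV_RTOBASE - TCPTV_SRTTBASE) / 4. With srtt == 0, the
	 * classic estimate srtt + 4 * rttvar then comes out to exactly
	 * TCPTV_RTOBASE, matching the explicit t_rxtcur seed.
	 */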
1145 
1146 	if (tcp_use_newreno) {
1147 		/* use newreno by default */
1148 		tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
1149 #if (DEVELOPMENT || DEBUG)
1150 	} else if (tcp_use_ledbat) {
1151 		/* use ledbat for testing */
1152 		tp->tcp_cc_index = TCP_CC_ALGO_BACKGROUND_INDEX;
1153 #endif
1154 	} else {
1155 		if (TCP_L4S_ENABLED(tp)) {
1156 			tp->tcp_cc_index = TCP_CC_ALGO_PRAGUE_INDEX;
1157 		} else {
1158 			tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX;
1159 		}
1160 	}
1161 
1162 	tcp_cc_allocate_state(tp);
1163 
1164 	if (CC_ALGO(tp)->init != NULL) {
1165 		CC_ALGO(tp)->init(tp);
1166 	}
1167 
1168 	/* Initialize rledbat if we are using recv_bg */
1169 	if (tcp_rledbat == 1 && TCP_RECV_BG(inp->inp_socket) &&
1170 	    tcp_cc_rledbat.init != NULL) {
1171 		tcp_cc_rledbat.init(tp);
1172 	}
1173 
1174 	tp->snd_cwnd = tcp_initial_cwnd(tp);
1175 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1176 	tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1177 	tp->t_rcvtime = tcp_now;
1178 	tp->tentry.timer_start = tcp_now;
1179 	tp->rcv_unackwin = tcp_now;
1180 	tp->t_persist_timeout = tcp_max_persist_timeout;
1181 	tp->t_persist_stop = 0;
1182 	tp->t_flagsext |= TF_RCVUNACK_WAITSS;
1183 	tp->t_rexmtthresh = (uint8_t)tcprexmtthresh;
1184 	tp->rack.reo_wnd_multi = 1;
1185 	tp->rfbuf_ts = tcp_now;
1186 	tp->rfbuf_space = tcp_initial_cwnd(tp);
1187 	tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1188 	tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
1189 
1190 	/* Enable bandwidth measurement on this connection */
1191 	tp->t_flagsext |= TF_MEASURESNDBW;
1192 	if (tp->t_bwmeas == NULL) {
1193 		tp->t_bwmeas = tcp_bwmeas_alloc(tp);
1194 		if (tp->t_bwmeas == NULL) {
1195 			tp->t_flagsext &= ~TF_MEASURESNDBW;
1196 		}
1197 	}
1198 
1199 	/* Clear time wait tailq entry */
1200 	tp->t_twentry.tqe_next = NULL;
1201 	tp->t_twentry.tqe_prev = NULL;
1202 
1203 	read_frandom(&random_32, sizeof(random_32));
1204 	tp->t_comp_gencnt = random_32;
1205 	if (tp->t_comp_gencnt <= TCP_ACK_COMPRESSION_DUMMY) {
1206 		tp->t_comp_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
1207 	}
1208 	tp->t_comp_lastinc = tcp_now;
1209 
1210 	/* Initialize Accurate ECN state */
1211 	tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_feature_disabled;
1212 	tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_feature_disabled;
1213 
1214 	/*
1215 	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
1216 	 * because the socket may be bound to an IPv6 wildcard address,
1217 	 * which may match an IPv4-mapped IPv6 address.
1218 	 */
1219 	inp->inp_ip_ttl = (uint8_t)ip_defttl;
1220 	inp->inp_ppcb = (caddr_t)tp;
1221 	return tp;            /* XXX */
1222 }
1223 
1224 /*
1225  * Drop a TCP connection, reporting
1226  * the specified error.  If connection is synchronized,
1227  * then send a RST to peer.
1228  */
1229 struct tcpcb *
1230 tcp_drop(struct tcpcb *tp, int errno)
1231 {
1232 	struct socket *so = tp->t_inpcb->inp_socket;
1233 #if CONFIG_DTRACE
1234 	struct inpcb *inp = tp->t_inpcb;
1235 #endif
1236 
1237 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
1238 		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1239 		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1240 		TCP_LOG_STATE(tp, TCPS_CLOSED);
1241 		tp->t_state = TCPS_CLOSED;
1242 		(void) tcp_output(tp);
1243 		tcpstat.tcps_drops++;
1244 	} else {
1245 		tcpstat.tcps_conndrops++;
1246 	}
1247 	if (errno == ETIMEDOUT && tp->t_softerror) {
1248 		errno = tp->t_softerror;
1249 	}
1250 	so->so_error = (u_short)errno;
1251 
1252 	TCP_LOG_CONNECTION_SUMMARY(tp);
1253 
1254 	return tcp_close(tp);
1255 }
1256 
1257 void
1258 tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt)
1259 {
1260 	u_int32_t rtt = rt->rt_rmx.rmx_rtt;
1261 	int isnetlocal = (tp->t_flags & TF_LOCAL);
1262 
1263 	TCP_LOG_RTM_RTT(tp, rt);
1264 
1265 	if (rtt != 0 && tcp_init_rtt_from_cache != 0) {
1266 		/*
1267 		 * XXX the lock bit for RTT indicates that the value
1268 		 * is also a minimum value; this is subject to time.
1269 		 */
1270 		if (rt->rt_rmx.rmx_locks & RTV_RTT) {
1271 			tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ);
1272 		} else {
1273 			tp->t_rttmin = isnetlocal ? tcp_TCPTV_MIN :
1274 			    TCPTV_REXMTMIN;
1275 		}
1276 
1277 		tp->t_srtt =
1278 		    rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1279 		tcpstat.tcps_usedrtt++;
1280 
1281 		if (rt->rt_rmx.rmx_rttvar) {
1282 			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
1283 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1284 			tcpstat.tcps_usedrttvar++;
1285 		} else {
1286 			/* default variation is +- 1 rtt */
1287 			tp->t_rttvar =
1288 			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
1289 		}
1290 
1291 		/*
1292 		 * The RTO formula in the route metric case is based on:
1293 		 *     srtt + 4 * rttvar
1294 		 * modulo the min, max and slop
1295 		 */
1296 		TCPT_RANGESET(tp->t_rxtcur,
1297 		    TCP_REXMTVAL(tp),
1298 		    tp->t_rttmin, TCPTV_REXMTMAX,
1299 		    TCP_ADD_REXMTSLOP(tp));
1300 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_srtt == 0 &&
1301 	    tp->t_rxtshift == 0) {
1302 		struct ifnet *ifp = rt->rt_ifp;
1303 
1304 		if (ifp != NULL && (ifp->if_eflags & IFEF_AWDL) != 0) {
1305 			/*
1306 			 * AWDL needs a special value for the default initial retransmission timeout
1307 			 */
1308 			if (tcp_awdl_rtobase > tcp_TCPTV_MIN) {
1309 				tp->t_rttvar = ((tcp_awdl_rtobase - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1310 			} else {
1311 				tp->t_rttvar = ((tcp_TCPTV_MIN - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1312 			}
1313 			TCPT_RANGESET(tp->t_rxtcur,
1314 			    TCP_REXMTVAL(tp),
1315 			    tp->t_rttmin, TCPTV_REXMTMAX,
1316 			    TCP_ADD_REXMTSLOP(tp));
1317 		}
1318 	}
1319 
1320 	TCP_LOG_RTT_INFO(tp);
1321 }
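/*
 * Editor's note on units (assumes RTM_RTTUNIT == 1000000, i.e. route
 * metrics kept in microseconds, and TCP_RETRANSHZ == 1000 ticks/s): the
 * divisor RTM_RTTUNIT / TCP_RETRANSHZ == 1000 converts a cached rmx_rtt
 * from microseconds to millisecond ticks, and the TCP_RTT_SCALE factor
 * keeps t_srtt in the fixed-point form used by the smoothing filter.
 */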
1322 
1323 static inline void
1324 tcp_create_ifnet_stats_per_flow(struct tcpcb *tp,
1325     struct ifnet_stats_per_flow *ifs)
1326 {
1327 	struct inpcb *inp;
1328 	struct socket *so;
1329 	if (tp == NULL || ifs == NULL) {
1330 		return;
1331 	}
1332 
1333 	bzero(ifs, sizeof(*ifs));
1334 	inp = tp->t_inpcb;
1335 	so = inp->inp_socket;
1336 
1337 	ifs->ipv4 = (inp->inp_vflag & INP_IPV6) ? 0 : 1;
1338 	ifs->local = (tp->t_flags & TF_LOCAL) ? 1 : 0;
1339 	ifs->connreset = (so->so_error == ECONNRESET) ? 1 : 0;
1340 	ifs->conntimeout = (so->so_error == ETIMEDOUT) ? 1 : 0;
1341 	ifs->ecn_flags = tp->ecn_flags;
1342 	ifs->txretransmitbytes = tp->t_stat.txretransmitbytes;
1343 	ifs->rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1344 	ifs->rxmitpkts = tp->t_stat.rxmitpkts;
1345 	ifs->rcvoopack = tp->t_rcvoopack;
1346 	ifs->pawsdrop = tp->t_pawsdrop;
1347 	ifs->sack_recovery_episodes = tp->t_sack_recovery_episode;
1348 	ifs->reordered_pkts = tp->t_reordered_pkts;
1349 	ifs->dsack_sent = tp->t_dsack_sent;
1350 	ifs->dsack_recvd = tp->t_dsack_recvd;
1351 	ifs->srtt = tp->t_srtt;
1352 	ifs->rttupdated = tp->t_rttupdated;
1353 	ifs->rttvar = tp->t_rttvar;
1354 	ifs->rttmin = get_base_rtt(tp);
1355 	if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_sndbw_max > 0) {
1356 		ifs->bw_sndbw_max = tp->t_bwmeas->bw_sndbw_max;
1357 	} else {
1358 		ifs->bw_sndbw_max = 0;
1359 	}
1360 	if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_rcvbw_max > 0) {
1361 		ifs->bw_rcvbw_max = tp->t_bwmeas->bw_rcvbw_max;
1362 	} else {
1363 		ifs->bw_rcvbw_max = 0;
1364 	}
1365 	ifs->bk_txpackets = so->so_tc_stats[MBUF_TC_BK].txpackets;
1366 	ifs->txpackets = inp->inp_stat->txpackets;
1367 	ifs->rxpackets = inp->inp_stat->rxpackets;
1368 }
1369 
1370 static inline void
1371 tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
1372     struct if_tcp_ecn_perf_stat *stat)
1373 {
1374 	u_int64_t curval, oldval;
1375 	stat->total_txpkts += ifs->txpackets;
1376 	stat->total_rxpkts += ifs->rxpackets;
1377 	stat->total_rxmitpkts += ifs->rxmitpkts;
1378 	stat->total_oopkts += ifs->rcvoopack;
1379 	stat->total_reorderpkts += (ifs->reordered_pkts +
1380 	    ifs->pawsdrop + ifs->dsack_sent + ifs->dsack_recvd);
1381 
1382 	/* Average RTT */
1383 	curval = ifs->srtt >> TCP_RTT_SHIFT;
1384 	if (curval > 0 && ifs->rttupdated >= 16) {
1385 		if (stat->rtt_avg == 0) {
1386 			stat->rtt_avg = curval;
1387 		} else {
1388 			oldval = stat->rtt_avg;
1389 			stat->rtt_avg = ((oldval << 4) - oldval + curval) >> 4;
1390 		}
1391 	}
1392 
1393 	/* RTT variance */
1394 	curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1395 	if (curval > 0 && ifs->rttupdated >= 16) {
1396 		if (stat->rtt_var == 0) {
1397 			stat->rtt_var = curval;
1398 		} else {
1399 			oldval = stat->rtt_var;
1400 			stat->rtt_var =
1401 			    ((oldval << 4) - oldval + curval) >> 4;
1402 		}
1403 	}
1404 
1405 	/* SACK episodes */
1406 	stat->sack_episodes += ifs->sack_recovery_episodes;
1407 	if (ifs->connreset) {
1408 		stat->rst_drop++;
1409 	}
1410 }
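/*
 * Editor's example: ((oldval << 4) - oldval + curval) >> 4 is an EWMA with
 * gain 1/16, i.e. avg = (15 * old + new) / 16 -- the same alpha as the
 * classic Jacobson srtt filter. E.g. old = 100, new = 116 gives
 * (1600 - 100 + 116) >> 4 == 101.
 */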
1411 
1412 static inline void
1413 tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
1414     struct if_lim_perf_stat *stat)
1415 {
1416 	u_int64_t curval, oldval;
1417 
1418 	stat->lim_total_txpkts += ifs->txpackets;
1419 	stat->lim_total_rxpkts += ifs->rxpackets;
1420 	stat->lim_total_retxpkts += ifs->rxmitpkts;
1421 	stat->lim_total_oopkts += ifs->rcvoopack;
1422 
1423 	if (ifs->bw_sndbw_max > 0) {
1424 		/* convert from bytes per ms to bits per second */
1425 		ifs->bw_sndbw_max *= 8000;
1426 		stat->lim_ul_max_bandwidth = MAX(stat->lim_ul_max_bandwidth,
1427 		    ifs->bw_sndbw_max);
1428 	}
1429 
1430 	if (ifs->bw_rcvbw_max > 0) {
1431 		/* convert from bytes per ms to bits per second */
1432 		ifs->bw_rcvbw_max *= 8000;
1433 		stat->lim_dl_max_bandwidth = MAX(stat->lim_dl_max_bandwidth,
1434 		    ifs->bw_rcvbw_max);
1435 	}
1436 
1437 	/* Average RTT */
1438 	curval = ifs->srtt >> TCP_RTT_SHIFT;
1439 	if (curval > 0 && ifs->rttupdated >= 16) {
1440 		if (stat->lim_rtt_average == 0) {
1441 			stat->lim_rtt_average = curval;
1442 		} else {
1443 			oldval = stat->lim_rtt_average;
1444 			stat->lim_rtt_average =
1445 			    ((oldval << 4) - oldval + curval) >> 4;
1446 		}
1447 	}
1448 
1449 	/* RTT variance */
1450 	curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1451 	if (curval > 0 && ifs->rttupdated >= 16) {
1452 		if (stat->lim_rtt_variance == 0) {
1453 			stat->lim_rtt_variance = curval;
1454 		} else {
1455 			oldval = stat->lim_rtt_variance;
1456 			stat->lim_rtt_variance =
1457 			    ((oldval << 4) - oldval + curval) >> 4;
1458 		}
1459 	}
1460 
1461 	if (stat->lim_rtt_min == 0) {
1462 		stat->lim_rtt_min = ifs->rttmin;
1463 	} else {
1464 		stat->lim_rtt_min = MIN(stat->lim_rtt_min, ifs->rttmin);
1465 	}
1466 
1467 	/* connection timeouts */
1468 	stat->lim_conn_attempts++;
1469 	if (ifs->conntimeout) {
1470 		stat->lim_conn_timeouts++;
1471 	}
1472 
1473 	/* bytes sent using background delay-based algorithms */
1474 	stat->lim_bk_txpkts += ifs->bk_txpackets;
1475 }
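/*
 * Editor's note: the bandwidth fields arrive in bytes per millisecond, so
 * the * 8000 above is 8 bits/byte * 1000 ms/s; e.g. 125 bytes/ms becomes
 * 1,000,000 bits/s (1 Mbit/s).
 */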
1476 
1477 /*
1478  * Close a TCP control block:
1479  *	discard all space held by the tcp
1480  *	discard internet protocol block
1481  *	wake up any sleepers
1482  */
1483 struct tcpcb *
1484 tcp_close(struct tcpcb *tp)
1485 {
1486 	struct inpcb *inp = tp->t_inpcb;
1487 	struct socket *so = inp->inp_socket;
1488 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1489 	struct route *ro;
1490 	struct rtentry *rt;
1491 	int dosavessthresh;
1492 	struct ifnet_stats_per_flow ifs;
1493 
1494 	/* tcp_close was called previously, bail */
1495 	if (inp->inp_ppcb == NULL) {
1496 		return NULL;
1497 	}
1498 
1499 	tcp_del_fsw_flow(tp);
1500 
1501 	tcp_canceltimers(tp);
1502 	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
1503 
1504 	/*
1505 	 * If another thread for this tcp is currently in ip (indicated by
1506 	 * the TF_SENDINPROG flag), defer the cleanup until after it returns
1507 	 * back to tcp.  This is done to serialize the close until after all
1508 	 * pending output is finished, in order to avoid having the PCB be
1509 	 * detached and the cached route cleaned, only for ip to cache the
1510 	 * route back into the PCB again.  Note that we've cleared all the
1511 	 * timers at this point.  Set TF_CLOSING to indicate to tcp_output()
1512 	 * that is should call us again once it returns from ip; at that
1513 	 * point both flags should be cleared and we can proceed further
1514 	 * with the cleanup.
1515 	 */
1516 	if ((tp->t_flags & TF_CLOSING) ||
1517 	    inp->inp_sndinprog_cnt > 0) {
1518 		tp->t_flags |= TF_CLOSING;
1519 		return NULL;
1520 	}
1521 
1522 	TCP_LOG_CONNECTION_SUMMARY(tp);
1523 
1524 	DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1525 	    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1526 
1527 	ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route);
1528 	rt = ro->ro_rt;
1529 	if (rt != NULL) {
1530 		RT_LOCK_SPIN(rt);
1531 	}
1532 
1533 	/*
1534 	 * If we got enough samples through the srtt filter,
1535 	 * save the rtt and rttvar in the routing entry.
1536 	 * 'Enough' is arbitrarily defined as 16 samples.
1537 	 * 16 samples is enough for the srtt filter to converge
1538 	 * to within 5% of the correct value; fewer samples and
1539 	 * we could save a very bogus rtt.
1540 	 *
1541 	 * Don't update the default route's characteristics and don't
1542 	 * update anything that the user "locked".
1543 	 */
1544 	if (tp->t_rttupdated >= 16) {
1545 		u_int32_t i = 0;
1546 		bool log_rtt = false;
1547 
1548 		if (isipv6) {
1549 			struct sockaddr_in6 *sin6;
1550 
1551 			if (rt == NULL) {
1552 				goto no_valid_rt;
1553 			}
1554 			sin6 = SIN6(rt_key(rt));
1555 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1556 				goto no_valid_rt;
1557 			}
1558 		} else if (ROUTE_UNUSABLE(ro) ||
1559 		    SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) {
1560 			DTRACE_TCP4(state__change, void, NULL,
1561 			    struct inpcb *, inp, struct tcpcb *, tp,
1562 			    int32_t, TCPS_CLOSED);
1563 			TCP_LOG_STATE(tp, TCPS_CLOSED);
1564 			tp->t_state = TCPS_CLOSED;
1565 			goto no_valid_rt;
1566 		}
1567 
1568 		RT_LOCK_ASSERT_HELD(rt);
1569 		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1570 			i = tp->t_srtt *
1571 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1572 			if (rt->rt_rmx.rmx_rtt && i) {
1573 				/*
1574 				 * filter this update to half the old & half
1575 				 * the new values, converting scale.
1576 				 * See route.h and tcp_var.h for a
1577 				 * description of the scaling constants.
1578 				 */
1579 				rt->rt_rmx.rmx_rtt =
1580 				    (rt->rt_rmx.rmx_rtt + i) / 2;
1581 			} else {
1582 				rt->rt_rmx.rmx_rtt = i;
1583 			}
1584 			tcpstat.tcps_cachedrtt++;
1585 			log_rtt = true;
1586 		}
1587 		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1588 			i = tp->t_rttvar *
1589 			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1590 			if (rt->rt_rmx.rmx_rttvar && i) {
1591 				rt->rt_rmx.rmx_rttvar =
1592 				    (rt->rt_rmx.rmx_rttvar + i) / 2;
1593 			} else {
1594 				rt->rt_rmx.rmx_rttvar = i;
1595 			}
1596 			tcpstat.tcps_cachedrttvar++;
1597 			log_rtt = true;
1598 		}
1599 		if (log_rtt) {
1600 			TCP_LOG_RTM_RTT(tp, rt);
1601 			TCP_LOG_RTT_INFO(tp);
1602 		}
1603 		/*
1604 		 * The old comment here said:
1605 		 * update the pipelimit (ssthresh) if it has been updated
1606 	 * already or if a pipesize was specified & the threshold
1607 		 * got below half the pipesize.  I.e., wait for bad news
1608 		 * before we start updating, then update on both good
1609 		 * and bad news.
1610 		 *
1611 		 * But we want to save the ssthresh even if no pipesize is
1612 		 * specified explicitly in the route, because such
1613 		 * connections still have an implicit pipesize specified
1614 		 * by the global tcp_sendspace.  In the absence of a reliable
1615 		 * way to calculate the pipesize, it will have to do.
1616 		 */
1617 		i = tp->snd_ssthresh;
1618 		if (rt->rt_rmx.rmx_sendpipe != 0) {
1619 			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
1620 		} else {
1621 			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
1622 		}
1623 		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1624 		    i != 0 && rt->rt_rmx.rmx_ssthresh != 0) ||
1625 		    dosavessthresh) {
1626 			/*
1627 			 * convert the limit from user data bytes to
1628 			 * packets then to packet data bytes.
1629 			 */
1630 			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
1631 			if (i < 2) {
1632 				i = 2;
1633 			}
1634 			i *= (u_int32_t)(tp->t_maxseg +
1635 			    (isipv6 ? sizeof(struct ip6_hdr) +
1636 			    sizeof(struct tcphdr) :
1637 			    sizeof(struct tcpiphdr)));
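			/*
			 * Worked example with hypothetical values, assuming
			 * sizeof(struct tcpiphdr) == 40: a snd_ssthresh of
			 * 8760 user bytes and a 1460-byte t_maxseg round to
			 * (8760 + 730) / 1460 == 6 packets, and
			 * 6 * (1460 + 40) == 9000 packet-data bytes get
			 * cached in the route (IPv4 case).
			 */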
1638 			if (rt->rt_rmx.rmx_ssthresh) {
1639 				rt->rt_rmx.rmx_ssthresh =
1640 				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
1641 			} else {
1642 				rt->rt_rmx.rmx_ssthresh = i;
1643 			}
1644 			tcpstat.tcps_cachedssthresh++;
1645 		}
1646 	}
1647 
1648 	/*
1649 	 * Mark route for deletion if no information is cached.
1650 	 */
1651 	if (rt != NULL && (so->so_flags & SOF_OVERFLOW)) {
1652 		if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
1653 		    rt->rt_rmx.rmx_rtt == 0) {
1654 			rt->rt_flags |= RTF_DELCLONE;
1655 		}
1656 	}
1657 
1658 no_valid_rt:
1659 	if (rt != NULL) {
1660 		RT_UNLOCK(rt);
1661 	}
1662 
1663 	/* free the reassembly queue, if any */
1664 	(void) tcp_freeq(tp);
1665 
1666 	/* performance stats per interface */
1667 	tcp_create_ifnet_stats_per_flow(tp, &ifs);
1668 	tcp_update_stats_per_flow(&ifs, inp->inp_last_outifp);
1669 
1670 	tcp_free_sackholes(tp);
1671 	tcp_notify_ack_free(tp);
1672 
1673 	inp_decr_sndbytes_allunsent(so, tp->snd_una);
1674 
1675 	if (tp->t_bwmeas != NULL) {
1676 		tcp_bwmeas_free(tp);
1677 	}
1678 	tcp_rxtseg_clean(tp);
1679 	tcp_segs_sent_clean(tp, true);
1680 
1681 	/* Free the packet list */
1682 	if (tp->t_pktlist_head != NULL) {
1683 		m_freem_list(tp->t_pktlist_head);
1684 	}
1685 	TCP_PKTLIST_CLEAR(tp);
1686 
1687 	if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) {
1688 		inp->inp_saved_ppcb = (caddr_t) tp;
1689 	}
1690 
1691 	TCP_LOG_STATE(tp, TCPS_CLOSED);
1692 	tp->t_state = TCPS_CLOSED;
1693 
1694 	/*
1695 	 * Issue a wakeup before detach so that we don't miss
1696 	 * a wakeup
1697 	 */
1698 	sodisconnectwakeup(so);
1699 
1700 	/*
1701 	 * Make sure to clear the TCP Keep Alive Offload as it is
1702 	 * ref counted on the interface
1703 	 */
1704 	tcp_clear_keep_alive_offload(so);
1705 
1706 	/*
1707 	 * If this is a socket that does not want to wake up the device
1708 	 * for its traffic, the application might need to know that the
1709 	 * socket is closed, send a notification.
1710 	 */
1711 	if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
1712 	    inp->inp_state != INPCB_STATE_DEAD &&
1713 	    !(inp->inp_flags2 & INP2_TIMEWAIT)) {
1714 		socket_post_kev_msg_closed(so);
1715 	}
1716 
1717 	if (CC_ALGO(tp)->cleanup != NULL) {
1718 		CC_ALGO(tp)->cleanup(tp);
1719 	}
1720 
1721 	tp->tcp_cc_index = TCP_CC_ALGO_NONE;
1722 
1723 	if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.cleanup != NULL) {
1724 		tcp_cc_rledbat.cleanup(tp);
1725 	}
1726 
1727 	/* Can happen if we close the socket before receiving the third ACK */
1728 	if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
1729 		OSDecrementAtomic(&tcp_tfo_halfcnt);
1730 
1731 		/* Panic if something has gone terribly wrong. */
1732 		VERIFY(tcp_tfo_halfcnt >= 0);
1733 
1734 		tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
1735 	}
1736 
1737 	if (SOCK_CHECK_DOM(so, PF_INET6)) {
1738 		in6_pcbdetach(inp);
1739 	} else {
1740 		in_pcbdetach(inp);
1741 	}
1742 
1743 	/*
1744 	 * Call soisdisconnected after detach because it might unlock the socket
1745 	 */
1746 	soisdisconnected(so);
1747 	tcpstat.tcps_closed++;
1748 	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END,
1749 	    tcpstat.tcps_closed, 0, 0, 0, 0);
1750 	return NULL;
1751 }
1752 
1753 int
1754 tcp_freeq(struct tcpcb *tp)
1755 {
1756 	struct tseg_qent *q;
1757 	int rv = 0;
1758 	int count = 0;
1759 
1760 	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
1761 		LIST_REMOVE(q, tqe_q);
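		/*
		 * Undo the space accounting for this entry: each queued
		 * segment was charged one mbuf header (_MSIZE) plus the
		 * size of any attached external cluster.
		 */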
1762 		tp->t_reassq_mbcnt -= _MSIZE + ((q->tqe_m->m_flags & M_EXT) ?
1763 		    q->tqe_m->m_ext.ext_size : 0);
1764 		m_freem(q->tqe_m);
1765 		zfree(tcp_reass_zone, q);
1766 		rv = 1;
1767 		count++;
1768 	}
1769 	tp->t_reassqlen = 0;
1770 	if (count > 0) {
1771 		OSAddAtomic(-count, &tcp_reass_total_qlen);
1772 	}
1773 	return rv;
1774 }
1775 
1776 
1777 void
1778 tcp_drain(void)
1779 {
1780 	struct inpcb *inp;
1781 	struct tcpcb *tp;
1782 
1783 	if (!lck_rw_try_lock_exclusive(&tcbinfo.ipi_lock)) {
1784 		return;
1785 	}
1786 
1787 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
1788 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
1789 		    WNT_STOPUSING) {
1790 			socket_lock(inp->inp_socket, 1);
1791 			if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
1792 			    == WNT_STOPUSING) {
1793 				/* lost a race, try the next one */
1794 				socket_unlock(inp->inp_socket, 1);
1795 				continue;
1796 			}
1797 			tp = intotcpcb(inp);
1798 
1799 			so_drain_extended_bk_idle(inp->inp_socket);
1800 
1801 			socket_unlock(inp->inp_socket, 1);
1802 		}
1803 	}
1804 	lck_rw_done(&tcbinfo.ipi_lock);
1805 }
1806 
1807 /*
1808  * Notify a tcp user of an asynchronous error;
1809  * store the error as a soft error on the connection.
1811  *
1812  * Do not wake up user since there currently is no mechanism for
1813  * reporting soft errors (yet - a kqueue filter may be added).
1814  */
1815 static void
1816 tcp_notify(struct inpcb *inp, int error)
1817 {
1818 	struct tcpcb *tp;
1819 
1820 	if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) {
1821 		return; /* pcb is gone already */
1822 	}
1823 	tp = (struct tcpcb *)inp->inp_ppcb;
1824 
1825 	VERIFY(tp != NULL);
1826 	/*
1827 	 * Ignore some errors if we are hooked up.
1828 	 * If connection hasn't completed, has retransmitted several times,
1829 	 * and receives a second error, give up now.  This is better
1830 	 * than waiting a long time to establish a connection that
1831 	 * can never complete.
1832 	 */
1833 	if (tp->t_state == TCPS_ESTABLISHED &&
1834 	    (error == EHOSTUNREACH || error == ENETUNREACH ||
1835 	    error == EHOSTDOWN)) {
1836 		if (inp->inp_route.ro_rt) {
1837 			rtfree(inp->inp_route.ro_rt);
1838 			inp->inp_route.ro_rt = (struct rtentry *)NULL;
1839 		}
1840 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
1841 	    tp->t_softerror) {
1842 		tcp_drop(tp, error);
1843 	} else {
1844 		tp->t_softerror = error;
1845 	}
1846 }
1847 
1848 struct bwmeas *
1849 tcp_bwmeas_alloc(struct tcpcb *tp)
1850 {
1851 	struct bwmeas *elm;
1852 	elm = zalloc_flags(tcp_bwmeas_zone, Z_ZERO | Z_WAITOK);
1853 	elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE;
1854 	elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg;
1855 	return elm;
1856 }
1857 
1858 void
1859 tcp_bwmeas_free(struct tcpcb *tp)
1860 {
1861 	zfree(tcp_bwmeas_zone, tp->t_bwmeas);
1862 	tp->t_bwmeas = NULL;
1863 	tp->t_flagsext &= ~(TF_MEASURESNDBW);
1864 }
1865 
1866 int
1867 get_tcp_inp_list(struct inpcb * __single *inp_list __counted_by(n), size_t n, inp_gen_t gencnt)
1868 {
1869 	struct tcpcb *tp;
1870 	struct inpcb *inp;
1871 	int i = 0;
1872 
1873 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
1874 		if (i >= n) {
1875 			break;
1876 		}
1877 		if (inp->inp_gencnt <= gencnt &&
1878 		    inp->inp_state != INPCB_STATE_DEAD) {
1879 			inp_list[i++] = inp;
1880 		}
1881 	}
1882 
1883 	TAILQ_FOREACH(tp, &tcp_tw_tailq, t_twentry) {
1884 		if (i >= n) {
1885 			break;
1886 		}
1887 		inp = tp->t_inpcb;
1888 		if (inp->inp_gencnt <= gencnt &&
1889 		    inp->inp_state != INPCB_STATE_DEAD) {
1890 			inp_list[i++] = inp;
1891 		}
1892 	}
1893 	return i;
1894 }
1895 
1896 /*
1897  * tcpcb_to_otcpcb copies specific bits of a tcpcb to a otcpcb format.
1898  * The otcpcb data structure is passed to user space and must not change.
1899  */
1900 static void
1901 tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
1902 {
1903 	otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
1904 	otp->t_dupacks = tp->t_dupacks;
1905 	otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
1906 	otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
1907 	otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
1908 	otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
1909 	otp->t_inpcb =
1910 	    (_TCPCB_PTR(struct inpcb *))VM_KERNEL_ADDRHASH(tp->t_inpcb);
1911 	otp->t_state = tp->t_state;
1912 	otp->t_flags = tp->t_flags;
1913 	otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
1914 	otp->snd_una = tp->snd_una;
1915 	otp->snd_max = tp->snd_max;
1916 	otp->snd_nxt = tp->snd_nxt;
1917 	otp->snd_up = tp->snd_up;
1918 	otp->snd_wl1 = tp->snd_wl1;
1919 	otp->snd_wl2 = tp->snd_wl2;
1920 	otp->iss = tp->iss;
1921 	otp->irs = tp->irs;
1922 	otp->rcv_nxt = tp->rcv_nxt;
1923 	otp->rcv_adv = tp->rcv_adv;
1924 	otp->rcv_wnd = tp->rcv_wnd;
1925 	otp->rcv_up = tp->rcv_up;
1926 	otp->snd_wnd = tp->snd_wnd;
1927 	otp->snd_cwnd = tp->snd_cwnd;
1928 	otp->snd_ssthresh = tp->snd_ssthresh;
1929 	otp->t_maxopd = tp->t_maxopd;
1930 	otp->t_rcvtime = tp->t_rcvtime;
1931 	otp->t_starttime = tp->t_starttime;
1932 	otp->t_rtttime = tp->t_rtttime;
1933 	otp->t_rtseq = tp->t_rtseq;
1934 	otp->t_rxtcur = tp->t_rxtcur;
1935 	otp->t_maxseg = tp->t_maxseg;
1936 	otp->t_srtt = tp->t_srtt;
1937 	otp->t_rttvar = tp->t_rttvar;
1938 	otp->t_rxtshift = tp->t_rxtshift;
1939 	otp->t_rttmin = tp->t_rttmin;
1940 	otp->t_rttupdated = tp->t_rttupdated;
1941 	otp->max_sndwnd = tp->max_sndwnd;
1942 	otp->t_softerror = tp->t_softerror;
1943 	otp->t_oobflags = tp->t_oobflags;
1944 	otp->t_iobc = tp->t_iobc;
1945 	otp->snd_scale = tp->snd_scale;
1946 	otp->rcv_scale = tp->rcv_scale;
1947 	otp->request_r_scale = tp->request_r_scale;
1948 	otp->requested_s_scale = tp->requested_s_scale;
1949 	otp->ts_recent = tp->ts_recent;
1950 	otp->ts_recent_age = tp->ts_recent_age;
1951 	otp->last_ack_sent = tp->last_ack_sent;
1952 	otp->cc_send = 0;
1953 	otp->cc_recv = 0;
1954 	otp->snd_recover = tp->snd_recover;
1955 	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
1956 	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
1957 	otp->t_badrxtwin = 0;
1958 }
1959 
1960 static int
1961 tcp_pcblist SYSCTL_HANDLER_ARGS
1962 {
1963 #pragma unused(oidp, arg1, arg2)
1964 	int error, i = 0, n, sz;
1965 	struct inpcb **inp_list;
1966 	inp_gen_t gencnt;
1967 	struct xinpgen xig;
1968 
1969 	/*
1970 	 * The process of preparing the TCB list is too time-consuming and
1971 	 * resource-intensive to repeat twice on every request.
1972 	 */
1973 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
1974 	if (req->oldptr == USER_ADDR_NULL) {
1975 		n = tcbinfo.ipi_count;
1976 		req->oldidx = 2 * (sizeof(xig))
1977 		    + (n + n / 8) * sizeof(struct xtcpcb);
1978 		lck_rw_done(&tcbinfo.ipi_lock);
1979 		return 0;
1980 	}
1981 
1982 	if (req->newptr != USER_ADDR_NULL) {
1983 		lck_rw_done(&tcbinfo.ipi_lock);
1984 		return EPERM;
1985 	}
1986 
1987 	/*
1988 	 * OK, now we're committed to doing something.
1989 	 */
1990 	gencnt = tcbinfo.ipi_gencnt;
1991 	sz = n = tcbinfo.ipi_count;
1992 
1993 	bzero(&xig, sizeof(xig));
1994 	xig.xig_len = sizeof(xig);
1995 	xig.xig_count = n;
1996 	xig.xig_gen = gencnt;
1997 	xig.xig_sogen = so_gencnt;
1998 	error = SYSCTL_OUT(req, &xig, sizeof(xig));
1999 	if (error) {
2000 		lck_rw_done(&tcbinfo.ipi_lock);
2001 		return error;
2002 	}
2003 	/*
2004 	 * We are done if there is no pcb
2005 	 */
2006 	if (n == 0) {
2007 		lck_rw_done(&tcbinfo.ipi_lock);
2008 		return 0;
2009 	}
2010 
2011 	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2012 	if (inp_list == NULL) {
2013 		lck_rw_done(&tcbinfo.ipi_lock);
2014 		return ENOMEM;
2015 	}
2016 
2017 	n = get_tcp_inp_list(inp_list, n, gencnt);
2018 
2019 	error = 0;
2020 	for (i = 0; i < n; i++) {
2021 		struct xtcpcb xt;
2022 		caddr_t inp_ppcb;
2023 		struct inpcb *inp;
2024 
2025 		inp = inp_list[i];
2026 
2027 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2028 			continue;
2029 		}
2030 		socket_lock(inp->inp_socket, 1);
2031 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2032 			socket_unlock(inp->inp_socket, 1);
2033 			continue;
2034 		}
2035 		if (inp->inp_gencnt > gencnt) {
2036 			socket_unlock(inp->inp_socket, 1);
2037 			continue;
2038 		}
2039 
2040 		bzero(&xt, sizeof(xt));
2041 		xt.xt_len = sizeof(xt);
2042 		/* XXX should avoid extra copy */
2043 		inpcb_to_compat(inp, &xt.xt_inp);
2044 		inp_ppcb = inp->inp_ppcb;
2045 		if (inp_ppcb != NULL) {
2046 			tcpcb_to_otcpcb((struct tcpcb *)(void *)inp_ppcb,
2047 			    &xt.xt_tp);
2048 		} else {
2049 			bzero((char *) &xt.xt_tp, sizeof(xt.xt_tp));
2050 		}
2051 		if (inp->inp_socket) {
2052 			sotoxsocket(inp->inp_socket, &xt.xt_socket);
2053 		}
2054 
2055 		socket_unlock(inp->inp_socket, 1);
2056 
2057 		error = SYSCTL_OUT(req, &xt, sizeof(xt));
2058 	}
2059 	if (!error) {
2060 		/*
2061 		 * Give the user an updated idea of our state.
2062 		 * If the generation differs from what we told
2063 		 * her before, she knows that something happened
2064 		 * while we were processing this request, and it
2065 		 * might be necessary to retry.
2066 		 */
2067 		bzero(&xig, sizeof(xig));
2068 		xig.xig_len = sizeof(xig);
2069 		xig.xig_gen = tcbinfo.ipi_gencnt;
2070 		xig.xig_sogen = so_gencnt;
2071 		xig.xig_count = tcbinfo.ipi_count;
2072 		error = SYSCTL_OUT(req, &xig, sizeof(xig));
2073 	}
2074 
2075 	lck_rw_done(&tcbinfo.ipi_lock);
2076 	kfree_type(struct inpcb *, sz, inp_list);
2077 	return error;
2078 }
2079 
2080 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2081     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2082     tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
2083 
2084 #if XNU_TARGET_OS_OSX
2085 
2086 static void
2087 tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
2088 {
2089 	otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
2090 	otp->t_dupacks = tp->t_dupacks;
2091 	otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2092 	otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2093 	otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2094 	otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2095 	otp->t_state = tp->t_state;
2096 	otp->t_flags = tp->t_flags;
2097 	otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2098 	otp->snd_una = tp->snd_una;
2099 	otp->snd_max = tp->snd_max;
2100 	otp->snd_nxt = tp->snd_nxt;
2101 	otp->snd_up = tp->snd_up;
2102 	otp->snd_wl1 = tp->snd_wl1;
2103 	otp->snd_wl2 = tp->snd_wl2;
2104 	otp->iss = tp->iss;
2105 	otp->irs = tp->irs;
2106 	otp->rcv_nxt = tp->rcv_nxt;
2107 	otp->rcv_adv = tp->rcv_adv;
2108 	otp->rcv_wnd = tp->rcv_wnd;
2109 	otp->rcv_up = tp->rcv_up;
2110 	otp->snd_wnd = tp->snd_wnd;
2111 	otp->snd_cwnd = tp->snd_cwnd;
2112 	otp->snd_ssthresh = tp->snd_ssthresh;
2113 	otp->t_maxopd = tp->t_maxopd;
2114 	otp->t_rcvtime = tp->t_rcvtime;
2115 	otp->t_starttime = tp->t_starttime;
2116 	otp->t_rtttime = tp->t_rtttime;
2117 	otp->t_rtseq = tp->t_rtseq;
2118 	otp->t_rxtcur = tp->t_rxtcur;
2119 	otp->t_maxseg = tp->t_maxseg;
2120 	otp->t_srtt = tp->t_srtt;
2121 	otp->t_rttvar = tp->t_rttvar;
2122 	otp->t_rxtshift = tp->t_rxtshift;
2123 	otp->t_rttmin = tp->t_rttmin;
2124 	otp->t_rttupdated = tp->t_rttupdated;
2125 	otp->max_sndwnd = tp->max_sndwnd;
2126 	otp->t_softerror = tp->t_softerror;
2127 	otp->t_oobflags = tp->t_oobflags;
2128 	otp->t_iobc = tp->t_iobc;
2129 	otp->snd_scale = tp->snd_scale;
2130 	otp->rcv_scale = tp->rcv_scale;
2131 	otp->request_r_scale = tp->request_r_scale;
2132 	otp->requested_s_scale = tp->requested_s_scale;
2133 	otp->ts_recent = tp->ts_recent;
2134 	otp->ts_recent_age = tp->ts_recent_age;
2135 	otp->last_ack_sent = tp->last_ack_sent;
2136 	otp->cc_send = 0;
2137 	otp->cc_recv = 0;
2138 	otp->snd_recover = tp->snd_recover;
2139 	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2140 	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2141 	otp->t_badrxtwin = 0;
2142 }
2143 
2144 
2145 static int
2146 tcp_pcblist64 SYSCTL_HANDLER_ARGS
2147 {
2148 #pragma unused(oidp, arg1, arg2)
2149 	int error, i = 0, n, sz;
2150 	struct inpcb **inp_list;
2151 	inp_gen_t gencnt;
2152 	struct xinpgen xig;
2153 
2154 	/*
2155 	 * The process of preparing the TCB list is too time-consuming and
2156 	 * resource-intensive to repeat twice on every request.
2157 	 */
2158 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
2159 	if (req->oldptr == USER_ADDR_NULL) {
2160 		n = tcbinfo.ipi_count;
2161 		req->oldidx = 2 * (sizeof(xig))
2162 		    + (n + n / 8) * sizeof(struct xtcpcb64);
2163 		lck_rw_done(&tcbinfo.ipi_lock);
2164 		return 0;
2165 	}
2166 
2167 	if (req->newptr != USER_ADDR_NULL) {
2168 		lck_rw_done(&tcbinfo.ipi_lock);
2169 		return EPERM;
2170 	}
2171 
2172 	/*
2173 	 * OK, now we're committed to doing something.
2174 	 */
2175 	gencnt = tcbinfo.ipi_gencnt;
2176 	sz = n = tcbinfo.ipi_count;
2177 
2178 	bzero(&xig, sizeof(xig));
2179 	xig.xig_len = sizeof(xig);
2180 	xig.xig_count = n;
2181 	xig.xig_gen = gencnt;
2182 	xig.xig_sogen = so_gencnt;
2183 	error = SYSCTL_OUT(req, &xig, sizeof(xig));
2184 	if (error) {
2185 		lck_rw_done(&tcbinfo.ipi_lock);
2186 		return error;
2187 	}
2188 	/*
2189 	 * We are done if there is no pcb
2190 	 */
2191 	if (n == 0) {
2192 		lck_rw_done(&tcbinfo.ipi_lock);
2193 		return 0;
2194 	}
2195 
2196 	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2197 	if (inp_list == NULL) {
2198 		lck_rw_done(&tcbinfo.ipi_lock);
2199 		return ENOMEM;
2200 	}
2201 
2202 	n = get_tcp_inp_list(inp_list, n, gencnt);
2203 
2204 	error = 0;
2205 	for (i = 0; i < n; i++) {
2206 		struct xtcpcb64 xt;
2207 		struct inpcb *inp;
2208 
2209 		inp = inp_list[i];
2210 
2211 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2212 			continue;
2213 		}
2214 		socket_lock(inp->inp_socket, 1);
2215 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2216 			socket_unlock(inp->inp_socket, 1);
2217 			continue;
2218 		}
2219 		if (inp->inp_gencnt > gencnt) {
2220 			socket_unlock(inp->inp_socket, 1);
2221 			continue;
2222 		}
2223 
2224 		bzero(&xt, sizeof(xt));
2225 		xt.xt_len = sizeof(xt);
2226 		inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
2227 		xt.xt_inpcb.inp_ppcb =
2228 		    (uint64_t)VM_KERNEL_ADDRHASH(inp->inp_ppcb);
2229 		if (inp->inp_ppcb != NULL) {
2230 			tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb,
2231 			    &xt);
2232 		}
2233 		if (inp->inp_socket) {
2234 			sotoxsocket64(inp->inp_socket,
2235 			    &xt.xt_inpcb.xi_socket);
2236 		}
2237 
2238 		socket_unlock(inp->inp_socket, 1);
2239 
2240 		error = SYSCTL_OUT(req, &xt, sizeof(xt));
2241 	}
2242 	if (!error) {
2243 		/*
2244 		 * Give the user an updated idea of our state.
2245 		 * If the generation differs from what we told
2246 		 * her before, she knows that something happened
2247 		 * while we were processing this request, and it
2248 		 * might be necessary to retry.
2249 		 */
2250 		bzero(&xig, sizeof(xig));
2251 		xig.xig_len = sizeof(xig);
2252 		xig.xig_gen = tcbinfo.ipi_gencnt;
2253 		xig.xig_sogen = so_gencnt;
2254 		xig.xig_count = tcbinfo.ipi_count;
2255 		error = SYSCTL_OUT(req, &xig, sizeof(xig));
2256 	}
2257 
2258 	lck_rw_done(&tcbinfo.ipi_lock);
2259 	kfree_type(struct inpcb *, sz, inp_list);
2260 	return error;
2261 }
2262 
2263 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64,
2264     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2265     tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
2266 
2267 #endif /* XNU_TARGET_OS_OSX */
2268 
2269 static int
2270 tcp_pcblist_n SYSCTL_HANDLER_ARGS
2271 {
2272 #pragma unused(oidp, arg1, arg2)
2273 	int error = 0;
2274 
2275 	error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo);
2276 
2277 	return error;
2278 }
2279 
2280 
2281 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n,
2282     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2283     tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
2284 
2285 static int
2286 tcp_progress_probe_enable SYSCTL_HANDLER_ARGS
2287 {
2288 #pragma unused(oidp, arg1, arg2)
2289 
2290 	return ntstat_tcp_progress_enable(req);
2291 }
2292 
2293 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, progress_enable,
2294     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
2295     tcp_progress_probe_enable, "S", "Enable/disable TCP keepalive probing on the specified link(s)");
2296 
2297 
2298 __private_extern__ void
2299 tcp_get_ports_used(ifnet_t ifp, int protocol, uint32_t flags,
2300     bitstr_t *bitfield)
2301 {
2302 	inpcb_get_ports_used(ifp, protocol, flags, bitfield,
2303 	    &tcbinfo);
2304 }
2305 
2306 __private_extern__ uint32_t
2307 tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
2308 {
2309 	return inpcb_count_opportunistic(ifindex, &tcbinfo, flags);
2310 }
2311 
2312 __private_extern__ uint32_t
2313 tcp_find_anypcb_byaddr(struct ifaddr *ifa)
2314 {
2315 #if SKYWALK
2316 	if (netns_is_enabled()) {
2317 		return netns_find_anyres_byaddr(ifa, IPPROTO_TCP);
2318 	} else
2319 #endif /* SKYWALK */
2320 	return inpcb_find_anypcb_byaddr(ifa, &tcbinfo);
2321 }
2322 
2323 static void
2324 tcp_handle_msgsize(struct ip *ip, struct inpcb *inp)
2325 {
2326 	struct rtentry *rt = NULL;
2327 	u_short ifscope = IFSCOPE_NONE;
2328 	int mtu;
2329 	struct sockaddr_in icmpsrc = {
2330 		.sin_len = sizeof(struct sockaddr_in),
2331 		.sin_family = AF_INET, .sin_port = 0, .sin_addr = { .s_addr = 0 },
2332 		.sin_zero = { 0, 0, 0, 0, 0, 0, 0, 0 }
2333 	};
2334 	struct icmp *icp = NULL;
2335 
2336 	icp = (struct icmp *)(void *)
2337 	    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));
2338 
2339 	icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
2340 
2341 	/*
2342 	 * MTU discovery:
2343 	 * If we got a needfrag and there is a host route to the
2344 	 * original destination, and the MTU is not locked, then
2345 	 * set the MTU in the route to the suggested new value
2346 	 * (if given) and then notify as usual.  The ULPs will
2347 	 * notice that the MTU has changed and adapt accordingly.
2348 	 * If no new MTU was suggested, then we guess a new one
2349 	 * less than the current value.  If the new MTU is
2350 	 * unreasonably small (defined by sysctl tcp_minmss), then
2351 	 * we reset the MTU to the interface value and enable the
2352 	 * lock bit, indicating that we are no longer doing MTU
2353 	 * discovery.
2354 	 */
2355 	if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) {
2356 		rt = inp->inp_route.ro_rt;
2357 	}
2358 
2359 	/*
2360 	 * icmp6_mtudisc_update scopes the routing lookup
2361 	 * to the incoming interface (delivered from mbuf
2362 	 * packet header).
2363 	 * That is mostly ok but for asymmetric networks
2364 	 * that may be an issue.
2365 	 * "Fragmentation needed" and "Packet too big" really communicate
2366 	 * the MTU for the outbound data path.
2367 	 * Take the interface scope from the cached route or
2368 	 * from the last outgoing interface of the inp.
2369 	 */
2370 	if (rt != NULL) {
2371 		ifscope = (rt->rt_ifp != NULL) ?
2372 		    rt->rt_ifp->if_index : IFSCOPE_NONE;
2373 	} else {
2374 		ifscope = (inp->inp_last_outifp != NULL) ?
2375 		    inp->inp_last_outifp->if_index : IFSCOPE_NONE;
2376 	}
2377 
2378 	if ((rt == NULL) ||
2379 	    !(rt->rt_flags & RTF_HOST) ||
2380 	    (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING))) {
2381 		rt = rtalloc1_scoped(SA(&icmpsrc), 0, RTF_CLONING | RTF_PRCLONING, ifscope);
2382 	} else if (rt) {
2383 		RT_LOCK(rt);
2384 		rtref(rt);
2385 		RT_UNLOCK(rt);
2386 	}
2387 
2388 	if (rt != NULL) {
2389 		RT_LOCK(rt);
2390 		if ((rt->rt_flags & RTF_HOST) &&
2391 		    !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
2392 			mtu = ntohs(icp->icmp_nextmtu);
2393 			/*
2394 			 * XXX Stock BSD has changed the following
2395 			 * to compare with icp->icmp_ip.ip_len
2396 			 * to converge faster when sent packet
2397 			 * < route's MTU. We may want to adopt
2398 			 * that change.
2399 			 */
2400 			if (mtu == 0) {
2401 				mtu = ip_next_mtu(rt->rt_rmx.
2402 				    rmx_mtu, 1);
2403 			}
2404 #if DEBUG_MTUDISC
2405 			printf("MTU for %s reduced to %d\n",
2406 			    inet_ntop(AF_INET,
2407 			    &icmpsrc.sin_addr, ipv4str,
2408 			    sizeof(ipv4str)), mtu);
2409 #endif
2410 			if (mtu < max(296, (tcp_minmss +
2411 			    sizeof(struct tcpiphdr)))) {
2412 				rt->rt_rmx.rmx_locks |= RTV_MTU;
2413 			} else if (rt->rt_rmx.rmx_mtu > mtu) {
2414 				rt->rt_rmx.rmx_mtu = mtu;
2415 			}
2416 		}
2417 		RT_UNLOCK(rt);
2418 		rtfree(rt);
2419 	}
2420 }
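/*
 * Editorial sketch (not part of the original source): when the ICMP
 * "needfrag" carries no next-hop MTU, ip_next_mtu() above has to guess
 * a smaller value.  One common approach, in the spirit of the RFC 1191
 * plateau table, walks a descending list of typical link MTUs and picks
 * the first entry below the current one.  The function and table below
 * are illustrative assumptions, not the actual ip_next_mtu()
 * implementation.
 */
#if 0 /* illustrative only */
static int
example_next_lower_mtu(int mtu)
{
	/* Common MTU plateaus from RFC 1191, largest first. */
	static const int plateaus[] = {
		65535, 32000, 17914, 8166, 4352, 2002, 1492, 1006, 508, 296, 68
	};
	unsigned int i;

	for (i = 0; i < sizeof(plateaus) / sizeof(plateaus[0]); i++) {
		if (plateaus[i] < mtu) {
			return plateaus[i];
		}
	}
	return 68;      /* IPv4 minimum; give up searching below this */
}
#endif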
2421 
2422 void
2423 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp)
2424 {
2425 	tcp_seq icmp_tcp_seq;
2426 	struct ipctlparam *ctl_param = vip;
2427 	struct ip *ip = NULL;
2428 	struct mbuf *m = NULL;
2429 	struct in_addr faddr;
2430 	struct inpcb *inp;
2431 	struct tcpcb *tp;
2432 	struct tcphdr *th;
2433 	struct icmp *icp;
2434 	size_t off;
2435 #if SKYWALK
2436 	union sockaddr_in_4_6 sock_laddr;
2437 	struct protoctl_ev_val prctl_ev_val;
2438 #endif /* SKYWALK */
2439 	void (*notify)(struct inpcb *, int) = tcp_notify;
2440 
2441 	if (ctl_param != NULL) {
2442 		ip = ctl_param->ipc_icmp_ip;
2443 		icp = ctl_param->ipc_icmp;
2444 		m = ctl_param->ipc_m;
2445 		off = ctl_param->ipc_off;
2446 	} else {
2447 		ip = NULL;
2448 		icp = NULL;
2449 		m = NULL;
2450 		off = 0;
2451 	}
2452 
2453 	faddr = SIN(sa)->sin_addr;
2454 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
2455 		return;
2456 	}
2457 
2458 	if ((unsigned)cmd >= PRC_NCMDS) {
2459 		return;
2460 	}
2461 
2462 	/* Source quench is deprecated */
2463 	if (cmd == PRC_QUENCH) {
2464 		return;
2465 	}
2466 
2467 	if (cmd == PRC_MSGSIZE) {
2468 		notify = tcp_mtudisc;
2469 	} else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2470 	    cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2471 	    cmd == PRC_TIMXCEED_INTRANS) && ip) {
2472 		notify = tcp_drop_syn_sent;
2473 	}
2474 	/*
2475 	 * Hostdead is ugly because it goes linearly through all PCBs.
2476 	 * XXX: We never get this from ICMP, otherwise it makes an
2477 	 * excellent DoS attack on machines with many connections.
2478 	 */
2479 	else if (cmd == PRC_HOSTDEAD) {
2480 		ip = NULL;
2481 	} else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2482 		return;
2483 	}
2484 
2485 #if SKYWALK
2486 	bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2487 	bzero(&sock_laddr, sizeof(sock_laddr));
2488 #endif /* SKYWALK */
2489 
2490 	if (ip == NULL) {
2491 		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
2492 #if SKYWALK
2493 		protoctl_event_enqueue_nwk_wq_entry(ifp, NULL,
2494 		    sa, 0, 0, IPPROTO_TCP, cmd, NULL);
2495 #endif /* SKYWALK */
2496 		return;
2497 	}
2498 
2499 	/* Check if we can safely get the sport, dport and the sequence number from the tcp header. */
2500 	if (m == NULL ||
2501 	    (m->m_len < off + (sizeof(unsigned short) + sizeof(unsigned short) + sizeof(tcp_seq)))) {
2502 		/* Insufficient length */
2503 		return;
2504 	}
2505 
2506 	th = (struct tcphdr*)(void*)(mtod(m, uint8_t*) + off);
2507 	icmp_tcp_seq = ntohl(th->th_seq);
2508 
2509 	inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
2510 	    ip->ip_src, th->th_sport, 0, NULL);
2511 
2512 	if (inp == NULL ||
2513 	    inp->inp_socket == NULL) {
2514 #if SKYWALK
2515 		if (cmd == PRC_MSGSIZE) {
2516 			prctl_ev_val.val = ntohs(icp->icmp_nextmtu);
2517 		}
2518 		prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2519 
2520 		sock_laddr.sin.sin_family = AF_INET;
2521 		sock_laddr.sin.sin_len = sizeof(sock_laddr.sin);
2522 		sock_laddr.sin.sin_addr = ip->ip_src;
2523 
2524 		protoctl_event_enqueue_nwk_wq_entry(ifp,
2525 		    SA(&sock_laddr), sa,
2526 		    th->th_sport, th->th_dport, IPPROTO_TCP,
2527 		    cmd, &prctl_ev_val);
2528 #endif /* SKYWALK */
2529 		return;
2530 	}
2531 
2532 	socket_lock(inp->inp_socket, 1);
2533 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2534 	    WNT_STOPUSING) {
2535 		socket_unlock(inp->inp_socket, 1);
2536 		return;
2537 	}
2538 
2539 	if (PRC_IS_REDIRECT(cmd)) {
2540 		/* signal EHOSTDOWN, as it flushes the cached route */
2541 		(*notify)(inp, EHOSTDOWN);
2542 	} else {
2543 		tp = intotcpcb(inp);
2544 		if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2545 		    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2546 			if (cmd == PRC_MSGSIZE) {
2547 				tcp_handle_msgsize(ip, inp);
2548 			}
2549 
2550 			(*notify)(inp, inetctlerrmap[cmd]);
2551 		}
2552 	}
2553 	socket_unlock(inp->inp_socket, 1);
2554 }
2555 
2556 void
2557 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
2558 {
2559 	tcp_seq icmp_tcp_seq;
2560 	struct in6_addr *dst;
2561 	void (*notify)(struct inpcb *, int) = tcp_notify;
2562 	struct ip6_hdr *ip6;
2563 	struct mbuf *m;
2564 	struct inpcb *inp;
2565 	struct tcpcb *tp;
2566 	struct icmp6_hdr *icmp6;
2567 	struct ip6ctlparam *ip6cp = NULL;
2568 	const struct sockaddr_in6 *sa6_src = NULL;
2569 	unsigned int mtu;
2570 	unsigned int off;
2571 
2572 	struct tcp_ports {
2573 		uint16_t th_sport;
2574 		uint16_t th_dport;
2575 	} t_ports;
2576 #if SKYWALK
2577 	union sockaddr_in_4_6 sock_laddr;
2578 	struct protoctl_ev_val prctl_ev_val;
2579 #endif /* SKYWALK */
2580 
2581 	if (sa->sa_family != AF_INET6 ||
2582 	    sa->sa_len != sizeof(struct sockaddr_in6)) {
2583 		return;
2584 	}
2585 
2586 	/* Source quench is deprecated */
2587 	if (cmd == PRC_QUENCH) {
2588 		return;
2589 	}
2590 
2591 	if ((unsigned)cmd >= PRC_NCMDS) {
2592 		return;
2593 	}
2594 
2595 	/* if the parameter is from icmp6, decode it. */
2596 	if (d != NULL) {
2597 		ip6cp = (struct ip6ctlparam *)d;
2598 		icmp6 = ip6cp->ip6c_icmp6;
2599 		m = ip6cp->ip6c_m;
2600 		ip6 = ip6cp->ip6c_ip6;
2601 		off = ip6cp->ip6c_off;
2602 		sa6_src = ip6cp->ip6c_src;
2603 		dst = ip6cp->ip6c_finaldst;
2604 	} else {
2605 		m = NULL;
2606 		ip6 = NULL;
2607 		off = 0;        /* fool gcc */
2608 		sa6_src = &sa6_any;
2609 		dst = NULL;
2610 	}
2611 
2612 	if (cmd == PRC_MSGSIZE) {
2613 		notify = tcp_mtudisc;
2614 	} else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2615 	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) &&
2616 	    ip6 != NULL) {
2617 		notify = tcp_drop_syn_sent;
2618 	}
2619 	/*
2620 	 * Hostdead is ugly because it goes linearly through all PCBs.
2621 	 * XXX: We never get this from ICMP, otherwise it makes an
2622 	 * excellent DoS attack on machines with many connections.
2623 	 */
2624 	else if (cmd == PRC_HOSTDEAD) {
2625 		ip6 = NULL;
2626 	} else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2627 		return;
2628 	}
2629 
2630 #if SKYWALK
2631 	bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2632 	bzero(&sock_laddr, sizeof(sock_laddr));
2633 #endif /* SKYWALK */
2634 
2635 	if (ip6 == NULL) {
2636 		in6_pcbnotify(&tcbinfo, sa, 0, SA(sa6_src), 0, cmd, NULL, notify);
2637 #if SKYWALK
2638 		protoctl_event_enqueue_nwk_wq_entry(ifp, NULL, sa,
2639 		    0, 0, IPPROTO_TCP, cmd, NULL);
2640 #endif /* SKYWALK */
2641 		return;
2642 	}
2643 
2644 	/* Check if we can safely get the ports from the tcp hdr */
2645 	if (m == NULL ||
2646 	    (m->m_pkthdr.len <
2647 	    (int32_t) (off + sizeof(struct tcp_ports)))) {
2648 		return;
2649 	}
2650 	bzero(&t_ports, sizeof(struct tcp_ports));
2651 	m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
2652 
2653 	off += sizeof(struct tcp_ports);
2654 	if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
2655 		return;
2656 	}
2657 	m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
2658 	icmp_tcp_seq = ntohl(icmp_tcp_seq);
2659 
2660 	if (cmd == PRC_MSGSIZE) {
2661 		mtu = ntohl(icmp6->icmp6_mtu);
2662 		/*
2663 		 * If no alternative MTU was proposed, or the proposed
2664 		 * MTU was too small, use the minimum less fragment-header room.
2665 		 */
2666 		if (mtu < IPV6_MMTU) {
2667 			mtu = IPV6_MMTU - 8;
2668 		}
2669 	}
2670 
2671 	inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, t_ports.th_dport, ip6_input_getdstifscope(m),
2672 	    &ip6->ip6_src, t_ports.th_sport, ip6_input_getsrcifscope(m), 0, NULL);
2673 
2674 	if (inp == NULL ||
2675 	    inp->inp_socket == NULL) {
2676 #if SKYWALK
2677 		if (cmd == PRC_MSGSIZE) {
2678 			prctl_ev_val.val = mtu;
2679 		}
2680 		prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2681 
2682 		sock_laddr.sin6.sin6_family = AF_INET6;
2683 		sock_laddr.sin6.sin6_len = sizeof(sock_laddr.sin6);
2684 		sock_laddr.sin6.sin6_addr = ip6->ip6_src;
2685 
2686 		protoctl_event_enqueue_nwk_wq_entry(ifp,
2687 		    SA(&sock_laddr), sa,
2688 		    t_ports.th_sport, t_ports.th_dport, IPPROTO_TCP,
2689 		    cmd, &prctl_ev_val);
2690 #endif /* SKYWALK */
2691 		return;
2692 	}
2693 
2694 	socket_lock(inp->inp_socket, 1);
2695 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2696 	    WNT_STOPUSING) {
2697 		socket_unlock(inp->inp_socket, 1);
2698 		return;
2699 	}
2700 
2701 	if (PRC_IS_REDIRECT(cmd)) {
2702 		/* signal EHOSTDOWN, as it flushes the cached route */
2703 		(*notify)(inp, EHOSTDOWN);
2704 	} else {
2705 		tp = intotcpcb(inp);
2706 		if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2707 		    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2708 			if (cmd == PRC_MSGSIZE) {
2709 				/*
2710 				 * Only process the offered MTU if it
2711 				 * is smaller than the current one.
2712 				 */
2713 				if (mtu < tp->t_maxseg +
2714 				    (sizeof(struct tcphdr) + sizeof(struct ip6_hdr))) {
2715 					(*notify)(inp, inetctlerrmap[cmd]);
2716 				}
2717 			} else {
2718 				(*notify)(inp, inetctlerrmap[cmd]);
2719 			}
2720 		}
2721 	}
2722 	socket_unlock(inp->inp_socket, 1);
2723 }
2724 
2725 
2726 /*
2727  * Following is where TCP initial sequence number generation occurs.
2728  *
2729  * There are two places where we must use initial sequence numbers:
2730  * 1.  In SYN-ACK packets.
2731  * 2.  In SYN packets.
2732  *
2733  * The ISNs in SYN-ACK packets have no monotonicity requirement,
2734  * and should be as unpredictable as possible to avoid the possibility
2735  * of spoofing and/or connection hijacking.  To satisfy this
2736  * requirement, SYN-ACK ISNs are generated via the arc4random()
2737  * function.  If exact RFC 1948 compliance is requested via sysctl,
2738  * these ISNs will be generated just like those in SYN packets.
2739  *
2740  * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
2741  * depends on this property.  In addition, these ISNs should be
2742  * unguessable so as to prevent connection hijacking.  To satisfy
2743  * the requirements of this situation, the algorithm outlined in
2744  * RFC 9293 is used to generate sequence numbers.
2745  *
2746  * For more information on the theory of operation, please see
2747  * RFC 9293.
2748  *
2749  * Implementation details:
2750  *
2751  * Time is based off the system uptime clock and advances the ISN
2752  * once every 128ns (see tcp_new_isn() below).  This allows for proper
2753  * recycling on high speed LANs while still leaving roughly nine
2754  * minutes before rollover.
2755  *
2756  */
2757 
2758 #define ISN_BYTES_PER_SECOND 1048576
2759 
2760 tcp_seq
2761 tcp_new_isn(struct tcpcb *tp)
2762 {
2763 	uint32_t md5_buffer[4];
2764 	tcp_seq new_isn;
2765 	struct timespec timenow;
2766 	MD5_CTX isn_ctx;
2767 
2768 	nanouptime(&timenow);
2769 
2770 	/* Compute the md5 hash and return the ISN. */
2771 	MD5Init(&isn_ctx);
2772 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport,
2773 	    sizeof(u_short));
2774 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport,
2775 	    sizeof(u_short));
2776 	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
2777 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
2778 		    sizeof(struct in6_addr));
2779 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
2780 		    sizeof(struct in6_addr));
2781 	} else {
2782 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
2783 		    sizeof(struct in_addr));
2784 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
2785 		    sizeof(struct in_addr));
2786 	}
2787 	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
2788 	MD5Final((u_char *) &md5_buffer, &isn_ctx);
2789 
2790 	new_isn = (tcp_seq) md5_buffer[0];
2791 
2792 	/*
2793 	 * We use a 128ns clock, which is equivalent to about 62.5 Mbit/s and
2794 	 * wraps at 549 seconds, thus safe for the 2*MSL TIME-WAIT lifetime.
2795 	 */
2796 	new_isn += (timenow.tv_sec * NSEC_PER_SEC + timenow.tv_nsec) >> 7;
2797 
2798 	if (__probable(tcp_randomize_timestamps)) {
2799 		tp->t_ts_offset = md5_buffer[1];
2800 	}
2801 
2802 	return new_isn;
2803 }
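/*
 * Editorial sketch (not part of the original source): tcp_new_isn()
 * above is the classic RFC 1948/6528 construction, ISN = M + F(), where
 * M is a monotonic clock component and F is a keyed hash over the
 * connection 4-tuple and a secret.  The stand-alone user-space sketch
 * below mirrors that shape; the FNV-1a hash is only an illustrative
 * stand-in for MD5, and all names here are hypothetical.
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stddef.h>
#include <time.h>

static uint32_t
example_fnv1a(const void *buf, size_t len, uint32_t h)
{
	const unsigned char *p = buf;

	while (len-- > 0) {
		h = (h ^ *p++) * 16777619u;     /* FNV-1a: xor, then multiply */
	}
	return h;
}

static uint32_t
example_new_isn(uint32_t laddr, uint16_t lport,
    uint32_t faddr, uint16_t fport, uint32_t secret)
{
	struct timespec ts;
	uint32_t h = 2166136261u;               /* FNV offset basis */
	uint64_t ns;

	/* F(): keyed hash over the connection 4-tuple plus a secret. */
	h = example_fnv1a(&laddr, sizeof(laddr), h);
	h = example_fnv1a(&lport, sizeof(lport), h);
	h = example_fnv1a(&faddr, sizeof(faddr), h);
	h = example_fnv1a(&fport, sizeof(fport), h);
	h = example_fnv1a(&secret, sizeof(secret), h);

	/* M(): advance one sequence number every 128ns, as above. */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	ns = (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
	return h + (uint32_t)(ns >> 7);
}
#endif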
2804 
2805 
2806 /*
2807  * When a specific ICMP unreachable message is received and the
2808  * connection state is SYN-SENT, drop the connection.  This behavior
2809  * is controlled by the icmp_may_rst sysctl.
2810  */
2811 void
2812 tcp_drop_syn_sent(struct inpcb *inp, int errno)
2813 {
2814 	struct tcpcb *tp = intotcpcb(inp);
2815 
2816 	if (tp && tp->t_state == TCPS_SYN_SENT) {
2817 		tcp_drop(tp, errno);
2818 	}
2819 }
2820 
2821 /*
2822  * When `need fragmentation' ICMP is received, update our idea of the MSS
2823  * based on the new value in the route.  Also nudge TCP to send something,
2824  * since we know the packet we just sent was dropped.
2825  * This duplicates some code in the tcp_mss() function in tcp_input.c.
2826  */
2827 void
2828 tcp_mtudisc(struct inpcb *inp, __unused int errno)
2829 {
2830 	struct tcpcb *tp = intotcpcb(inp);
2831 	struct rtentry *rt;
2832 	struct socket *so = inp->inp_socket;
2833 	int mss;
2834 	u_int32_t mtu;
2835 	u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr);
2836 	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
2837 
2838 	/*
2839 	 * Nothing left to send after the socket is defunct or TCP is in the closed state
2840 	 */
2841 	if ((so->so_state & SS_DEFUNCT) || (tp != NULL && tp->t_state == TCPS_CLOSED)) {
2842 		return;
2843 	}
2844 
2845 	if (isipv6) {
2846 		protoHdrOverhead = sizeof(struct ip6_hdr) +
2847 		    sizeof(struct tcphdr);
2848 	}
2849 
2850 	if (tp != NULL) {
2851 		if (isipv6) {
2852 			rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
2853 		} else {
2854 			rt = tcp_rtlookup(inp, IFSCOPE_NONE);
2855 		}
2856 		if (!rt || !rt->rt_rmx.rmx_mtu) {
2857 			tp->t_maxopd = tp->t_maxseg =
2858 			    isipv6 ? tcp_v6mssdflt :
2859 			    tcp_mssdflt;
2860 
2861 			/* Route locked during lookup above */
2862 			if (rt != NULL) {
2863 				RT_UNLOCK(rt);
2864 			}
2865 			return;
2866 		}
2867 		mtu = rt->rt_rmx.rmx_mtu;
2868 
2869 		/* Route locked during lookup above */
2870 		RT_UNLOCK(rt);
2871 
2872 #if NECP
2873 		// Adjust MTU if necessary.
2874 		mtu = necp_socket_get_effective_mtu(inp, mtu);
2875 #endif /* NECP */
2876 		mss = mtu - protoHdrOverhead;
2877 
2878 		if (tp->t_maxopd) {
2879 			mss = min(mss, tp->t_maxopd);
2880 		}
2881 		/*
2882 		 * XXX - The above conditional probably violates the TCP
2883 		 * spec.  The problem is that, since we don't know the
2884 		 * other end's MSS, we are supposed to use a conservative
2885 		 * default.  But, if we do that, then MTU discovery will
2886 		 * never actually take place, because the conservative
2887 		 * default is much less than the MTUs typically seen
2888 		 * on the Internet today.  For the moment, we'll sweep
2889 		 * this under the carpet.
2890 		 *
2891 		 * The conservative default might not actually be a problem
2892 		 * if the only case this occurs is when sending an initial
2893 		 * SYN with options and data to a host we've never talked
2894 		 * to before.  Then, they will reply with an MSS value which
2895 		 * will get recorded and the new parameters should get
2896 		 * recomputed.  For Further Study.
2897 		 */
2898 		if (tp->t_maxopd <= mss) {
2899 			return;
2900 		}
2901 		tp->t_maxopd = mss;
2902 
2903 		if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
2904 		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) {
2905 			mss -= TCPOLEN_TSTAMP_APPA;
2906 		}
2907 
2908 #if MPTCP
2909 		mss -= mptcp_adj_mss(tp, TRUE);
2910 #endif
2911 		if (so->so_snd.sb_hiwat < mss) {
2912 			mss = so->so_snd.sb_hiwat;
2913 		}
2914 
2915 		tp->t_maxseg = mss;
2916 
2917 		ASSERT(tp->t_maxseg);
2918 
2919 		/*
2920 		 * Reset the slow-start flight size as it may depend on the
2921 		 * new MSS
2922 		 */
2923 		if (CC_ALGO(tp)->cwnd_init != NULL) {
2924 			CC_ALGO(tp)->cwnd_init(tp);
2925 		}
2926 
2927 		if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.rwnd_init != NULL) {
2928 			tcp_cc_rledbat.rwnd_init(tp);
2929 		}
2930 
2931 		tcpstat.tcps_mturesent++;
2932 		tp->t_rtttime = 0;
2933 		tp->snd_nxt = tp->snd_una;
2934 		tcp_output(tp);
2935 	}
2936 }
2937 
2938 /*
2939  * Look-up the routing entry to the peer of this inpcb.  If no route
2940  * is found and one cannot be allocated, return NULL.  This routine
2941  * is called by TCP routines that access the rmx structure and by tcp_mss
2942  * to get the interface MTU.  If a route is found, this routine will
2943  * hold the rtentry lock; the caller is responsible for unlocking.
2944  */
2945 struct rtentry *
2946 tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope)
2947 {
2948 	struct route *ro;
2949 	struct rtentry *rt;
2950 	struct tcpcb *tp;
2951 
2952 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2953 
2954 	ro = &inp->inp_route;
2955 	if ((rt = ro->ro_rt) != NULL) {
2956 		RT_LOCK(rt);
2957 	}
2958 
2959 	if (ROUTE_UNUSABLE(ro)) {
2960 		if (rt != NULL) {
2961 			RT_UNLOCK(rt);
2962 			rt = NULL;
2963 		}
2964 		ROUTE_RELEASE(ro);
2965 		/* No route yet, so try to acquire one */
2966 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
2967 			unsigned int ifscope;
2968 
2969 			ro->ro_dst.sa_family = AF_INET;
2970 			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
2971 			SIN(&ro->ro_dst)->sin_addr = inp->inp_faddr;
2972 
2973 			/*
2974 			 * If the socket was bound to an interface, then
2975 			 * the bound-to-interface takes precedence over
2976 			 * the inbound interface passed in by the caller
2977 			 * (if we get here as part of the output path then
2978 			 * input_ifscope is IFSCOPE_NONE).
2979 			 */
2980 			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
2981 			    inp->inp_boundifp->if_index : input_ifscope;
2982 
2983 			rtalloc_scoped(ro, ifscope);
2984 			if ((rt = ro->ro_rt) != NULL) {
2985 				RT_LOCK(rt);
2986 			}
2987 		}
2988 	}
2989 	if (rt != NULL) {
2990 		RT_LOCK_ASSERT_HELD(rt);
2991 	}
2992 
2993 	/*
2994 	 * Update MTU discovery determination. Don't do it if:
2995 	 *	1) it is disabled via the sysctl
2996 	 *	2) the route isn't up
2997 	 *	3) the MTU is locked (if it is, then discovery has been
2998 	 *	   disabled)
2999 	 */
3000 
3001 	tp = intotcpcb(inp);
3002 
3003 	if (!path_mtu_discovery || ((rt != NULL) &&
3004 	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3005 		tp->t_flags &= ~TF_PMTUD;
3006 	} else {
3007 		tp->t_flags |= TF_PMTUD;
3008 	}
3009 
3010 	if (rt != NULL && rt->rt_ifp != NULL) {
3011 		somultipages(inp->inp_socket,
3012 		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3013 		tcp_set_tso(tp, rt->rt_ifp);
3014 		soif2kcl(inp->inp_socket,
3015 		    (rt->rt_ifp->if_eflags & IFEF_2KCL));
3016 		tcp_set_ecn(tp, rt->rt_ifp);
3017 		if (inp->inp_last_outifp == NULL) {
3018 			inp->inp_last_outifp = rt->rt_ifp;
3019 #if SKYWALK
3020 			if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3021 				netns_set_ifnet(&inp->inp_netns_token,
3022 				    inp->inp_last_outifp);
3023 			}
3024 #endif /* SKYWALK */
3025 		}
3026 	}
3027 
3028 	/* Note if the peer is local */
3029 	if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3030 	    (rt->rt_gateway->sa_family == AF_LINK ||
3031 	    rt->rt_ifp->if_flags & IFF_LOOPBACK ||
3032 	    in_localaddr(inp->inp_faddr))) {
3033 		tp->t_flags |= TF_LOCAL;
3034 	}
3035 
3036 	/*
3037 	 * Caller needs to call RT_UNLOCK(rt).
3038 	 */
3039 	return rt;
3040 }
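/*
 * Editorial note: callers of tcp_rtlookup()/tcp_rtlookup6() get the
 * rtentry back locked and must drop the lock themselves, as
 * tcp_mtudisc() above does:
 *
 *	rt = tcp_rtlookup(inp, IFSCOPE_NONE);
 *	if (rt != NULL) {
 *		mtu = rt->rt_rmx.rmx_mtu;
 *		RT_UNLOCK(rt);
 *	}
 */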
3041 
3042 struct rtentry *
3043 tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope)
3044 {
3045 	struct route_in6 *ro6;
3046 	struct rtentry *rt;
3047 	struct tcpcb *tp;
3048 
3049 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3050 
3051 	ro6 = &inp->in6p_route;
3052 	if ((rt = ro6->ro_rt) != NULL) {
3053 		RT_LOCK(rt);
3054 	}
3055 
3056 	if (ROUTE_UNUSABLE(ro6)) {
3057 		if (rt != NULL) {
3058 			RT_UNLOCK(rt);
3059 			rt = NULL;
3060 		}
3061 		ROUTE_RELEASE(ro6);
3062 		/* No route yet, so try to acquire one */
3063 		if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
3064 			struct sockaddr_in6 *dst6;
3065 			unsigned int ifscope;
3066 
3067 			dst6 = SIN6(&ro6->ro_dst);
3068 			dst6->sin6_family = AF_INET6;
3069 			dst6->sin6_len = sizeof(*dst6);
3070 			dst6->sin6_addr = inp->in6p_faddr;
3071 
3072 			/*
3073 			 * If the socket was bound to an interface, then
3074 			 * the bound-to-interface takes precedence over
3075 			 * the inbound interface passed in by the caller
3076 			 * (if we get here as part of the output path then
3077 			 * input_ifscope is IFSCOPE_NONE).
3078 			 */
3079 			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3080 			    inp->inp_boundifp->if_index : input_ifscope;
3081 
3082 			rtalloc_scoped((struct route *)ro6, ifscope);
3083 			if ((rt = ro6->ro_rt) != NULL) {
3084 				RT_LOCK(rt);
3085 			}
3086 		}
3087 	}
3088 	if (rt != NULL) {
3089 		RT_LOCK_ASSERT_HELD(rt);
3090 	}
3091 
3101 	tp = intotcpcb(inp);
3102 
3103 	/*
3104 	 * Update MTU discovery determination. Don't do it if:
3105 	 *	1) it is disabled via the sysctl
3106 	 *	2) the route isn't up
3107 	 *	3) the MTU is locked (if it is, then discovery has been
3108 	 *	   disabled)
3109 	 */
3110 
3111 	if (!path_mtu_discovery || ((rt != NULL) &&
3112 	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3113 		tp->t_flags &= ~TF_PMTUD;
3114 	} else {
3115 		tp->t_flags |= TF_PMTUD;
3116 	}
3117 
3118 	if (rt != NULL && rt->rt_ifp != NULL) {
3119 		somultipages(inp->inp_socket,
3120 		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3121 		tcp_set_tso(tp, rt->rt_ifp);
3122 		soif2kcl(inp->inp_socket,
3123 		    (rt->rt_ifp->if_eflags & IFEF_2KCL));
3124 		tcp_set_ecn(tp, rt->rt_ifp);
3125 		if (inp->inp_last_outifp == NULL) {
3126 			inp->inp_last_outifp = rt->rt_ifp;
3127 #if SKYWALK
3128 			if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3129 				netns_set_ifnet(&inp->inp_netns_token,
3130 				    inp->inp_last_outifp);
3131 			}
3132 #endif /* SKYWALK */
3133 		}
3134 
3135 		/* Note if the peer is local */
3136 		if (!(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3137 		    (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
3138 		    IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
3139 		    rt->rt_gateway->sa_family == AF_LINK ||
3140 		    in6_localaddr(&inp->in6p_faddr))) {
3141 			tp->t_flags |= TF_LOCAL;
3142 		}
3143 	}
3144 
3145 	/*
3146 	 * Caller needs to call RT_UNLOCK(rt).
3147 	 */
3148 	return rt;
3149 }
3150 
3151 #if IPSEC
3152 /* compute ESP/AH header size for TCP, including outer IP header. */
3153 size_t
3154 ipsec_hdrsiz_tcp(struct tcpcb *tp)
3155 {
3156 	struct inpcb *inp;
3157 	struct mbuf *m;
3158 	size_t hdrsiz;
3159 	struct ip *ip;
3160 	struct ip6_hdr *ip6 = NULL;
3161 	struct tcphdr *th;
3162 
3163 	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) {
3164 		return 0;
3165 	}
3166 	MGETHDR(m, M_DONTWAIT, MT_DATA);        /* MAC-OK */
3167 	if (!m) {
3168 		return 0;
3169 	}
3170 
3171 	if ((inp->inp_vflag & INP_IPV6) != 0) {
3172 		ip6 = mtod(m, struct ip6_hdr *);
3173 		th = (struct tcphdr *)(void *)(ip6 + 1);
3174 		m->m_pkthdr.len = m->m_len =
3175 		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3176 		tcp_fillheaders(m, tp, ip6, th);
3177 		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3178 	} else {
3179 		ip = mtod(m, struct ip *);
3180 		th = (struct tcphdr *)(ip + 1);
3181 		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
3182 		tcp_fillheaders(m, tp, ip, th);
3183 		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3184 	}
3185 	m_free(m);
3186 	return hdrsiz;
3187 }
3188 #endif /* IPSEC */
3189 
3190 int
3191 tcp_lock(struct socket *so, int refcount, void *lr)
3192 {
3193 	void *lr_saved;
3194 
3195 	if (lr == NULL) {
3196 		lr_saved = __builtin_return_address(0);
3197 	} else {
3198 		lr_saved = lr;
3199 	}
3200 
3201 retry:
3202 	if (so->so_pcb != NULL) {
3203 		if (so->so_flags & SOF_MP_SUBFLOW) {
3204 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3205 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3206 
3207 			socket_lock(mp_so, refcount);
3208 
3209 			/*
3210 			 * Check if we became non-MPTCP while waiting for the lock.
3211 			 * If yes, we have to retry to grab the right lock.
3212 			 */
3213 			if (!(so->so_flags & SOF_MP_SUBFLOW)) {
3214 				socket_unlock(mp_so, refcount);
3215 				goto retry;
3216 			}
3217 		} else {
3218 			lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3219 
3220 			if (so->so_flags & SOF_MP_SUBFLOW) {
3221 				/*
3222 				 * While waiting for the lock, we might have
3223 				 * become MPTCP-enabled (see mptcp_subflow_socreate).
3224 				 */
3225 				lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3226 				goto retry;
3227 			}
3228 		}
3229 	} else {
3230 		panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s",
3231 		    so, lr_saved, solockhistory_nr(so));
3232 		/* NOTREACHED */
3233 	}
3234 
3235 	if (so->so_usecount < 0) {
3236 		panic("tcp_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s",
3237 		    so, so->so_pcb, lr_saved, so->so_usecount,
3238 		    solockhistory_nr(so));
3239 		/* NOTREACHED */
3240 	}
3241 	if (refcount) {
3242 		so->so_usecount++;
3243 	}
3244 	so->lock_lr[so->next_lock_lr] = lr_saved;
3245 	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
3246 	return 0;
3247 }
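/*
 * Editorial sketch (not part of the original source): the retry loop in
 * tcp_lock() above is an instance of a general pattern -- pick a lock
 * based on a mode flag, then re-check the flag after acquisition, since
 * it may have changed while we blocked.  All names below are
 * hypothetical.
 */
#if 0 /* illustrative only */
struct example_obj {
	int is_shared_mode;
	lck_mtx_t *shared_lock;
	lck_mtx_t *private_lock;
};

static void
example_lock_by_mode(struct example_obj *obj)
{
	for (;;) {
		lck_mtx_t *lock = obj->is_shared_mode ?
		    obj->shared_lock : obj->private_lock;

		lck_mtx_lock(lock);
		/* Re-check: the mode may have flipped before we got in. */
		if (lock == (obj->is_shared_mode ?
		    obj->shared_lock : obj->private_lock)) {
			return;         /* the correct lock is held */
		}
		lck_mtx_unlock(lock);   /* lost the race; try again */
	}
}
#endif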
3248 
3249 int
3250 tcp_unlock(struct socket *so, int refcount, void *lr)
3251 {
3252 	void *lr_saved;
3253 
3254 	if (lr == NULL) {
3255 		lr_saved = __builtin_return_address(0);
3256 	} else {
3257 		lr_saved = lr;
3258 	}
3259 
3260 #ifdef MORE_TCPLOCK_DEBUG
3261 	printf("tcp_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x "
3262 	    "lr=0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so),
3263 	    (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
3264 	    (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)),
3265 	    so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
3266 #endif
3267 	if (refcount) {
3268 		so->so_usecount--;
3269 	}
3270 
3271 	if (so->so_usecount < 0) {
3272 		panic("tcp_unlock: so=%p usecount=%x lrh= %s",
3273 		    so, so->so_usecount, solockhistory_nr(so));
3274 		/* NOTREACHED */
3275 	}
3276 	if (so->so_pcb == NULL) {
3277 		panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s",
3278 		    so, so->so_usecount, lr_saved, solockhistory_nr(so));
3279 		/* NOTREACHED */
3280 	} else {
3281 		so->unlock_lr[so->next_unlock_lr] = lr_saved;
3282 		so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
3283 
3284 		if (so->so_flags & SOF_MP_SUBFLOW) {
3285 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3286 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3287 
3288 			socket_lock_assert_owned(mp_so);
3289 
3290 			socket_unlock(mp_so, refcount);
3291 		} else {
3292 			LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
3293 			    LCK_MTX_ASSERT_OWNED);
3294 			lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3295 		}
3296 	}
3297 	return 0;
3298 }
3299 
3300 lck_mtx_t *
3301 tcp_getlock(struct socket *so, int flags)
3302 {
3303 	struct inpcb *inp = sotoinpcb(so);
3304 
3305 	if (so->so_pcb) {
3306 		if (so->so_usecount < 0) {
3307 			panic("tcp_getlock: so=%p usecount=%x lrh= %s",
3308 			    so, so->so_usecount, solockhistory_nr(so));
3309 		}
3310 
3311 		if (so->so_flags & SOF_MP_SUBFLOW) {
3312 			struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3313 			struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3314 
3315 			return mp_so->so_proto->pr_getlock(mp_so, flags);
3316 		} else {
3317 			return &inp->inpcb_mtx;
3318 		}
3319 	} else {
3320 		panic("tcp_getlock: so=%p NULL so_pcb %s",
3321 		    so, solockhistory_nr(so));
3322 		return so->so_proto->pr_domain->dom_mtx;
3323 	}
3324 }
3325 
3326 /*
3327  * Determine if we can grow the receive socket buffer to avoid sending
3328  * a zero window update to the peer. We allow even socket buffers that
3329  * have fixed size (set by the application) to grow if the resource
3330  * constraints are met. They will also be trimmed after the application
3331  * reads data.
3332  */
3333 static void
3334 tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb)
3335 {
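	/* Grow in increments of 16 segments (t_maxseg << 4). */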
3336 	u_int32_t rcvbufinc = tp->t_maxseg << 4;
3337 	u_int32_t rcvbuf = sb->sb_hiwat;
3338 	struct socket *so = tp->t_inpcb->inp_socket;
3339 
3340 	if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
3341 		return;
3342 	}
3343 
3344 	if (tcp_do_autorcvbuf == 1 &&
3345 	    (tp->t_flags & TF_SLOWLINK) == 0 &&
3346 	    (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
3347 	    (rcvbuf - sb->sb_cc) < rcvbufinc &&
3348 	    rcvbuf < tcp_autorcvbuf_max &&
3349 	    (sb->sb_idealsize > 0 &&
3350 	    sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
3351 		sbreserve(sb,
3352 		    min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max));
3353 	}
3354 }
3355 
3356 int32_t
3357 tcp_sbspace(struct tcpcb *tp)
3358 {
3359 	struct socket *so = tp->t_inpcb->inp_socket;
3360 	struct sockbuf *sb = &so->so_rcv;
3361 	u_int32_t rcvbuf;
3362 	int32_t space;
3363 	int32_t pending = 0;
3364 
3365 	if (so->so_flags & SOF_MP_SUBFLOW) {
3366 		/* We still need to grow TCP's buffer to have a BDP-estimate */
3367 		tcp_sbrcv_grow_rwin(tp, sb);
3368 
3369 		return mptcp_sbspace(tptomptp(tp));
3370 	}
3371 
3372 	tcp_sbrcv_grow_rwin(tp, sb);
3373 
3374 	/* hiwat might have changed */
3375 	rcvbuf = sb->sb_hiwat;
3376 
3377 	space = ((int32_t) imin((rcvbuf - sb->sb_cc),
3378 	    (sb->sb_mbmax - sb->sb_mbcnt)));
3379 	if (space < 0) {
3380 		space = 0;
3381 	}
3382 
3383 #if CONTENT_FILTER
3384 	/* Compensate for data being processed by content filters */
3385 	pending = cfil_sock_data_space(sb);
3386 #endif /* CONTENT_FILTER */
3387 	if (pending > space) {
3388 		space = 0;
3389 	} else {
3390 		space -= pending;
3391 	}
3392 
3393 	/*
3394 	 * Avoid increasing window size if the current window
3395 	 * is already very low, we could be in "persist" mode and
3396 	 * we could break some apps (see rdar://5409343)
3397 	 */
3398 
3399 	if (space < tp->t_maxseg) {
3400 		return space;
3401 	}
3402 
3403 	/* Clip window size for slower link */
3404 
3405 	if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) {
3406 		return imin(space, slowlink_wsize);
3407 	}
3408 
3409 	return space;
3410 }
3411 /*
3412  * Checks TCP Segment Offloading capability for a given connection
3413  * and interface pair.
3414  */
3415 void
3416 tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
3417 {
3418 	struct inpcb *inp;
3419 	int isipv6;
3420 	struct ifnet *tunnel_ifp = NULL;
3421 #define IFNET_TSO_MASK (IFNET_TSO_IPV6 | IFNET_TSO_IPV4)
3422 
3423 	tp->t_flags &= ~TF_TSO;
3424 
3425 	/*
3426 	 * Bail if there's a non-TSO-capable filter on the interface.
3427 	 */
3428 	if (ifp == NULL || ifp->if_flt_no_tso_count > 0) {
3429 		return;
3430 	}
3431 
3432 	inp = tp->t_inpcb;
3433 	isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
3434 
3435 #if MPTCP
3436 	/*
3437 	 * We can't use TSO if this tcpcb belongs to an MPTCP session.
3438 	 */
3439 	if (inp->inp_socket->so_flags & SOF_MP_SUBFLOW) {
3440 		return;
3441 	}
3442 #endif
3443 	/*
3444 	 * We can't use TSO if the TSO capability of the tunnel interface does
3445 	 * not match the capability of another interface known by TCP
3446 	 */
3447 	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
3448 		u_int tunnel_if_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
3449 
3450 		if (tunnel_if_index != 0) {
3451 			ifnet_head_lock_shared();
3452 			tunnel_ifp = ifindex2ifnet[tunnel_if_index];
3453 			ifnet_head_done();
3454 		}
3455 
3456 		if (tunnel_ifp == NULL) {
3457 			return;
3458 		}
3459 
3460 		if ((ifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3461 			if (tso_debug > 0) {
3462 				os_log(OS_LOG_DEFAULT,
3463 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with ifp %s",
3464 				    __func__,
3465 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3466 				    tunnel_ifp->if_xname, ifp->if_xname);
3467 			}
3468 			return;
3469 		}
3470 		if (inp->inp_last_outifp != NULL &&
3471 		    (inp->inp_last_outifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3472 			if (tso_debug > 0) {
3473 				os_log(OS_LOG_DEFAULT,
3474 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_last_outifp %s",
3475 				    __func__,
3476 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3477 				    tunnel_ifp->if_xname, inp->inp_last_outifp->if_xname);
3478 			}
3479 			return;
3480 		}
3481 		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp != NULL &&
3482 		    (inp->inp_boundifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3483 			if (tso_debug > 0) {
3484 				os_log(OS_LOG_DEFAULT,
3485 				    "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_boundifp %s",
3486 				    __func__,
3487 				    ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3488 				    tunnel_ifp->if_xname, inp->inp_boundifp->if_xname);
3489 			}
3490 			return;
3491 		}
3492 	}
3493 
3494 	if (isipv6) {
3495 		if (ifp->if_hwassist & IFNET_TSO_IPV6) {
3496 			tp->t_flags |= TF_TSO;
3497 			if (ifp->if_tso_v6_mtu != 0) {
3498 				tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
3499 			} else {
3500 				tp->tso_max_segment_size = TCP_MAXWIN;
3501 			}
3502 		}
3503 	} else {
3504 		if (ifp->if_hwassist & IFNET_TSO_IPV4) {
3505 			tp->t_flags |= TF_TSO;
3506 			if (ifp->if_tso_v4_mtu != 0) {
3507 				tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
3508 			} else {
3509 				tp->tso_max_segment_size = TCP_MAXWIN;
3510 			}
3511 			if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
3512 				tp->tso_max_segment_size -=
3513 				    CLAT46_HDR_EXPANSION_OVERHD;
3514 			}
3515 		}
3516 	}
3517 
3518 	if (tso_debug > 1) {
3519 		os_log(OS_LOG_DEFAULT, "%s: %u > %u TSO %d ifp %s",
3520 		    __func__,
3521 		    ntohs(tp->t_inpcb->inp_lport),
3522 		    ntohs(tp->t_inpcb->inp_fport),
3523 		    (tp->t_flags & TF_TSO) != 0,
3524 		    ifp != NULL ? ifp->if_xname : "<NULL>");
3525 	}
3526 }
3527 
3528 #define TIMEVAL_TO_TCPHZ(_tv_) ((uint32_t)((_tv_).tv_sec * TCP_RETRANSHZ + \
3529 	(_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC))
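/*
 * Illustrative arithmetic: TIMEVAL_TO_TCPHZ converts a timeval to TCP clock
 * ticks. With TCP_RETRANSHZ == 1000 (millisecond ticks) and
 * TCP_RETRANSHZ_TO_USEC == 1000, {tv_sec = 2, tv_usec = 345678} becomes
 * 2 * 1000 + 345678 / 1000 == 2345 ticks; the sub-tick remainder (678 us)
 * is carried in tcp_now_remainder_us by calculate_tcp_clock() below.
 */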
3530 
3531 /*
3532  * Function to calculate the tcp clock. The tcp clock will get updated
3533  * at the boundaries of the tcp layer. This is done at 3 places:
3534  * 1. Right before processing an input tcp packet
3535  * 2. Whenever a connection wants to access the network using tcp_usrreqs
3536  * 3. When a tcp timer fires or before tcp slow timeout
3537  *
3538  */
3539 
3540 void
3541 calculate_tcp_clock(void)
3542 {
3543 	struct timeval tv = tcp_uptime;
3544 	struct timeval interval = {.tv_sec = 0, .tv_usec = TCP_RETRANSHZ_TO_USEC};
3545 	struct timeval now, hold_now;
3546 	uint32_t incr = 0;
3547 
3548 	microuptime(&now);
3549 
3550 	/*
3551 	 * Update coarse-grained networking timestamp (in sec.); the idea
3552 	 * is to update the counter returnable via net_uptime() when
3553 	 * we read time.
3554 	 */
3555 	net_update_uptime_with_time(&now);
3556 
3557 	timevaladd(&tv, &interval);
3558 	if (timevalcmp(&now, &tv, >)) {
3559 		/* time to update the clock */
3560 		lck_spin_lock(&tcp_uptime_lock);
3561 		if (timevalcmp(&tcp_uptime, &now, >=)) {
3562 			/* clock got updated while waiting for the lock */
3563 			lck_spin_unlock(&tcp_uptime_lock);
3564 			return;
3565 		}
3566 
3567 		microuptime(&now);
3568 		hold_now = now;
3569 		tv = tcp_uptime;
3570 		timevalsub(&now, &tv);
3571 
3572 		incr = TIMEVAL_TO_TCPHZ(now);
3573 
3574 		/* Account for the previous remainder */
3575 		uint32_t remaining_us = (now.tv_usec % TCP_RETRANSHZ_TO_USEC) +
3576 		    tcp_now_remainder_us;
3577 		if (remaining_us >= TCP_RETRANSHZ_TO_USEC) {
3578 			incr += (remaining_us / TCP_RETRANSHZ_TO_USEC);
3579 		}
3580 
3581 		if (incr > 0) {
3582 			tcp_uptime = hold_now;
3583 			tcp_now_remainder_us = remaining_us % TCP_RETRANSHZ_TO_USEC;
3584 			tcp_now += incr;
3585 		}
3586 
3587 		lck_spin_unlock(&tcp_uptime_lock);
3588 	}
3589 }
3590 
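/* Monotonic system uptime in nanoseconds, derived from mach_absolute_time(). */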
3591 uint64_t
3592 microuptime_ns(void)
3593 {
3594 	uint64_t abstime = mach_absolute_time();
3595 	uint64_t ns = 0;
3596 	absolutetime_to_nanoseconds(abstime, &ns);
3597 
3598 	return ns;
3599 }
3600 
3601 #define MAX_BURST_INTERVAL_KERNEL_PACING_NSEC                                  \
3602 	(10 * NSEC_PER_MSEC) // Don't delay more than 10ms between two bursts
3603 static uint64_t
3604 tcp_pacer_get_packet_interval(struct tcpcb *tp, uint16_t pkt_len)
3605 {
3606 	if (tp->t_pacer.rate == 0) {
3607 		os_log_error(OS_LOG_DEFAULT,
3608 		    "pacer rate shouldn't be 0, CCA is %s (cwnd=%u, smoothed rtt=%u ms)",
3609 		    CC_ALGO(tp)->name, tp->snd_cwnd, tp->t_srtt >> TCP_RTT_SHIFT);
3610 
3611 		return MAX_BURST_INTERVAL_KERNEL_PACING_NSEC;
3612 	}
3613 
3614 	uint64_t interval = (uint64_t)pkt_len * NSEC_PER_SEC / tp->t_pacer.rate;
3615 	if (interval > MAX_BURST_INTERVAL_KERNEL_PACING_NSEC) {
3616 		interval = MAX_BURST_INTERVAL_KERNEL_PACING_NSEC;
3617 	}
3618 
3619 	return interval;
3620 }
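/*
 * Illustrative arithmetic (the units follow from the formula above, with
 * t_pacer.rate in bytes per second): a 1448-byte packet paced at
 * 125,000,000 B/s (1 Gb/s) yields 1448 * NSEC_PER_SEC / 125000000
 * == 11584 ns, i.e. roughly 11.6 us between bursts, well under the
 * 10 ms cap.
 */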
3621 
3622 /* Return packet tx_time in nanoseconds (absolute as well as continuous) */
3623 uint64_t
3624 tcp_pacer_get_packet_tx_time(struct tcpcb *tp, uint16_t pkt_len)
3625 {
3626 	/*
3627 	 * size is a static variable because this function is called
3628 	 * multiple times for MSS-sized packets; at high speeds we
3629 	 * want to send multiple packets that add up to burst_size
3630 	 * with the same tx_time.
3631 	 */
3632 	static uint32_t size = 0;
3633 	uint64_t now = microuptime_ns();
3634 
3635 	if (pkt_len == 0 || now == 0) {
3636 		return now;
3637 	}
3638 
3639 	if (tp->t_pacer.packet_tx_time == 0) {
3640 		tp->t_pacer.packet_tx_time = now;
3641 		size = pkt_len;
3642 	} else {
3643 		size += pkt_len;
3644 		if (size > tp->t_pacer.tso_burst_size) {
3645 			/*
3646 			 * Increment tx_time by packet_interval and
3647 			 * reset size to this packet's len
3648 			 */
3649 			tp->t_pacer.packet_tx_time +=
3650 			    tcp_pacer_get_packet_interval(tp, pkt_len);
3651 			size = pkt_len;
3652 			if (now > tp->t_pacer.packet_tx_time) {
3653 				/*
3654 				 * If current time is bigger, then application
3655 				 * has already paced the packet. Also, we can't
3656 				 * set tx_time in the past.
3657 				 */
3658 				tp->t_pacer.packet_tx_time = now;
3659 			}
3660 		}
3661 	}
3662 
3663 	return tp->t_pacer.packet_tx_time;
3664 }
3665 
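/*
 * Attach the pacer-computed tx_time to the mbuf as a KERNEL_TAG_TYPE_AQM
 * mbuf tag so the queueing (AQM) layer can honor the pacing schedule.
 */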
3666 void
3667 tcp_set_mbuf_tx_time(struct mbuf *m, uint64_t tx_time)
3668 {
3669 	struct m_tag *tag = NULL;
3670 	tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_AQM,
3671 	    sizeof(uint64_t), M_WAITOK, m);
3672 	if (tag != NULL) {
3673 		m_tag_prepend(m, tag);
3674 		*(uint64_t *)tag->m_tag_data = tx_time;
3675 	}
3676 }
3677 
3678 /*
3679  * Compute receive window scaling that we are going to request
3680  * Compute the receive window scaling that we are going to request
3681  * for this connection based on sb_hiwat. Try to leave some
3682  * room to potentially increase the window size up to a maximum
3683  */
3684 void
3685 tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so)
3686 {
3687 	uint32_t maxsockbufsize;
3688 
3689 	tp->request_r_scale = MAX((uint8_t)tcp_win_scale, tp->request_r_scale);
3690 	maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
3691 	    so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
3692 
3693 	/*
3694 	 * Window scale should not exceed what is needed
3695 	 * to send the max receive window size; adding 1 to TCP_MAXWIN
3696 	 * ensures that.
3697 	 */
3698 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
3699 	    ((TCP_MAXWIN + 1) << tp->request_r_scale) < maxsockbufsize) {
3700 		tp->request_r_scale++;
3701 	}
3702 	tp->request_r_scale = MIN(tp->request_r_scale, TCP_MAX_WINSHIFT);
3703 }
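/*
 * Worked example: with an effective buffer limit of 4 MB, the loop above
 * stops at request_r_scale == 6, because (TCP_MAXWIN + 1) << 6 ==
 * 65536 << 6 == 4194304 is no longer smaller than the limit.
 */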
3704 
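/*
 * Returns 1 when a write event should be delivered to a process waiting on
 * the "not sent" low-water mark: either the unsent byte count has dropped
 * to t_notsent_lowat, or Nagle is enabled and less than one maxseg remains.
 */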
3705 int
3706 tcp_notsent_lowat_check(struct socket *so)
3707 {
3708 	struct inpcb *inp = sotoinpcb(so);
3709 	struct tcpcb *tp = NULL;
3710 	int notsent = 0;
3711 
3712 	if (inp != NULL) {
3713 		tp = intotcpcb(inp);
3714 	}
3715 
3716 	if (tp == NULL) {
3717 		return 0;
3718 	}
3719 
3720 	notsent = so->so_snd.sb_cc -
3721 	    (tp->snd_nxt - tp->snd_una);
3722 
3723 	/*
3724 	 * When we send a FIN or SYN, notsent can be negative.
3725 	 * In that case, too, we need to send a write event to the
3726 	 * process if it is waiting. In the FIN case, it will
3727 	 * get an error from send because cantsendmore will be set.
3728 	 */
3729 	if (notsent <= tp->t_notsent_lowat) {
3730 		return 1;
3731 	}
3732 
3733 	/*
3734 	 * When Nagle's algorithm is not disabled, it is better
3735 	 * to wake up the client until there is at least one
3736 	 * maxseg of data to write.
3737 	 */
3738 	if ((tp->t_flags & TF_NODELAY) == 0 &&
3739 	    notsent > 0 && notsent < tp->t_maxseg) {
3740 		return 1;
3741 	}
3742 	return 0;
3743 }
3744 
3745 void
3746 tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3747 {
3748 	struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
3749 	uint16_t rxcount = 0;
3750 
3751 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3752 		tp->t_dsack_lastuna = tp->snd_una;
3753 	}
3754 	/*
3755 	 * First check if there is a segment already existing for this
3756 	 * sequence space.
3757 	 */
3758 
3759 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3760 		if (SEQ_GT(rxseg->rx_start, start)) {
3761 			break;
3762 		}
3763 		prev = rxseg;
3764 	}
3765 	next = rxseg;
3766 
3767 	/* check if prev seg is for this sequence */
3768 	if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
3769 	    SEQ_GEQ(prev->rx_end, end)) {
3770 		prev->rx_count++;
3771 		return;
3772 	}
3773 
3774 	/*
3775 	 * There are a few possibilities at this point.
3776 	 * 1. prev overlaps with the beginning of this sequence
3777 	 * 2. next overlaps with the end of this sequence
3778 	 * 3. there is no overlap.
3779 	 */
3780 
3781 	if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
3782 		if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
3783 			start = prev->rx_end + 1;
3784 			prev->rx_count++;
3785 		} else {
3786 			prev->rx_end = (start - 1);
3787 			rxcount = prev->rx_count;
3788 		}
3789 	}
3790 
3791 	if (next != NULL && SEQ_LT(next->rx_start, end)) {
3792 		if (SEQ_LEQ(next->rx_end, end)) {
3793 			end = next->rx_start - 1;
3794 			next->rx_count++;
3795 		} else {
3796 			next->rx_start = end + 1;
3797 			rxcount = next->rx_count;
3798 		}
3799 	}
3800 	if (!SEQ_LT(start, end)) {
3801 		return;
3802 	}
3803 
3804 	if (tcp_rxt_seg_max > 0 && tp->t_rxt_seg_count >= tcp_rxt_seg_max) {
3805 		rxseg = SLIST_FIRST(&tp->t_rxt_segments);
3806 		if (prev == rxseg) {
3807 			prev = NULL;
3808 		}
3809 		SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3810 		    tcp_rxt_seg, rx_link);
3811 
3812 		tcp_rxt_seg_drop++;
3813 		tp->t_rxt_seg_drop++;
3814 		TCP_LOG(tp, "removed rxseg list overflow %u:%u ",
3815 		    rxseg->rx_start, rxseg->rx_end);
3816 		zfree(tcp_rxt_seg_zone, rxseg);
3817 
3818 		tp->t_rxt_seg_count -= 1;
3819 	}
3820 
3821 	rxseg = zalloc_flags(tcp_rxt_seg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3822 	rxseg->rx_start = start;
3823 	rxseg->rx_end = end;
3824 	rxseg->rx_count = rxcount + 1;
3825 
3826 	if (prev != NULL) {
3827 		SLIST_INSERT_AFTER(prev, rxseg, rx_link);
3828 	} else {
3829 		SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
3830 	}
3831 	tp->t_rxt_seg_count += 1;
3832 }
3833 
3834 struct tcp_rxt_seg *
3835 tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3836 {
3837 	struct tcp_rxt_seg *rxseg;
3838 
3839 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3840 		return NULL;
3841 	}
3842 
3843 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3844 		if (SEQ_LEQ(rxseg->rx_start, start) &&
3845 		    SEQ_GEQ(rxseg->rx_end, end)) {
3846 			return rxseg;
3847 		}
3848 		if (SEQ_GT(rxseg->rx_start, start)) {
3849 			break;
3850 		}
3851 	}
3852 	return NULL;
3853 }
3854 
3855 void
3856 tcp_rxtseg_set_spurious(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3857 {
3858 	struct tcp_rxt_seg *rxseg;
3859 
3860 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3861 		return;
3862 	}
3863 
3864 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3865 		if (SEQ_GEQ(rxseg->rx_start, start) &&
3866 		    SEQ_LEQ(rxseg->rx_end, end)) {
3867 			/*
3868 			 * If the segment was retransmitted only once, mark it as
3869 			 * spurious.
3870 			 */
3871 			if (rxseg->rx_count == 1) {
3872 				rxseg->rx_flags |= TCP_RXT_SPURIOUS;
3873 			}
3874 		}
3875 
3876 		if (SEQ_GEQ(rxseg->rx_start, end)) {
3877 			break;
3878 		}
3879 	}
3880 	return;
3881 }
3882 
3883 void
3884 tcp_rxtseg_clean(struct tcpcb *tp)
3885 {
3886 	struct tcp_rxt_seg *rxseg, *next;
3887 
3888 	SLIST_FOREACH_SAFE(rxseg, &tp->t_rxt_segments, rx_link, next) {
3889 		SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3890 		    tcp_rxt_seg, rx_link);
3891 		zfree(tcp_rxt_seg_zone, rxseg);
3892 	}
3893 	tp->t_rxt_seg_count = 0;
3894 	tp->t_dsack_lastuna = tp->snd_max;
3895 }
3896 
3897 boolean_t
3898 tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack)
3899 {
3900 	boolean_t bad_rexmt;
3901 	struct tcp_rxt_seg *rxseg;
3902 
3903 	if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3904 		return FALSE;
3905 	}
3906 
3907 	/*
3908 	 * If all of the segments in this window are not cumulatively
3909 	 * acknowledged, then there can still be undetected packet loss.
3910 	 * Do not restore congestion window in that case.
3911 	 */
3912 	if (SEQ_LT(th_ack, tp->snd_recover)) {
3913 		return FALSE;
3914 	}
3915 
3916 	bad_rexmt = TRUE;
3917 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3918 		if (!(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
3919 			bad_rexmt = FALSE;
3920 			break;
3921 		}
3922 	}
3923 	return bad_rexmt;
3924 }
3925 
3926 u_int32_t
3927 tcp_rxtseg_total_size(struct tcpcb *tp)
3928 {
3929 	struct tcp_rxt_seg *rxseg;
3930 	u_int32_t total_size = 0;
3931 
3932 	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3933 		total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
3934 	}
3935 	return total_size;
3936 }
3937 
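/*
 * RB-tree comparator: orders sent segments by end_seq using sequence-space
 * (wraparound-safe) subtraction cast to a signed int.
 */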
3938 int
3939 tcp_seg_cmp(const struct tcp_seg_sent *seg1, const struct tcp_seg_sent *seg2)
3940 {
3941 	return (int)(seg1->end_seq - seg2->end_seq);
3942 }
3943 
3944 RB_GENERATE(tcp_seg_sent_tree_head, tcp_seg_sent, seg_link, tcp_seg_cmp)
3945 
3946 uint32_t
3947 tcp_seg_len(struct tcp_seg_sent *seg)
3948 {
3949 	if (SEQ_LT(seg->end_seq, seg->start_seq)) {
3950 		os_log_error(OS_LOG_DEFAULT, "segment end(%u) can't be smaller "
3951 		    "than segment start(%u)", seg->end_seq, seg->start_seq);
3952 	}
3953 
3954 	return seg->end_seq - seg->start_seq;
3955 }
3956 
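/*
 * Take a zeroed segment from the per-connection free pool when available,
 * otherwise allocate one from tcp_seg_sent_zone.
 */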
3957 static struct tcp_seg_sent *
3958 tcp_seg_alloc_init(struct tcpcb *tp)
3959 {
3960 	struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->seg_pool.free_segs);
3961 	if (seg != NULL) {
3962 		TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
3963 		tp->seg_pool.free_segs_count--;
3964 	} else {
3965 		// TODO: remove Z_WAITOK and Z_NOFAIL?
3966 		seg = zalloc_flags(tcp_seg_sent_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3967 		if (seg == NULL) {
3968 			return NULL;
3969 		}
3970 	}
3971 	bzero(seg, sizeof(*seg));
3972 
3973 	return seg;
3974 }
3975 
3976 static void
3977 tcp_update_seg_after_rto(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
3978     uint32_t xmit_ts, uint8_t flags)
3979 {
3980 	tcp_rack_transmit_seg(tp, found_seg, found_seg->start_seq, found_seg->end_seq,
3981 	    xmit_ts, flags);
3982 	struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->t_segs_sent);
3983 	if (found_seg == seg) {
3984 		// Move this segment to the end of time-ordered list.
3985 		TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
3986 		TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
3987 	}
3988 }
3989 
3990 static void
3991 tcp_process_rxmt_segs_after_rto(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start,
3992     uint32_t xmit_ts, uint8_t flags)
3993 {
3994 	struct tcp_seg_sent segment = {};
3995 
3996 	while (seg != NULL) {
3997 		if (SEQ_LEQ(seg->start_seq, start)) {
3998 			tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
3999 			break;
4000 		} else {
4001 			/* The segment is a part of the total RTO retransmission */
4002 			tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4003 
4004 			/* Find the next segment ending at the start of current segment */
4005 			segment.end_seq = seg->start_seq;
4006 			seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4007 		}
4008 	}
4009 }
4010 
4011 static struct tcp_seg_sent *
4012 tcp_seg_sent_insert_before(struct tcpcb *tp, struct tcp_seg_sent *before, tcp_seq start, tcp_seq end,
4013     uint32_t xmit_ts, uint8_t flags)
4014 {
4015 	struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4016 	/* segment MUST be allocated, there is no other fail-safe here */
4017 	tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4018 	struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4019 	if (not_inserted) {
4020 		os_log(OS_LOG_DEFAULT, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4021 		    not_inserted->start_seq, not_inserted->end_seq);
4022 	}
4023 	TAILQ_INSERT_BEFORE(before, seg, tx_link);
4024 
4025 	return seg;
4026 }
4027 
4028 static struct tcp_seg_sent *
4029 tcp_seg_rto_insert_end(struct tcpcb *tp, tcp_seq start, tcp_seq end,
4030     uint32_t xmit_ts, uint8_t flags)
4031 {
4032 	struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4033 	/* segment MUST be allocated, there is no other fail-safe here */
4034 	tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4035 	struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4036 	if (not_inserted) {
4037 		os_log(OS_LOG_DEFAULT, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4038 		    not_inserted->start_seq, not_inserted->end_seq);
4039 	}
4040 	TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4041 
4042 	return seg;
4043 }
4044 
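/*
 * Record a transmitted [start, end) range. Three cases are handled below:
 * an existing segment retransmitted during RACK recovery (seg != NULL),
 * a brand-new segment, or a retransmission after an RTO/PTO that must be
 * matched against the RB tree.
 */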
4045 void
4046 tcp_seg_sent_insert(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start, tcp_seq end,
4047     uint32_t xmit_ts, uint8_t flags)
4048 {
4049 	if (seg != NULL) {
4050 		uint8_t seg_flags = seg->flags | flags;
4051 		if (seg->end_seq == end) {
4052 			/* Entire seg retransmitted in RACK recovery, start and end sequence doesn't change */
4053 			if (seg->start_seq != start) {
4054 				os_log_error(OS_LOG_DEFAULT, "Segment start (%u) is not same as retransmitted "
4055 				    "start sequence number (%u)", seg->start_seq, start);
4056 			}
4057 			tcp_rack_transmit_seg(tp, seg, seg->start_seq, seg->end_seq, xmit_ts, seg_flags);
4058 			TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4059 			TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4060 		} else {
4061 			/*
4062 			 * Original segment is retransmitted partially, update start_seq by len
4063 			 * and create new segment for retransmitted part
4064 			 */
4065 			struct tcp_seg_sent *partial_seg = tcp_seg_alloc_init(tp);
4066 			if (partial_seg == NULL) {
4067 				return;
4068 			}
4069 			seg->start_seq += (end - start);
4070 			tcp_rack_transmit_seg(tp, partial_seg, start, end, xmit_ts, seg_flags);
4071 			struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head,
4072 			    &tp->t_segs_sent_tree, partial_seg);
4073 			if (not_inserted) {
4074 				os_log(OS_LOG_DEFAULT, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4075 				    not_inserted->start_seq, not_inserted->end_seq);
4076 			}
4077 			TAILQ_INSERT_TAIL(&tp->t_segs_sent, partial_seg, tx_link);
4078 		}
4079 
4080 		return;
4081 	}
4082 
4083 	if ((flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE) == 0) {
4084 		/* This is a new segment */
4085 		seg = tcp_seg_alloc_init(tp);
4086 		if (seg == NULL) {
4087 			return;
4088 		}
4089 
4090 		tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4091 		struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4092 		if (not_inserted) {
4093 			os_log(OS_LOG_DEFAULT, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4094 			    not_inserted->start_seq, not_inserted->end_seq);
4095 		}
4096 		TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4097 
4098 		return;
4099 	}
4100 	/*
4101 	 * The segment was retransmitted after either an RTO or a PTO.
4102 	 * During RTO, the time-ordered list may lose its order.
4103 	 * If retransmitted after an RTO, check whether the segment
4104 	 * already exists in the RB tree and update its xmit_ts. Also,
4105 	 * if this seg is at the head of the ordered list, move it
4106 	 * to the end.
4107 	 */
4108 	struct tcp_seg_sent segment = {};
4109 	struct tcp_seg_sent *found_seg = NULL, *rxmt_seg = NULL;
4110 
4111 	/* Set the end sequence to search for existing segment */
4112 	segment.end_seq = end;
4113 	found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4114 	if (found_seg != NULL) {
4115 		/* Found an exact match for retransmitted end sequence */
4116 		tcp_process_rxmt_segs_after_rto(tp, found_seg, start, xmit_ts, flags);
4117 		return;
4118 	}
4119 	/*
4120 	 * We come here when we don't find an exact match and end of segment
4121 	 * retransmitted after RTO lies within a segment.
4122 	 */
4123 	RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4124 		if (SEQ_LT(end, found_seg->end_seq) && SEQ_GT(end, found_seg->start_seq)) {
4125 			/*
4126 			 * This segment is partially retransmitted. We split this segment at the boundary of end
4127 			 * sequence. First insert the part being retransmitted at the end of time-ordered list.
4128 			 */
4129 			tcp_seg_rto_insert_end(tp, found_seg->start_seq, end, xmit_ts,
4130 			    found_seg->flags | flags);
4131 
4132 			if (SEQ_LEQ(found_seg->start_seq, start)) {
4133 				/*
4134 				 * We are done with the retransmitted part.
4135 				 * Move the start of existing segment
4136 				 */
4137 				found_seg->start_seq = end;
4138 			} else {
4139 				/*
4140 				 * This retransmitted sequence covers more than one segment
4141 				 * Look for segments covered by this retransmission below this segment
4142 				 */
4143 				segment.end_seq = found_seg->start_seq;
4144 				rxmt_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4145 
4146 				if (rxmt_seg != NULL) {
4147 					/* rxmt_seg is just before the current segment */
4148 					tcp_process_rxmt_segs_after_rto(tp, rxmt_seg, start, xmit_ts, flags);
4149 				}
4150 
4151 				/* Move the start of existing segment */
4152 				found_seg->start_seq = end;
4153 			}
4154 			return;
4155 		}
4156 	}
4157 }
4158 
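/*
 * Post-order walk: queue both subtrees and then the node itself onto
 * t_segs_acked; RB_REMOVE is deferred to tcp_seg_delete_acked(), after
 * the whole batch has been collected.
 */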
4159 static void
4160 tcp_seg_collect_acked_subtree(struct tcpcb *tp, struct tcp_seg_sent *seg,
4161     uint32_t acked_xmit_ts, uint32_t tsecr)
4162 {
4163 	if (seg != NULL) {
4164 		tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4165 		tcp_seg_collect_acked_subtree(tp, RB_RIGHT(seg, seg_link), acked_xmit_ts, tsecr);
4166 		TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4167 	}
4168 }
4169 
4170 /* Call this function with root of the rb tree */
4171 static void
4172 tcp_seg_collect_acked(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq th_ack,
4173     uint32_t acked_xmit_ts, uint32_t tsecr)
4174 {
4175 	if (seg == NULL) {
4176 		return;
4177 	}
4178 
4179 	if (SEQ_GEQ(th_ack, seg->end_seq)) {
4180 		/* Delete the entire left sub-tree */
4181 		tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4182 		/* Evaluate the right sub-tree */
4183 		tcp_seg_collect_acked(tp, RB_RIGHT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4184 		TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4185 	} else {
4186 		/*
4187 		 * This ACK doesn't acknowledge the current root and its right sub-tree.
4188 		 * Evaluate the left sub-tree
4189 		 */
4190 		tcp_seg_collect_acked(tp, RB_LEFT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4191 	}
4192 }
4193 
4194 static void
4195 tcp_seg_delete_acked(struct tcpcb *tp, uint32_t acked_xmit_ts, uint32_t tsecr)
4196 {
4197 	struct tcp_seg_sent *acked_seg = NULL, *next = NULL;
4198 
4199 	TAILQ_FOREACH_SAFE(acked_seg, &tp->t_segs_acked, ack_link, next) {
4200 		/* Advance RACK state if applicable */
4201 		if (acked_seg->xmit_ts > acked_xmit_ts) {
4202 			tcp_rack_update_segment_acked(tp, tsecr, acked_seg->xmit_ts, acked_seg->end_seq,
4203 			    !!(acked_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4204 		}
4205 		/* Check for reordering */
4206 		tcp_rack_detect_reordering_acked(tp, acked_seg);
4207 
4208 		const uint32_t seg_len = tcp_seg_len(acked_seg);
4209 		if (acked_seg->flags & TCP_SEGMENT_LOST) {
4210 			if (tp->bytes_lost < seg_len) {
4211 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller than already "
4212 				    "lost segment length (%u)", tp->bytes_lost, seg_len);
4213 			}
4214 			tp->bytes_lost -= seg_len;
4215 		}
4216 		if (acked_seg->flags & TCP_RACK_RETRANSMITTED) {
4217 			if (tp->bytes_retransmitted < seg_len) {
4218 				os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be smaller "
4219 				    "than already retransmitted segment length (%u)",
4220 				    tp->bytes_retransmitted, seg_len);
4221 			}
4222 			tp->bytes_retransmitted -= seg_len;
4223 		}
4224 		if (acked_seg->flags & TCP_SEGMENT_SACKED) {
4225 			if (tp->bytes_sacked < seg_len) {
4226 				os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller than already "
4227 				    "SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4228 			}
4229 			tp->bytes_sacked -= seg_len;
4230 		}
4231 		TAILQ_REMOVE(&tp->t_segs_acked, acked_seg, ack_link);
4232 		TAILQ_REMOVE(&tp->t_segs_sent, acked_seg, tx_link);
4233 		RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, acked_seg);
4234 		tcp_seg_delete(tp, acked_seg);
4235 	}
4236 }
4237 
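/*
 * Process a cumulative ACK (th_ack) against the sent-segment bookkeeping:
 * drop every segment the ACK fully covers, trim a partially ACKed one
 * (possible with TSO), and advance the RACK state from the newest
 * acknowledged segment.
 */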
4238 void
4239 tcp_segs_doack(struct tcpcb *tp, tcp_seq th_ack, struct tcpopt *to)
4240 {
4241 	uint32_t tsecr = 0, acked_xmit_ts = 0;
4242 	tcp_seq acked_seq = th_ack;
4243 	bool was_retransmitted = false;
4244 
4245 	if (TAILQ_EMPTY(&tp->t_segs_sent)) {
4246 		return;
4247 	}
4248 
4249 	if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0)) {
4250 		tsecr = to->to_tsecr;
4251 	}
4252 
4253 	struct tcp_seg_sent seg = {};
4254 	struct tcp_seg_sent *found_seg = NULL, *next = NULL;
4255 
4256 	found_seg = TAILQ_LAST(&tp->t_segs_sent, tcp_seg_sent_head);
4257 
4258 	if (tp->rack.segs_retransmitted == false) {
4259 		if (SEQ_GEQ(th_ack, found_seg->end_seq)) {
4260 			/*
4261 			 * ACK acknowledges the last sent segment completely (snd_max),
4262 			 * we can remove all segments from time ordered list.
4263 			 */
4264 			acked_seq = found_seg->end_seq;
4265 			acked_xmit_ts = found_seg->xmit_ts;
4266 			was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4267 			tcp_segs_sent_clean(tp, false);
4268 
4269 			/* Advance RACK state */
4270 			tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4271 			return;
4272 		}
4273 	}
4274 	/*
4275 	 * If either not all segments are ACKed OR the time-ordered list contains retransmitted
4276 	 * segments, do a RB tree search for largest (completely) ACKed segment and remove the ACKed
4277 	 * segment and all segments left of it from both RB tree and time-ordered list.
4278 	 *
4279 	 * Set the end sequence to search for ACKed segment.
4280 	 */
4281 	seg.end_seq = th_ack;
4282 
4283 	if ((found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg)) != NULL) {
4284 		acked_seq = found_seg->end_seq;
4285 		acked_xmit_ts = found_seg->xmit_ts;
4286 		was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4287 
4288 		/*
4289 		 * Remove all segments that are ACKed by this ACK.
4290 		 * We defer self-balancing of RB tree to the end
4291 		 * by calling RB_REMOVE after collecting all ACKed segments.
4292 		 */
4293 		tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4294 		tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4295 
4296 		/* Advance RACK state */
4297 		tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4298 
4299 		return;
4300 	}
4301 	/*
4302 	 * When TSO is enabled, it is possible that th_ack is less
4303 	 * than segment->end, hence we search the tree
4304 	 * until we find the largest (partially) ACKed segment.
4305 	 */
4306 	RB_FOREACH_SAFE(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, next) {
4307 		if (SEQ_LT(th_ack, found_seg->end_seq) && SEQ_GT(th_ack, found_seg->start_seq)) {
4308 			acked_seq = th_ack;
4309 			acked_xmit_ts = found_seg->xmit_ts;
4310 			was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4311 
4312 			/* Remove all segments completely ACKed by this ack */
4313 			tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4314 			tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4315 			found_seg->start_seq = th_ack;
4316 
4317 			/* Advance RACK state */
4318 			tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4319 			break;
4320 		}
4321 	}
4322 }
4323 
4324 static bool
4325 tcp_seg_mark_sacked(struct tcpcb *tp, struct tcp_seg_sent *seg, uint32_t *newbytes_sacked)
4326 {
4327 	if (seg->flags & TCP_SEGMENT_SACKED) {
4328 		return false;
4329 	}
4330 
4331 	const uint32_t seg_len = tcp_seg_len(seg);
4332 
4333 	/* Check for reordering */
4334 	tcp_rack_detect_reordering_acked(tp, seg);
4335 
4336 	if (seg->flags & TCP_RACK_RETRANSMITTED) {
4337 		if (seg->flags & TCP_SEGMENT_LOST) {
4338 			/*
4339 			 * If the segment is not considered lost, we don't clear
4340 			 * If the segment is not considered lost, we don't clear
4341 			 * retransmitted as it might still be in flight. The ONLY time
4342 			 * this can happen is when an RTO occurs and the segment is
4343 			 * retransmitted and SACKed before RACK detects that it was lost.
4344 			seg->flags &= ~(TCP_SEGMENT_LOST | TCP_RACK_RETRANSMITTED);
4345 			if (tp->bytes_lost < seg_len || tp->bytes_retransmitted < seg_len) {
4346 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) and/or bytes_retransmitted (%u) "
4347 				    "can't be smaller than already lost/retransmitted segment length (%u)", tp->bytes_lost,
4348 				    tp->bytes_retransmitted, seg_len);
4349 			}
4350 			tp->bytes_lost -= seg_len;
4351 			tp->bytes_retransmitted -= seg_len;
4352 		}
4353 	} else {
4354 		if (seg->flags & TCP_SEGMENT_LOST) {
4355 			seg->flags &= ~(TCP_SEGMENT_LOST);
4356 			if (tp->bytes_lost < seg_len) {
4357 				os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller "
4358 				    "than already lost segment length (%u)", tp->bytes_lost, seg_len);
4359 			}
4360 			tp->bytes_lost -= seg_len;
4361 		}
4362 	}
4363 	*newbytes_sacked += seg_len;
4364 	seg->flags |= TCP_SEGMENT_SACKED;
4365 	tp->bytes_sacked += seg_len;
4366 
4367 	return true;
4368 }
4369 
4370 static void
4371 tcp_segs_dosack_matched(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4372     tcp_seq sblk_start, uint32_t tsecr,
4373     uint32_t *newbytes_sacked)
4374 {
4375 	struct tcp_seg_sent seg = {};
4376 
4377 	while (found_seg != NULL) {
4378 		if (sblk_start == found_seg->start_seq) {
4379 			/*
4380 			 * Covered the entire SACK block.
4381 			 * Record segment flags before they get erased.
4382 			 */
4383 			uint8_t seg_flags = found_seg->flags;
4384 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4385 			if (newly_marked) {
4386 				/* Advance RACK state */
4387 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4388 				    found_seg->end_seq,
4389 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4390 			}
4391 			break;
4392 		} else if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4393 			if ((found_seg->flags & TCP_SEGMENT_SACKED) != 0) {
4394 				/* No need to process an already SACKED segment */
4395 				break;
4396 			}
4397 			/*
4398 			 * This segment is partially ACKed by SACK block
4399 			 * as sblk_start > segment start. Since it is
4400 			 * partially SACKed, we should split the unSACKed and
4401 			 * SACKed parts.
4402 			 */
4403 			/* First create a new segment for unSACKed part */
4404 			tcp_seg_sent_insert_before(tp, found_seg, found_seg->start_seq, sblk_start,
4405 			    found_seg->xmit_ts, found_seg->flags);
4406 			/* Now, update the SACKed part */
4407 			found_seg->start_seq = sblk_start;
4408 			/* Record seg flags before they get erased. */
4409 			uint8_t seg_flags = found_seg->flags;
4410 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4411 			if (newly_marked) {
4412 				/* Advance RACK state */
4413 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4414 				    found_seg->end_seq,
4415 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4416 			}
4417 			break;
4418 		} else {
4419 			/*
4420 			 * This segment lies within the SACK block
4421 			 * Record segment flags before they get erased.
4422 			 */
4423 			uint8_t seg_flags = found_seg->flags;
4424 			bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4425 			if (newly_marked) {
4426 				/* Advance RACK state */
4427 				tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4428 				    found_seg->end_seq,
4429 				    !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4430 			}
4431 			/* Find the next segment ending at the start of current segment */
4432 			seg.end_seq = found_seg->start_seq;
4433 			found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4434 		}
4435 	}
4436 }
4437 
4438 void
4439 tcp_segs_dosack(struct tcpcb *tp, tcp_seq sblk_start, tcp_seq sblk_end,
4440     uint32_t tsecr, uint32_t *newbytes_sacked)
4441 {
4442 	/*
4443 	 * When we receive a SACK, min RTT is computed after SACK processing, which
4444 	 * means we are using the min RTT from the previous ACK to advance RACK state.
4445 	 * This is ok as we track a windowed min-filtered estimate over a period.
4446 	 */
4447 	struct tcp_seg_sent seg = {};
4448 	struct tcp_seg_sent *found_seg = NULL, *sacked_seg = NULL;
4449 
4450 	/* Set the end sequence to search for SACKed segment */
4451 	seg.end_seq = sblk_end;
4452 	found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4453 
4454 	if (found_seg != NULL) {
4455 		/* We found an exact match for sblk_end */
4456 		tcp_segs_dosack_matched(tp, found_seg, sblk_start, tsecr, newbytes_sacked);
4457 		return;
4458 	}
4459 	/*
4460 	 * We come here when we don't find an exact match and sblk_end
4461 	 * lies within a segment. This would happen only when TSO is used.
4462 	 */
4463 	RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4464 		if (SEQ_LT(sblk_end, found_seg->end_seq) && SEQ_GT(sblk_end, found_seg->start_seq)) {
4465 			/*
4466 			 * This segment is partially SACKed. We split this segment at the boundary
4467 			 * of SACK block. First insert the newly SACKed part
4468 			 */
4469 			tcp_seq start = SEQ_LEQ(sblk_start, found_seg->start_seq) ? found_seg->start_seq : sblk_start;
4470 			struct tcp_seg_sent *inserted = tcp_seg_sent_insert_before(tp, found_seg, start,
4471 			    sblk_end, found_seg->xmit_ts, found_seg->flags);
4472 			/* Record seg flags before they get erased. */
4473 			uint8_t seg_flags = inserted->flags;
4474 			/* Mark the SACKed segment */
4475 			tcp_seg_mark_sacked(tp, inserted, newbytes_sacked);
4476 
4477 			/* Advance RACK state */
4478 			tcp_rack_update_segment_acked(tp, tsecr, inserted->xmit_ts,
4479 			    inserted->end_seq, !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4480 
4481 			if (sblk_start == found_seg->start_seq) {
4482 				/*
4483 				 * We are done with this SACK block.
4484 				 * Move the start of existing segment
4485 				 */
4486 				found_seg->start_seq = sblk_end;
4487 				break;
4488 			}
4489 
4490 			if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4491 				/* Insert the remaining unSACKed part before the SACKED segment inserted above */
4492 				tcp_seg_sent_insert_before(tp, inserted, found_seg->start_seq,
4493 				    sblk_start, found_seg->xmit_ts, found_seg->flags);
4494 				/* Move the start of existing segment */
4495 				found_seg->start_seq = sblk_end;
4496 				break;
4497 			} else {
4498 				/*
4499 				 * This SACK block covers more than one segment
4500 				 * Look for segments SACKed below this segment
4501 				 */
4502 				seg.end_seq = found_seg->start_seq;
4503 				sacked_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4504 
4505 				if (sacked_seg != NULL) {
4506 					/* We found an exact match for sblk_end */
4507 					tcp_segs_dosack_matched(tp, sacked_seg, sblk_start, tsecr, newbytes_sacked);
4508 				}
4509 
4510 				/* Move the start of existing segment */
4511 				found_seg->start_seq = sblk_end;
4512 			}
4513 			break;
4514 		}
4515 	}
4516 }
4517 
4518 void
4519 tcp_segs_clear_sacked(struct tcpcb *tp)
4520 {
4521 	struct tcp_seg_sent *seg = NULL;
4522 
4523 	TAILQ_FOREACH(seg, &tp->t_segs_sent, tx_link) {
4525 		const uint32_t seg_len = tcp_seg_len(seg);
4526 
4527 		if (seg->flags & TCP_SEGMENT_SACKED) {
4528 			seg->flags &= ~(TCP_SEGMENT_SACKED);
4529 			if (tp->bytes_sacked < seg_len) {
4530 				os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller "
4531 				    "than already SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4532 			}
4533 			tp->bytes_sacked -= seg_len;
4534 		}
4535 	}
4536 }
4537 
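/*
 * Mark a segment lost for RACK accounting. Marking an already-lost,
 * retransmitted segment lost again means the retransmission itself was
 * lost, so only the retransmitted-byte count is rolled back.
 */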
4538 void
4539 tcp_mark_seg_lost(struct tcpcb *tp, struct tcp_seg_sent *seg)
4540 {
4541 	const uint32_t seg_len = tcp_seg_len(seg);
4542 
4543 	if (seg->flags & TCP_SEGMENT_LOST) {
4544 		if (seg->flags & TCP_RACK_RETRANSMITTED) {
4545 			/* Retransmission was lost */
4546 			seg->flags &= ~TCP_RACK_RETRANSMITTED;
4547 			if (tp->bytes_retransmitted < seg_len) {
4548 				os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be "
4549 				    "smaller than retransmitted segment length (%u)",
4550 				    tp->bytes_retransmitted, seg_len);
4551 				return;
4552 			}
4553 			tp->bytes_retransmitted -= seg_len;
4554 		}
4555 	} else {
4556 		seg->flags |= TCP_SEGMENT_LOST;
4557 		tp->bytes_lost += seg_len;
4558 	}
4559 }
4560 
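/*
 * Recycle a segment into the per-connection free pool, or free it back to
 * the zone once the pool holds TCP_SEG_POOL_MAX_ITEM_COUNT entries.
 */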
4561 void
4562 tcp_seg_delete(struct tcpcb *tp, struct tcp_seg_sent *seg)
4563 {
4564 	if (tp->seg_pool.free_segs_count >= TCP_SEG_POOL_MAX_ITEM_COUNT) {
4565 		zfree(tcp_seg_sent_zone, seg);
4566 	} else {
4567 		bzero(seg, sizeof(*seg));
4568 		TAILQ_INSERT_TAIL(&tp->seg_pool.free_segs, seg, free_link);
4569 		tp->seg_pool.free_segs_count++;
4570 	}
4571 }
4572 
4573 void
4574 tcp_segs_sent_clean(struct tcpcb *tp, bool free_segs)
4575 {
4576 	struct tcp_seg_sent *seg = NULL, *next = NULL;
4577 
4578 	TAILQ_FOREACH_SAFE(seg, &tp->t_segs_sent, tx_link, next) {
4579 		/* Check for reordering */
4580 		tcp_rack_detect_reordering_acked(tp, seg);
4581 
4582 		TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4583 		RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4584 		tcp_seg_delete(tp, seg);
4585 	}
4586 	if (__improbable(!RB_EMPTY(&tp->t_segs_sent_tree))) {
4587 		os_log_error(OS_LOG_DEFAULT, "RB tree still contains segments while "
4588 		    "time ordered list is already empty");
4589 	}
4590 	if (__improbable(!TAILQ_EMPTY(&tp->t_segs_acked))) {
4591 		os_log_error(OS_LOG_DEFAULT, "Segment ACKed list shouldn't contain "
4592 		    "any segments as they are removed immediately after being ACKed");
4593 	}
4594 	/* Reset seg_retransmitted as we emptied the list */
4595 	tcp_rack_reset_segs_retransmitted(tp);
4596 	tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
4597 
4598 	/* Empty the free segments pool */
4599 	if (free_segs) {
4600 		TAILQ_FOREACH_SAFE(seg, &tp->seg_pool.free_segs, free_link, next) {
4601 			TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4602 			zfree(tcp_seg_sent_zone, seg);
4603 		}
4604 		tp->seg_pool.free_segs_count = 0;
4605 	}
4606 }
4607 
4608 void
4609 tcp_get_connectivity_status(struct tcpcb *tp,
4610     struct tcp_conn_status *connstatus)
4611 {
4612 	if (tp == NULL || connstatus == NULL) {
4613 		return;
4614 	}
4615 	bzero(connstatus, sizeof(*connstatus));
4616 	if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
4617 		if (TCPS_HAVEESTABLISHED(tp->t_state)) {
4618 			connstatus->write_probe_failed = 1;
4619 		} else {
4620 			connstatus->conn_probe_failed = 1;
4621 		}
4622 	}
4623 	if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) {
4624 		connstatus->read_probe_failed = 1;
4625 	}
4626 	if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL &&
4627 	    (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) {
4628 		connstatus->probe_activated = 1;
4629 	}
4630 }
4631 
4632 void
4633 tcp_disable_tfo(struct tcpcb *tp)
4634 {
4635 	tp->t_flagsext &= ~TF_FASTOPEN;
4636 }
4637 
4638 static struct mbuf *
4639 tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp,
4640     boolean_t is_probe)
4641 {
4642 	struct inpcb *inp = tp->t_inpcb;
4643 	struct tcphdr *th;
4644 	u_int8_t *data;
4645 	int win = 0;
4646 	struct mbuf *m;
4647 
4648 	/*
4649 	 * The code assumes the IP + TCP headers fit in an mbuf packet header
4650 	 */
4651 	_CASSERT(sizeof(struct ip) + sizeof(struct tcphdr) <= _MHLEN);
4652 	_CASSERT(sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= _MHLEN);
4653 
4654 	MGETHDR(m, M_WAIT, MT_HEADER);
4655 	if (m == NULL) {
4656 		return NULL;
4657 	}
4658 	m->m_pkthdr.pkt_proto = IPPROTO_TCP;
4659 
4660 	data = mbuf_datastart(m);
4661 
4662 	if (inp->inp_vflag & INP_IPV4) {
4663 		bzero(data, sizeof(struct ip) + sizeof(struct tcphdr));
4664 		th = (struct tcphdr *)(void *) (data + sizeof(struct ip));
4665 		m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
4666 		m->m_pkthdr.len = m->m_len;
4667 	} else {
4668 		VERIFY(inp->inp_vflag & INP_IPV6);
4669 
4670 		bzero(data, sizeof(struct ip6_hdr)
4671 		    + sizeof(struct tcphdr));
4672 		th = (struct tcphdr *)(void *)(data + sizeof(struct ip6_hdr));
4673 		m->m_len = sizeof(struct ip6_hdr) +
4674 		    sizeof(struct tcphdr);
4675 		m->m_pkthdr.len = m->m_len;
4676 	}
4677 
4678 	tcp_fillheaders(m, tp, data, th);
4679 
4680 	if (inp->inp_vflag & INP_IPV4) {
4681 		struct ip *ip;
4682 
4683 		ip = (__typeof__(ip))(void *)data;
4684 
4685 		ip->ip_id = rfc6864 ? 0 : ip_randomid((uint64_t)m);
4686 		ip->ip_off = htons(IP_DF);
4687 		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
4688 		ip->ip_ttl = inp->inp_ip_ttl;
4689 		ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
4690 		ip->ip_sum = in_cksum_hdr(ip);
4691 	} else {
4692 		struct ip6_hdr *ip6;
4693 
4694 		ip6 = (__typeof__(ip6))(void *)data;
4695 
4696 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
4697 		ip6->ip6_hlim = in6_selecthlim(inp, ifp);
4698 		ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
4699 
4700 		if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
4701 			ip6->ip6_src.s6_addr16[1] = 0;
4702 		}
4703 		if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
4704 			ip6->ip6_dst.s6_addr16[1] = 0;
4705 		}
4706 	}
4707 	th->th_flags = TH_ACK;
4708 
4709 	win = tcp_sbspace(tp);
4710 	if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) {
4711 		win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
4712 	}
4713 	th->th_win = htons((u_short) (win >> tp->rcv_scale));
4714 
4715 	if (is_probe) {
4716 		th->th_seq = htonl(tp->snd_una - 1);
4717 	} else {
4718 		th->th_seq = htonl(tp->snd_una);
4719 	}
4720 	th->th_ack = htonl(tp->rcv_nxt);
4721 
4722 	/* Force recompute TCP checksum to be the final value */
4723 	th->th_sum = 0;
4724 	if (inp->inp_vflag & INP_IPV4) {
4725 		th->th_sum = inet_cksum(m, IPPROTO_TCP,
4726 		    sizeof(struct ip), sizeof(struct tcphdr));
4727 	} else {
4728 		th->th_sum = inet6_cksum(m, IPPROTO_TCP,
4729 		    sizeof(struct ip6_hdr), sizeof(struct tcphdr));
4730 	}
4731 
4732 	return m;
4733 }
4734 
4735 void
4736 tcp_fill_keepalive_offload_frames(ifnet_t ifp,
4737     struct ifnet_keepalive_offload_frame *frames_array,
4738     u_int32_t frames_array_count, size_t frame_data_offset,
4739     u_int32_t *used_frames_count)
4740 {
4741 	struct inpcb *inp;
4742 	inp_gen_t gencnt;
4743 	u_int32_t frame_index = *used_frames_count;
4744 
4745 	/* Validation of the parameters */
4746 	if (ifp == NULL || frames_array == NULL ||
4747 	    frames_array_count == 0 ||
4748 	    frame_index >= frames_array_count ||
4749 	    frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4750 		return;
4751 	}
4752 
4753 	/* Fast exit when no process is using the socket option TCP_KEEPALIVE_OFFLOAD */
4754 	if (ifp->if_tcp_kao_cnt == 0) {
4755 		return;
4756 	}
4757 
4758 	/*
4759 	 * This function is called outside the regular TCP processing
4760 	 * so we need to update the TCP clock.
4761 	 */
4762 	calculate_tcp_clock();
4763 
4764 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
4765 	gencnt = tcbinfo.ipi_gencnt;
4766 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
4767 		struct socket *so;
4768 		struct ifnet_keepalive_offload_frame *frame;
4769 		struct mbuf *m = NULL;
4770 		struct tcpcb *tp = intotcpcb(inp);
4771 
4772 		if (frame_index >= frames_array_count) {
4773 			break;
4774 		}
4775 
4776 		if (inp->inp_gencnt > gencnt ||
4777 		    inp->inp_state == INPCB_STATE_DEAD) {
4778 			continue;
4779 		}
4780 
4781 		if ((so = inp->inp_socket) == NULL ||
4782 		    (so->so_state & SS_DEFUNCT)) {
4783 			continue;
4784 		}
4785 		/*
4786 		 * check for keepalive offload flag without socket
4787 		 * lock to avoid a deadlock
4788 		 */
4789 		if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
4790 			continue;
4791 		}
4792 
4793 		if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
4794 			continue;
4795 		}
4796 		if (inp->inp_ppcb == NULL ||
4797 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
4798 			continue;
4799 		}
4800 		socket_lock(so, 1);
4801 		/* Release the want count */
4802 		if (inp->inp_ppcb == NULL ||
4803 		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
4804 			socket_unlock(so, 1);
4805 			continue;
4806 		}
4807 		if ((inp->inp_vflag & INP_IPV4) &&
4808 		    (inp->inp_laddr.s_addr == INADDR_ANY ||
4809 		    inp->inp_faddr.s_addr == INADDR_ANY)) {
4810 			socket_unlock(so, 1);
4811 			continue;
4812 		}
4813 		if ((inp->inp_vflag & INP_IPV6) &&
4814 		    (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
4815 		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
4816 			socket_unlock(so, 1);
4817 			continue;
4818 		}
4819 		if (inp->inp_lport == 0 || inp->inp_fport == 0) {
4820 			socket_unlock(so, 1);
4821 			continue;
4822 		}
4823 		if (inp->inp_last_outifp == NULL ||
4824 		    inp->inp_last_outifp->if_index != ifp->if_index) {
4825 			socket_unlock(so, 1);
4826 			continue;
4827 		}
4828 		if ((inp->inp_vflag & INP_IPV4) && frame_data_offset +
4829 		    sizeof(struct ip) + sizeof(struct tcphdr) >
4830 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4831 			socket_unlock(so, 1);
4832 			continue;
4833 		} else if (!(inp->inp_vflag & INP_IPV4) && frame_data_offset +
4834 		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) >
4835 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4836 			socket_unlock(so, 1);
4837 			continue;
4838 		}
4839 		/*
4840 		 * There is no point in waking up the device for connections
4841 		 * that are not established. Long-lived connections are meant
4842 		 * for processes that will send and receive data.
4843 		 */
4844 		if (tp->t_state != TCPS_ESTABLISHED) {
4845 			socket_unlock(so, 1);
4846 			continue;
4847 		}
4848 		/*
4849 		 * This inp has all the information that is needed to
4850 		 * generate an offload frame.
4851 		 */
4852 		frame = &frames_array[frame_index];
4853 		frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP;
4854 		frame->ether_type = (inp->inp_vflag & INP_IPV4) ?
4855 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 :
4856 		    IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
4857 		frame->interval = (uint16_t)(tp->t_keepidle > 0 ? tp->t_keepidle :
4858 		    tcp_keepidle);
4859 		frame->keep_cnt = (uint8_t)TCP_CONN_KEEPCNT(tp);
4860 		frame->keep_retry = (uint16_t)TCP_CONN_KEEPINTVL(tp);
4861 		if (so->so_options & SO_NOWAKEFROMSLEEP) {
4862 			frame->flags |=
4863 			    IFNET_KEEPALIVE_OFFLOAD_FLAG_NOWAKEFROMSLEEP;
4864 		}
4865 		frame->local_port = ntohs(inp->inp_lport);
4866 		frame->remote_port = ntohs(inp->inp_fport);
4867 		frame->local_seq = tp->snd_nxt;
4868 		frame->remote_seq = tp->rcv_nxt;
4869 		if (inp->inp_vflag & INP_IPV4) {
4870 			ASSERT(frame_data_offset + sizeof(struct ip) + sizeof(struct tcphdr) <= UINT8_MAX);
4871 			frame->length = (uint8_t)(frame_data_offset +
4872 			    sizeof(struct ip) + sizeof(struct tcphdr));
4873 			frame->reply_length =  frame->length;
4874 
4875 			frame->addr_length = sizeof(struct in_addr);
4876 			bcopy(&inp->inp_laddr, frame->local_addr,
4877 			    sizeof(struct in_addr));
4878 			bcopy(&inp->inp_faddr, frame->remote_addr,
4879 			    sizeof(struct in_addr));
4880 		} else {
4881 			struct in6_addr *ip6;
4882 
4883 			ASSERT(frame_data_offset + sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= UINT8_MAX);
4884 			frame->length = (uint8_t)(frame_data_offset +
4885 			    sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
4886 			frame->reply_length =  frame->length;
4887 
4888 			frame->addr_length = sizeof(struct in6_addr);
4889 			ip6 = (struct in6_addr *)(void *)frame->local_addr;
4890 			bcopy(&inp->in6p_laddr, ip6, sizeof(struct in6_addr));
4891 			if (IN6_IS_SCOPE_EMBED(ip6)) {
4892 				ip6->s6_addr16[1] = 0;
4893 			}
4894 
4895 			ip6 = (struct in6_addr *)(void *)frame->remote_addr;
4896 			bcopy(&inp->in6p_faddr, ip6, sizeof(struct in6_addr));
4897 			if (IN6_IS_SCOPE_EMBED(ip6)) {
4898 				ip6->s6_addr16[1] = 0;
4899 			}
4900 		}
4901 
4902 		/*
4903 		 * First the probe
4904 		 */
4905 		m = tcp_make_keepalive_frame(tp, ifp, TRUE);
4906 		if (m == NULL) {
4907 			socket_unlock(so, 1);
4908 			continue;
4909 		}
4910 		bcopy(m_mtod_current(m), frame->data + frame_data_offset, m->m_len);
4911 		m_freem(m);
4912 
4913 		/*
4914 		 * Now the response packet to incoming probes
4915 		 */
4916 		m = tcp_make_keepalive_frame(tp, ifp, FALSE);
4917 		if (m == NULL) {
4918 			socket_unlock(so, 1);
4919 			continue;
4920 		}
4921 		bcopy(m_mtod_current(m), frame->reply_data + frame_data_offset,
4922 		    m->m_len);
4923 		m_freem(m);
4924 
4925 		frame_index++;
4926 		socket_unlock(so, 1);
4927 	}
4928 	lck_rw_done(&tcbinfo.ipi_lock);
4929 	*used_frames_count = frame_index;
4930 }
4931 
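/*
 * Check whether a keepalive-offload frame belongs to this inpcb: the last
 * outbound interface, ports, and addresses must all match. Also releases
 * the caller's pcb want count.
 */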
4932 static bool
4933 inp_matches_kao_frame(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frame,
4934     struct inpcb *inp)
4935 {
4936 	if (inp->inp_ppcb == NULL) {
4937 		return false;
4938 	}
4939 	/* Release the want count */
4940 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
4941 		return false;
4942 	}
4943 	if (inp->inp_last_outifp == NULL ||
4944 	    inp->inp_last_outifp->if_index != ifp->if_index) {
4945 		return false;
4946 	}
4947 	if (frame->local_port != ntohs(inp->inp_lport) ||
4948 	    frame->remote_port != ntohs(inp->inp_fport)) {
4949 		return false;
4950 	}
4951 	if (inp->inp_vflag & INP_IPV4) {
4952 		if (memcmp(&inp->inp_laddr, frame->local_addr,
4953 		    sizeof(struct in_addr)) != 0 ||
4954 		    memcmp(&inp->inp_faddr, frame->remote_addr,
4955 		    sizeof(struct in_addr)) != 0) {
4956 			return false;
4957 		}
4958 	} else if (inp->inp_vflag & INP_IPV6) {
4959 		if (memcmp(&inp->inp_laddr, frame->local_addr,
4960 		    sizeof(struct in6_addr)) != 0 ||
4961 		    memcmp(&inp->inp_faddr, frame->remote_addr,
4962 		    sizeof(struct in6_addr)) != 0) {
4963 			return false;
4964 		}
4965 	} else {
4966 		return false;
4967 	}
4968 	return true;
4969 }
4970 
4971 int
4972 tcp_notify_kao_timeout(ifnet_t ifp,
4973     struct ifnet_keepalive_offload_frame *frame)
4974 {
4975 	struct inpcb *inp = NULL;
4976 	struct socket *so = NULL;
4977 	bool found = false;
4978 
4979 	/*
4980 	 * Unlock the list before posting the event on the matching socket.
4981 	 */
4982 	lck_rw_lock_shared(&tcbinfo.ipi_lock);
4983 
4984 	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
4985 		if ((so = inp->inp_socket) == NULL ||
4986 		    (so->so_state & SS_DEFUNCT)) {
4987 			continue;
4988 		}
4989 		if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
4990 			continue;
4991 		}
4992 		if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
4993 			continue;
4994 		}
4995 		if (inp->inp_ppcb == NULL ||
4996 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
4997 			continue;
4998 		}
4999 		socket_lock(so, 1);
5000 		if (inp_matches_kao_frame(ifp, frame, inp)) {
5001 			/*
5002 			 * Keep the matching socket locked
5003 			 */
5004 			found = true;
5005 			break;
5006 		}
5007 		socket_unlock(so, 1);
5008 	}
5009 	lck_rw_done(&tcbinfo.ipi_lock);
5010 
5011 	if (found) {
5012 		ASSERT(inp != NULL);
5013 		ASSERT(so != NULL);
5014 		ASSERT(so == inp->inp_socket);
5015 		/*
5016 		 * Drop the TCP connection like tcptimers() does
5017 		 */
5018 		struct tcpcb *tp = inp->inp_ppcb;
5019 
5020 		tcpstat.tcps_keepdrops++;
5021 		soevent(so,
5022 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
5023 		tp = tcp_drop(tp, ETIMEDOUT);
5024 
5025 		tcpstat.tcps_ka_offload_drops++;
5026 		os_log_info(OS_LOG_DEFAULT, "%s: dropped lport %u fport %u\n",
5027 		    __func__, frame->local_port, frame->remote_port);
5028 
5029 		socket_unlock(so, 1);
5030 	}
5031 
5032 	return 0;
5033 }
5034 
5035 errno_t
5036 tcp_notify_ack_id_valid(struct tcpcb *tp, struct socket *so,
5037     u_int32_t notify_id)
5038 {
5039 	struct tcp_notify_ack_marker *elm;
5040 
5041 	if (so->so_snd.sb_cc == 0) {
5042 		return ENOBUFS;
5043 	}
5044 
5045 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5046 		/* Duplicate id is not allowed */
5047 		if (elm->notify_id == notify_id) {
5048 			return EINVAL;
5049 		}
5050 		/* Duplicate position is not allowed */
5051 		if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) {
5052 			return EINVAL;
5053 		}
5054 	}
5055 	return 0;
5056 }
5057 
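/*
 * Add a marker at the current end of the send buffer (snd_una + sb_cc);
 * the notification fires once all data up to that sequence number has
 * been acknowledged by the peer.
 */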
5058 errno_t
5059 tcp_add_notify_ack_marker(struct tcpcb *tp, u_int32_t notify_id)
5060 {
5061 	struct tcp_notify_ack_marker *nm, *elm = NULL;
5062 	struct socket *so = tp->t_inpcb->inp_socket;
5063 
5064 	nm = kalloc_type(struct tcp_notify_ack_marker, M_WAIT | Z_ZERO);
5065 	if (nm == NULL) {
5066 		return ENOMEM;
5067 	}
5068 	nm->notify_id = notify_id;
5069 	nm->notify_snd_una = tp->snd_una + so->so_snd.sb_cc;
5070 
5071 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5072 		if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una)) {
5073 			break;
5074 		}
5075 	}
5076 
5077 	if (elm == NULL) {
5078 		VERIFY(SLIST_EMPTY(&tp->t_notify_ack));
5079 		SLIST_INSERT_HEAD(&tp->t_notify_ack, nm, notify_next);
5080 	} else {
5081 		SLIST_INSERT_AFTER(elm, nm, notify_next);
5082 	}
5083 	tp->t_notify_ack_count++;
5084 	return 0;
5085 }
5086 
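/*
 * Free every notify-ack marker still queued on the tcpcb and reset the
 * marker count.
 */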
5087 void
5088 tcp_notify_ack_free(struct tcpcb *tp)
5089 {
5090 	struct tcp_notify_ack_marker *elm, *next;
5091 	if (SLIST_EMPTY(&tp->t_notify_ack)) {
5092 		return;
5093 	}
5094 
5095 	SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
5096 		SLIST_REMOVE(&tp->t_notify_ack, elm, tcp_notify_ack_marker,
5097 		    notify_next);
5098 		kfree_type(struct tcp_notify_ack_marker, elm);
5099 	}
5100 	SLIST_INIT(&tp->t_notify_ack);
5101 	tp->t_notify_ack_count = 0;
5102 }
5103 
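/*
 * Post SO_FILT_HINT_NOTIFY_ACK once the first marker has been fully
 * acknowledged. Callers must guarantee the marker list is non-empty,
 * as SLIST_FIRST() is dereferenced unconditionally.
 */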
5104 inline void
5105 tcp_notify_acknowledgement(struct tcpcb *tp, struct socket *so)
5106 {
5107 	struct tcp_notify_ack_marker *elm;
5108 
5109 	elm = SLIST_FIRST(&tp->t_notify_ack);
5110 	if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5111 		soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOTIFY_ACK);
5112 	}
5113 }
5114 
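/*
 * Report how many markers have completed (everything up to their
 * sequence position acknowledged) and how many are still pending; the
 * completed count returned to the caller is capped at TCP_MAX_NOTIFY_ACK.
 */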
5115 void
5116 tcp_get_notify_ack_count(struct tcpcb *tp,
5117     struct tcp_notify_ack_complete *retid)
5118 {
5119 	struct tcp_notify_ack_marker *elm;
5120 	uint32_t complete = 0;
5121 
5122 	SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5123 		if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5124 			ASSERT(complete < UINT32_MAX);
5125 			complete++;
5126 		} else {
5127 			break;
5128 		}
5129 	}
5130 	retid->notify_pending = tp->t_notify_ack_count - complete;
5131 	retid->notify_complete_count = min(TCP_MAX_NOTIFY_ACK, complete);
5132 }
5133 
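/*
 * Copy out the ids of completed markers, unlinking and freeing each one
 * as it is reported; stop at the first marker that is still
 * unacknowledged or when notify_complete_count ids have been returned.
 */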
5134 void
5135 tcp_get_notify_ack_ids(struct tcpcb *tp,
5136     struct tcp_notify_ack_complete *retid)
5137 {
5138 	size_t i = 0;
5139 	struct tcp_notify_ack_marker *elm, *next;
5140 
5141 	SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
5142 		if (i >= retid->notify_complete_count) {
5143 			break;
5144 		}
5145 		if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5146 			retid->notify_complete_id[i++] = elm->notify_id;
5147 			SLIST_REMOVE(&tp->t_notify_ack, elm,
5148 			    tcp_notify_ack_marker, notify_next);
5149 			kfree_type(struct tcp_notify_ack_marker, elm);
5150 			tp->t_notify_ack_count--;
5151 		} else {
5152 			break;
5153 		}
5154 	}
5155 }
5156 
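/*
 * Return true when a TCP socket has at least one completed notify-ack
 * marker waiting to be read.
 */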
5157 bool
5158 tcp_notify_ack_active(struct socket *so)
5159 {
5160 	if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
5161 	    SOCK_TYPE(so) == SOCK_STREAM) {
5162 		struct tcpcb *tp = intotcpcb(sotoinpcb(so));
5163 
5164 		if (!SLIST_EMPTY(&tp->t_notify_ack)) {
5165 			struct tcp_notify_ack_marker *elm;
5166 			elm = SLIST_FIRST(&tp->t_notify_ack);
5167 			if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5168 				return true;
5169 			}
5170 		}
5171 	}
5172 	return false;
5173 }
5174 
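/*
 * Return the number of bytes in the send buffer that have not been
 * transmitted yet: sb_cc minus the bytes in flight (snd_max - th_ack,
 * less one to account for a FIN already sent). Only meaningful when the
 * socket keeps send-byte accounting (SB_SNDBYTE_CNT).
 */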
5175 inline int32_t
5176 inp_get_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
5177 {
5178 	struct inpcb *inp = sotoinpcb(so);
5179 	struct tcpcb *tp = intotcpcb(inp);
5180 
5181 	if ((so->so_snd.sb_flags & SB_SNDBYTE_CNT) &&
5182 	    so->so_snd.sb_cc > 0) {
5183 		int32_t unsent, sent;
5184 		sent = tp->snd_max - th_ack;
5185 		if (tp->t_flags & TF_SENTFIN) {
5186 			sent--;
5187 		}
5188 		unsent = so->so_snd.sb_cc - sent;
5189 		return unsent;
5190 	}
5191 	return 0;
5192 }
5193 
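/*
 * Assemble the 3-bit Accurate ECN (ACE) field: ECE contributes 1, CWR
 * contributes 2 and AE contributes 4. AE sits above the classic 8 flag
 * bits, hence the read from th_x2 with TH_AE shifted down by 8. A header
 * with CWR and AE set, for instance, yields ace = 2 + 4 = 6.
 */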
5194 uint8_t
5195 tcp_get_ace(struct tcphdr *th)
5196 {
5197 	uint8_t ace = 0;
5198 	if (th->th_flags & TH_ECE) {
5199 		ace += 1;
5200 	}
5201 	if (th->th_flags & TH_CWR) {
5202 		ace += 2;
5203 	}
5204 	if (th->th_x2 & (TH_AE >> 8)) {
5205 		ace += 4;
5206 	}
5207 
5208 	return ace;
5209 }
5210 
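/*
 * Increment the per-flow statistic _stat_ on the interface's IPv4 or
 * IPv6 counter block depending on _ipv4_; for example,
 * IFP_PER_FLOW_STAT(ifs->ipv4, ecn_client_setup) bumps
 * ifp->if_ipv4_stat->ecn_client_setup for an IPv4 flow.
 */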
5211 #define IFP_PER_FLOW_STAT(_ipv4_, _stat_) { \
5212 	if (_ipv4_) { \
5213 	        ifp->if_ipv4_stat->_stat_++; \
5214 	} else { \
5215 	        ifp->if_ipv6_stat->_stat_++; \
5216 	} \
5217 }
5218 
5219 #define FLOW_ECN_ENABLED(_flags_) \
5220     ((_flags_ & (TE_ECN_ON)) == (TE_ECN_ON))
5221 
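/*
 * Fold the statistics collected for a single flow into the aggregate
 * per-interface ECN and link-quality counters. Flows marked local only
 * contribute the ECN negotiation outcome.
 */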
5222 void
5223 tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
5224     struct ifnet *ifp)
5225 {
5226 	if (ifp == NULL || !IF_FULLY_ATTACHED(ifp)) {
5227 		return;
5228 	}
5229 
5230 	ifnet_lock_shared(ifp);
5231 	if (ifs->ecn_flags & TE_SETUPSENT) {
5232 		if (ifs->ecn_flags & TE_CLIENT_SETUP) {
5233 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_client_setup);
5234 			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5235 				IFP_PER_FLOW_STAT(ifs->ipv4,
5236 				    ecn_client_success);
5237 			} else if (ifs->ecn_flags & TE_LOST_SYN) {
5238 				IFP_PER_FLOW_STAT(ifs->ipv4,
5239 				    ecn_syn_lost);
5240 			} else {
5241 				IFP_PER_FLOW_STAT(ifs->ipv4,
5242 				    ecn_peer_nosupport);
5243 			}
5244 		} else {
5245 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_server_setup);
5246 			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5247 				IFP_PER_FLOW_STAT(ifs->ipv4,
5248 				    ecn_server_success);
5249 			} else if (ifs->ecn_flags & TE_LOST_SYN) {
5250 				IFP_PER_FLOW_STAT(ifs->ipv4,
5251 				    ecn_synack_lost);
5252 			} else {
5253 				IFP_PER_FLOW_STAT(ifs->ipv4,
5254 				    ecn_peer_nosupport);
5255 			}
5256 		}
5257 	} else {
5258 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off_conn);
5259 	}
5260 	if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5261 		if (ifs->ecn_flags & TE_RECV_ECN_CE) {
5262 			tcpstat.tcps_ecn_conn_recv_ce++;
5263 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ce);
5264 		}
5265 		if (ifs->ecn_flags & TE_RECV_ECN_ECE) {
5266 			tcpstat.tcps_ecn_conn_recv_ece++;
5267 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ece);
5268 		}
5269 		if (ifs->ecn_flags & (TE_RECV_ECN_CE | TE_RECV_ECN_ECE)) {
5270 			if (ifs->txretransmitbytes > 0 ||
5271 			    ifs->rxoutoforderbytes > 0) {
5272 				tcpstat.tcps_ecn_conn_pl_ce++;
5273 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plce);
5274 			} else {
5275 				tcpstat.tcps_ecn_conn_nopl_ce++;
5276 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_noplce);
5277 			}
5278 		} else {
5279 			if (ifs->txretransmitbytes > 0 ||
5280 			    ifs->rxoutoforderbytes > 0) {
5281 				tcpstat.tcps_ecn_conn_plnoce++;
5282 				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plnoce);
5283 			}
5284 		}
5285 	}
5286 
5287 	/* Other stats are interesting for non-local connections only */
5288 	if (ifs->local) {
5289 		ifnet_lock_done(ifp);
5290 		return;
5291 	}
5292 
5293 	if (ifs->ipv4) {
5294 		ifp->if_ipv4_stat->timestamp = net_uptime();
5295 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5296 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_on);
5297 		} else {
5298 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_off);
5299 		}
5300 	} else {
5301 		ifp->if_ipv6_stat->timestamp = net_uptime();
5302 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5303 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_on);
5304 		} else {
5305 			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_off);
5306 		}
5307 	}
5308 
5309 	if (ifs->rxmit_drop) {
5310 		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
5311 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_on.rxmit_drop);
5312 		} else {
5313 			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop);
5314 		}
5315 	}
5316 	if (ifs->ecn_fallback_synloss) {
5317 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss);
5318 	}
5319 	if (ifs->ecn_fallback_droprst) {
5320 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst);
5321 	}
5322 	if (ifs->ecn_fallback_droprxmt) {
5323 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt);
5324 	}
5325 	if (ifs->ecn_fallback_ce) {
5326 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce);
5327 	}
5328 	if (ifs->ecn_fallback_reorder) {
5329 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder);
5330 	}
5331 	if (ifs->ecn_recv_ce > 0) {
5332 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce);
5333 	}
5334 	if (ifs->ecn_recv_ece > 0) {
5335 		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece);
5336 	}
5337 
5338 	tcp_flow_lim_stats(ifs, &ifp->if_lim_stat);
5339 	ifnet_lock_done(ifp);
5340 }
5341 
5342 #if SKYWALK
5343 
5344 #include <skywalk/core/skywalk_var.h>
5345 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
5346 
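/*
 * Register the connection's flow with the interface's flowswitch so the
 * nexus can aggregate inbound TCP segments for it; only done when
 * flowswitch TCP RX aggregation is enabled (NX_FSW_TCP_RX_AGG_ENABLED).
 * On success the flowswitch and flow UUIDs are saved in the tcpcb for
 * tcp_del_fsw_flow() to use.
 */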
5347 void
5348 tcp_add_fsw_flow(struct tcpcb *tp, struct ifnet *ifp)
5349 {
5350 	struct inpcb *inp = tp->t_inpcb;
5351 	struct socket *so = inp->inp_socket;
5352 	uuid_t fsw_uuid;
5353 	struct nx_flow_req nfr;
5354 	int err;
5355 
5356 	if (!NX_FSW_TCP_RX_AGG_ENABLED()) {
5357 		return;
5358 	}
5359 
5360 	if (ifp == NULL || kern_nexus_get_flowswitch_instance(ifp, fsw_uuid)) {
5361 		TCP_LOG_FSW_FLOW(tp, "skip ifp no fsw");
5362 		return;
5363 	}
5364 
5365 	memset(&nfr, 0, sizeof(nfr));
5366 
5367 	if (inp->inp_vflag & INP_IPV4) {
5368 		ASSERT(!(inp->inp_laddr.s_addr == INADDR_ANY ||
5369 		    inp->inp_faddr.s_addr == INADDR_ANY ||
5370 		    IN_MULTICAST(ntohl(inp->inp_laddr.s_addr)) ||
5371 		    IN_MULTICAST(ntohl(inp->inp_faddr.s_addr))));
5372 		nfr.nfr_saddr.sin.sin_len = sizeof(struct sockaddr_in);
5373 		nfr.nfr_saddr.sin.sin_family = AF_INET;
5374 		nfr.nfr_saddr.sin.sin_port = inp->inp_lport;
5375 		memcpy(&nfr.nfr_saddr.sin.sin_addr, &inp->inp_laddr,
5376 		    sizeof(struct in_addr));
5377 		nfr.nfr_daddr.sin.sin_len = sizeof(struct sockaddr_in);
5378 		nfr.nfr_daddr.sin.sin_family = AF_INET;
5379 		nfr.nfr_daddr.sin.sin_port = inp->inp_fport;
5380 		memcpy(&nfr.nfr_daddr.sin.sin_addr, &inp->inp_faddr,
5381 		    sizeof(struct in_addr));
5382 	} else {
5383 		ASSERT(!(IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
5384 		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) ||
5385 		    IN6_IS_ADDR_MULTICAST(&inp->in6p_laddr) ||
5386 		    IN6_IS_ADDR_MULTICAST(&inp->in6p_faddr)));
5387 		nfr.nfr_saddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
5388 		nfr.nfr_saddr.sin6.sin6_family = AF_INET6;
5389 		nfr.nfr_saddr.sin6.sin6_port = inp->inp_lport;
5390 		memcpy(&nfr.nfr_saddr.sin6.sin6_addr, &inp->in6p_laddr,
5391 		    sizeof(struct in6_addr));
5392 		nfr.nfr_daddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
5393 		nfr.nfr_daddr.sin6.sin6_family = AF_INET6;
5394 		nfr.nfr_daddr.sin6.sin6_port = inp->inp_fport;
5395 		memcpy(&nfr.nfr_daddr.sin6.sin6_addr, &inp->in6p_faddr,
5396 		    sizeof(struct in6_addr));
5397 		/* clear embedded scope ID */
5398 		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_saddr.sin6.sin6_addr)) {
5399 			nfr.nfr_saddr.sin6.sin6_addr.s6_addr16[1] = 0;
5400 		}
5401 		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_daddr.sin6.sin6_addr)) {
5402 			nfr.nfr_daddr.sin6.sin6_addr.s6_addr16[1] = 0;
5403 		}
5404 	}
5405 
5406 	nfr.nfr_nx_port = 1;
5407 	nfr.nfr_ip_protocol = IPPROTO_TCP;
5408 	nfr.nfr_transport_protocol = IPPROTO_TCP;
5409 	nfr.nfr_flags = NXFLOWREQF_ASIS;
5410 	nfr.nfr_epid = (so != NULL ? so->last_pid : 0);
5411 	if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
5412 		nfr.nfr_port_reservation = inp->inp_netns_token;
5413 		nfr.nfr_flags |= NXFLOWREQF_EXT_PORT_RSV;
5414 	}
5415 	ASSERT(inp->inp_flowhash != 0);
5416 	nfr.nfr_inp_flowhash = inp->inp_flowhash;
5417 
5418 	uuid_generate_random(nfr.nfr_flow_uuid);
5419 	err = kern_nexus_flow_add(kern_nexus_shared_controller(), fsw_uuid,
5420 	    &nfr, sizeof(nfr));
5421 
5422 	if (err == 0) {
5423 		uuid_copy(tp->t_fsw_uuid, fsw_uuid);
5424 		uuid_copy(tp->t_flow_uuid, nfr.nfr_flow_uuid);
5425 	}
5426 
5427 	TCP_LOG_FSW_FLOW(tp, "add err %d\n", err);
5428 }
5429 
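/*
 * Undo tcp_add_fsw_flow(): remove the flow from the flowswitch and
 * clear the saved UUIDs. ENOENT and ENXIO are tolerated because the
 * nexus may already have detached.
 */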
5430 void
5431 tcp_del_fsw_flow(struct tcpcb *tp)
5432 {
5433 	if (uuid_is_null(tp->t_fsw_uuid) || uuid_is_null(tp->t_flow_uuid)) {
5434 		return;
5435 	}
5436 
5437 	struct nx_flow_req nfr;
5438 	uuid_copy(nfr.nfr_flow_uuid, tp->t_flow_uuid);
5439 
5440 	/* It's possible for this call to fail if the nexus has detached */
5441 	int err = kern_nexus_flow_del(kern_nexus_shared_controller(),
5442 	    tp->t_fsw_uuid, &nfr, sizeof(nfr));
5443 	VERIFY(err == 0 || err == ENOENT || err == ENXIO);
5444 
5445 	uuid_clear(tp->t_fsw_uuid);
5446 	uuid_clear(tp->t_flow_uuid);
5447 
5448 	TCP_LOG_FSW_FLOW(tp, "del err %d\n", err);
5449 }
5450 
5451 #endif /* SKYWALK */
5452