1 /*
2 * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
61 */
62 /*
63 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64 * support for mandatory and extensible security protections. This notice
65 * is included in support of clause 2.2 (b) of the Apple Public License,
66 * Version 2.0.
67 */
68
69 #include "tcp_includes.h"
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/domain.h>
78 #include <sys/proc.h>
79 #include <sys/kauth.h>
80 #include <sys/socket.h>
81 #include <sys/socketvar.h>
82 #include <sys/protosw.h>
83 #include <sys/random.h>
84 #include <sys/syslog.h>
85 #include <sys/mcache.h>
86 #include <kern/locks.h>
87 #include <kern/uipc_domain.h>
88 #include <kern/zalloc.h>
89
90 #include <dev/random/randomdev.h>
91
92 #include <net/route.h>
93 #include <net/if.h>
94 #include <net/content_filter.h>
95 #include <net/ntstat.h>
96 #include <net/multi_layer_pkt_log.h>
97
98 #define tcp_minmssoverload fring
99 #define _IP_VHL
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/ip.h>
103 #include <netinet/ip_icmp.h>
104 #include <netinet/ip6.h>
105 #include <netinet/icmp6.h>
106 #include <netinet/in_pcb.h>
107 #include <netinet6/in6_pcb.h>
108 #include <netinet/in_var.h>
109 #include <netinet/ip_var.h>
110 #include <netinet/icmp_var.h>
111 #include <netinet6/ip6_var.h>
112 #include <netinet/mptcp_var.h>
113 #include <netinet/tcp.h>
114 #include <netinet/tcp_fsm.h>
115 #include <netinet/tcp_seq.h>
116 #include <netinet/tcp_syncookie.h>
117 #include <netinet/tcp_timer.h>
118 #include <netinet/tcp_var.h>
119 #include <netinet/tcp_cc.h>
120 #include <netinet/tcp_cache.h>
121 #include <kern/thread_call.h>
122
123 #include <netinet6/tcp6_var.h>
124 #include <netinet/tcpip.h>
125 #include <netinet/tcp_log.h>
126
127 #include <netinet6/ip6protosw.h>
128 #include <netinet6/esp.h>
129
130 #if IPSEC
131 #include <netinet6/ipsec.h>
132 #include <netinet6/ipsec6.h>
133 #endif /* IPSEC */
134
135 #if NECP
136 #include <net/necp.h>
137 #endif /* NECP */
138
139 #undef tcp_minmssoverload
140
141 #include <net/sockaddr_utils.h>
142
143 #include <corecrypto/ccaes.h>
144 #include <libkern/crypto/aes.h>
145 #include <libkern/crypto/md5.h>
146 #include <sys/kdebug.h>
147 #include <mach/sdt.h>
148 #include <pexpert/pexpert.h>
149 #include <mach/mach_time.h>
150 #include <os/ptrtools.h>
151
152 #define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
153
154 static tcp_cc tcp_ccgen;
155
156 struct mem_acct *tcp_memacct;
157
158 extern struct tcptimerlist tcp_timer_list;
159 extern struct tcptailq tcp_tw_tailq;
160
161 extern int tcp_awdl_rtobase;
162
163 SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED,
164 int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size");
165
166 SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt,
167 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS,
168 "Default TCP Maximum Segment Size for IPv6");
169
170 int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int,
171 struct sysctl_req *);
172 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key, CTLTYPE_STRING | CTLFLAG_WR,
173 0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
174
175 /* Current count of half-open TFO connections */
176 int tcp_tfo_halfcnt = 0;
177
178 /* Maximum backlog of half-open TFO connections */
179 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen_backlog,
180 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10,
181 "Backlog queue for half-open TFO connections");
182
183 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED,
184 int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER,
185 "Enable TCP Fastopen (RFC 7413)");
186
187 /* ToDo - remove once uTCP stops using it */
188 SYSCTL_SKMEM_TCP_INT(OID_AUTO, now_init, CTLFLAG_RD | CTLFLAG_LOCKED,
189 uint32_t, tcp_now_init, 0, "Initial tcp now value");
190
191 /* ToDo - remove once uTCP stops using it */
192 SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED,
193 uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in microseconds");
194
195 /*
196 * Minimum MSS we accept and use. This prevents DoS attacks where
197 * we are forced to a ridiculously low MSS like 20 and send hundreds
198 * of packets instead of one. The effect scales with the available
199 * bandwidth and quickly saturates the CPU and network interface
200 * with packet generation and sending. Set to zero to disable MINMSS
201 * checking. This setting prevents us from sending packets that are too small.
202 */
203 SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
204 int, tcp_minmss, TCP_MINMSS, "Minimum TCP Maximum Segment Size");
205
206 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
207 &tcbinfo.ipi_count, 0, "Number of active PCBs");
208
209 SYSCTL_SKMEM_TCP_INT(OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED,
210 static int, icmp_may_rst, 1,
211 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
212
213 int tcp_do_timestamps = 1;
214 #if (DEVELOPMENT || DEBUG)
215 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_timestamps,
216 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_timestamps, 0, "enable TCP timestamps");
217 #endif /* (DEVELOPMENT || DEBUG) */
218
219 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
220 int, tcp_TCPTV_MIN, 100, "min rtt value allowed");
221
222 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rexmt_slop, CTLFLAG_RW,
223 int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout");
224
225 SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
226 __private_extern__ int, tcp_use_randomport, 0,
227 "Randomize TCP port numbers");
228
229 SYSCTL_SKMEM_TCP_INT(OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
230 __private_extern__ int, tcp_win_scale, 3, "Window scaling factor");
231
232 #if (DEVELOPMENT || DEBUG)
233 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
234 CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
235 "Initalize RTT from route cache");
236 #else
237 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
238 CTLFLAG_RD | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
239 "Initalize RTT from route cache");
240 #endif /* (DEVELOPMENT || DEBUG) */
241
242 static int tso_debug = 0;
243 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
244 &tso_debug, 0, "TSO verbosity");
245
246 static int tcp_rxt_seg_max = 1024;
247 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rxt_seg_max, CTLFLAG_RW | CTLFLAG_LOCKED,
248 &tcp_rxt_seg_max, 0, "");
249
250 static unsigned long tcp_rxt_seg_drop = 0;
251 SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, rxt_seg_drop, CTLFLAG_RD | CTLFLAG_LOCKED,
252 &tcp_rxt_seg_drop, "");
253
254 static void tcp_notify(struct inpcb *, int);
255
256 static KALLOC_TYPE_DEFINE(tcp_bwmeas_zone, struct bwmeas, NET_KT_DEFAULT);
257 KALLOC_TYPE_DEFINE(tcp_reass_zone, struct tseg_qent, NET_KT_DEFAULT);
258 KALLOC_TYPE_DEFINE(tcp_rxt_seg_zone, struct tcp_rxt_seg, NET_KT_DEFAULT);
259 KALLOC_TYPE_DEFINE(tcp_seg_sent_zone, struct tcp_seg_sent, NET_KT_DEFAULT);
260
261 extern int slowlink_wsize; /* window correction for slow links */
262 extern int path_mtu_discovery;
263
264 static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb);
265
266 #define TCP_BWMEAS_BURST_MINSIZE 6
267 #define TCP_BWMEAS_BURST_MAXSIZE 25
268
269 /*
270 * Target size of TCP PCB hash tables. Must be a power of two.
271 *
272 * Note that this can be overridden by the kernel environment
273 * variable net.inet.tcp.tcbhashsize
274 */
275 #ifndef TCBHASHSIZE
276 #define TCBHASHSIZE CONFIG_TCBHASHSIZE
277 #endif
278
279 __private_extern__ int tcp_tcbhashsize = TCBHASHSIZE;
280 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED,
281 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
282
283 /*
284 * This is the actual shape of what we allocate using the zone
285 * allocator. Doing it this way allows us to protect both structures
286 * using the same generation count, and also eliminates the overhead
287 * of allocating tcpcbs separately. By hiding the structure here,
288 * we avoid changing most of the rest of the code (although it needs
289 * to be changed, eventually, for greater efficiency).
290 */
291 #define ALIGNMENT 32
292 struct inp_tp {
293 struct inpcb inp;
294 struct tcpcb tcb __attribute__((aligned(ALIGNMENT)));
295 };
296 #undef ALIGNMENT
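
/*
 * Illustrative check (a sketch, not compiled in): the aligned
 * attribute above guarantees that the tcpcb starts on a 32-byte
 * boundary inside the combined allocation, so both structures are
 * carved out of a single zone element.
 */
#if 0
_Static_assert(offsetof(struct inp_tp, tcb) % 32 == 0,
    "tcpcb must start on a 32-byte boundary inside struct inp_tp");
#endif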
297
298 static KALLOC_TYPE_DEFINE(tcpcbzone, struct inp_tp, NET_KT_DEFAULT);
299
300 os_log_t tcp_mpkl_log_object = NULL;
301
302 static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
303
304 int tcp_notsent_lowat_check(struct socket *so);
305 static void tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
306 struct if_lim_perf_stat *stat);
307 static void tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
308 struct if_tcp_ecn_perf_stat *stat);
309
310 static aes_encrypt_ctx tfo_ctx; /* Crypto-context for TFO */
311
312 void
313 tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out __sized_by(blk_size), size_t blk_size)
314 {
315 u_char in[CCAES_BLOCK_SIZE];
316 int isipv6 = inp->inp_vflag & INP_IPV6;
317
318 VERIFY(blk_size == CCAES_BLOCK_SIZE);
319
320 bzero(&in[0], CCAES_BLOCK_SIZE);
321 bzero(&out[0], CCAES_BLOCK_SIZE);
322
323 if (isipv6) {
324 memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr));
325 } else {
326 memcpy(in, &inp->inp_faddr, sizeof(struct in_addr));
327 }
328
329 aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx);
330 }
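
/*
 * In effect the cookie is the AES-128 encryption of a single block
 * holding the zero-padded peer address (with a NULL IV, the one-block
 * CBC call above reduces to plain ECB). Because tcp_tfo_init() installs
 * a fresh random key at boot, the cookie is cheap to recompute on every
 * SYN yet unpredictable to off-path hosts.
 */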
331
332 __private_extern__ int
333 tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1,
334 __unused int arg2, struct sysctl_req *req)
335 {
336 int error = 0;
337 /*
338 * TFO-key is expressed as a string in hex format
339 * +1 to account for the \0 char
340 * +1 because sysctl_io_string() expects a string length but the sysctl command
341 * now includes the terminating \0 in newlen -- see rdar://77205344
342 */
343 char keystring[TCP_FASTOPEN_KEYLEN * 2 + 2];
344 u_int32_t key[TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)];
345 int i;
346 size_t ks_len;
347
348 /*
349 * sysctl_io_string copies keystring into the oldptr of the sysctl_req.
350 * Make sure everything is zero, to avoid putting garbage in there or
351 * leaking the stack.
352 */
353 bzero(keystring, sizeof(keystring));
354
355 error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL);
356 if (error) {
357 os_log(OS_LOG_DEFAULT,
358 "%s: sysctl_io_string() error %d, req->newlen %lu, sizeof(keystring) %lu",
359 __func__, error, req->newlen, sizeof(keystring));
360 goto exit;
361 }
362 if (req->newptr == USER_ADDR_NULL) {
363 goto exit;
364 }
365
366 ks_len = strbuflen(keystring, sizeof(keystring));
367 if (ks_len != TCP_FASTOPEN_KEYLEN * 2) {
368 os_log(OS_LOG_DEFAULT,
369 "%s: strlen(keystring) %lu != TCP_FASTOPEN_KEYLEN * 2 %u, newlen %lu",
370 __func__, ks_len, TCP_FASTOPEN_KEYLEN * 2, req->newlen);
371 error = EINVAL;
372 goto exit;
373 }
374
375 for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) {
376 /*
377 * We walk the keystring in 8-character steps; each step
378 * encodes 4 bytes in hex.
379 */
380 if (sscanf(__unsafe_null_terminated_from_indexable(&keystring[i * 8]), "%8x", &key[i]) != 1) {
381 error = EINVAL;
382 os_log(OS_LOG_DEFAULT,
383 "%s: sscanf() != 1, error EINVAL", __func__);
384 goto exit;
385 }
386 }
387
388 aes_encrypt_key128((u_char *)key, &tfo_ctx);
389
390 exit:
391 return error;
392 }
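
/*
 * Usage sketch (the exact invocation below is illustrative): with
 * TCP_FASTOPEN_KEYLEN == 16, the handler expects the key as 32 hex
 * characters and parses it in 8-character chunks into four 32-bit
 * words, e.g.
 *
 *   sysctl net.inet.tcp.fastopen_key=000102030405060708090a0b0c0d0e0f
 */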
393
394 static int scale_to_powerof2(int size);
395
396 /*
397 * This helper routine returns one of the following scaled values of size:
398 * 1. The rounded-down power of two of size, if size is not a power of
399 * two and rounding up would overflow.
400 * OR
401 * 2. The rounded-up power of two of size, if size is not a power of
402 * two and rounding up does not overflow.
403 * OR
404 * 3. size itself, if it is already a power of two.
405 */
406 static int
407 scale_to_powerof2(int size)
408 {
409 /* Handle special case of size = 0 */
410 int ret = size ? size : 1;
411
412 if (!powerof2(ret)) {
413 while (!powerof2(size)) {
414 /*
415 * Clear the least significant set bit
416 * until only the highest set bit is
417 * left, at which point size is the
418 * rounded-down power of two.
419 */
420 size = size & (size - 1);
421 }
422
423 /* Check for overflow when rounding up */
424 if (0 == (size << 1)) {
425 ret = size;
426 } else {
427 ret = size << 1;
428 }
429 }
430
431 return ret;
432 }
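
/*
 * A minimal sketch (not compiled in) of how scale_to_powerof2()
 * behaves for typical hash-size inputs:
 */
#if 0
static void
scale_to_powerof2_examples(void)
{
	assert(scale_to_powerof2(0) == 1);      /* special case: 0 maps to 1 */
	assert(scale_to_powerof2(512) == 512);  /* already a power of two */
	assert(scale_to_powerof2(20) == 32);    /* rounded up */
	assert(scale_to_powerof2(600) == 1024); /* rounded up */
}
#endif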
433
434 /*
435 * Round a floating-point value up to the next integer.
436 * E.g. 1.3 will round up to 2.
437 */
438 uint32_t
439 tcp_ceil(double a)
440 {
441 double res = (uint32_t) a;
442 return (uint32_t)(res + (res < a));
443 }
444
445 uint32_t
446 tcp_round_to(uint32_t val, uint32_t round)
447 {
448 /*
449 * Round up or down based on the midpoint: when rounding to a
450 * multiple of 10, 16 rounds to 20 and 14 rounds to 10.
451 */
452 return ((val + (round / 2)) / round) * round;
453 }
454
455 /*
456 * Round up to the next multiple of base.
457 * E.g. for a base of 64, 65 will become 128,
458 * 2896 will become 2944.
459 */
460 uint32_t
461 tcp_round_up(uint32_t val, uint32_t base)
462 {
463 if (base == 1 || val % base == 0) {
464 return val;
465 }
466
467 return ((val + base) / base) * base;
468 }
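
/*
 * Worked examples (a sketch, not compiled in) for the three rounding
 * helpers above:
 */
#if 0
static void
tcp_rounding_examples(void)
{
	assert(tcp_ceil(1.3) == 2);          /* fractions round up */
	assert(tcp_ceil(2.0) == 2);          /* exact values pass through */
	assert(tcp_round_to(16, 10) == 20);  /* at or above the midpoint */
	assert(tcp_round_to(14, 10) == 10);  /* below the midpoint */
	assert(tcp_round_up(65, 64) == 128);
	assert(tcp_round_up(2896, 64) == 2944);
	assert(tcp_round_up(64, 64) == 64);  /* multiples are unchanged */
}
#endif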
469
470 uint32_t
471 ntoh24(u_char *p __sized_by(3))
472 {
473 uint32_t v;
474
475 v = (uint32_t)(p[0] << 16);
476 v |= (uint32_t)(p[1] << 8);
477 v |= (uint32_t)(p[2] << 0);
478 return v;
479 }
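
/*
 * Example: ntoh24() decodes a 3-byte big-endian (network order) field,
 * so the bytes { 0x12, 0x34, 0x56 } yield 0x123456.
 */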
480
481 uint32_t
482 tcp_packets_this_ack(struct tcpcb *tp, uint32_t acked)
483 {
484 return acked / tp->t_maxseg +
485 (((acked % tp->t_maxseg) != 0) ? 1 : 0);
486 }
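
/*
 * Example: with t_maxseg == 1448, an ACK covering 3000 bytes counts as
 * 3 packets: two full segments plus one partial 104-byte segment.
 */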
487
488 static void
489 tcp_tfo_init(void)
490 {
491 u_char key[TCP_FASTOPEN_KEYLEN];
492
493 read_frandom(key, sizeof(key));
494 aes_encrypt_key128(key, &tfo_ctx);
495 }
496
497 static u_char isn_secret[32];
498
499 /*
500 * TCP initialization
501 */
502 void
503 tcp_init(struct protosw *pp, struct domain *dp)
504 {
505 #pragma unused(dp)
506 static int tcp_initialized = 0;
507 struct inpcbinfo *pcbinfo;
508 struct timeval now;
509
510 VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
511
512 if (tcp_memacct == NULL) {
513 uint64_t hlimit = max_mem_actual >> 5;
514 tcp_memacct = mem_acct_register("TCP", hlimit, 80);
515 if (tcp_memacct == NULL) {
516 panic("mem_acct_register returned NULL");
517 }
518 }
519 pp->pr_mem_acct = tcp_memacct;
520
521 if (!os_atomic_cmpxchg(&tcp_initialized, 0, 1, relaxed)) {
522 return;
523 }
524
525 #if DEBUG || DEVELOPMENT
526 (void) PE_parse_boot_argn("tcp_rxt_seg_max", &tcp_rxt_seg_max,
527 sizeof(tcp_rxt_seg_max));
528 #endif /* DEBUG || DEVELOPMENT */
529
530 tcp_ccgen = 1;
531 tcp_keepinit = TCPTV_KEEP_INIT;
532 tcp_keepidle = TCPTV_KEEP_IDLE;
533 tcp_keepintvl = TCPTV_KEEPINTVL;
534 tcp_keepcnt = TCPTV_KEEPCNT;
535 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
536 tcp_msl = TCPTV_MSL;
537
538 microuptime(&now);
539 tcp_now = (uint32_t)now.tv_sec * 1000 + now.tv_usec / TCP_RETRANSHZ_TO_USEC;
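
/*
 * tcp_now advances at TCP_RETRANSHZ ticks per second (one tick per
 * millisecond, assuming TCP_RETRANSHZ_TO_USEC == 1000), so the line
 * above converts the uptime to seconds * 1000 + usec / 1000.
 */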
540
541 /* ToDo - remove once uTCP stops using it */
542 tcp_now_init = tcp_now;
543 tcp_microuptime_init = tcp_now;
544 SYSCTL_SKMEM_UPDATE_FIELD(tcp.microuptime_init, tcp_microuptime_init);
545 SYSCTL_SKMEM_UPDATE_FIELD(tcp.now_init, tcp_now_init);
546
547 tcp_tfo_init();
548 tcp_syncookie_init();
549
550 LIST_INIT(&tcb);
551 tcbinfo.ipi_listhead = &tcb;
552
553 pcbinfo = &tcbinfo;
554
555 /*
556 * allocate group, lock attributes and lock for tcp pcb mutexes
557 */
558 pcbinfo->ipi_lock_grp = lck_grp_alloc_init("tcppcb",
559 LCK_GRP_ATTR_NULL);
560 lck_attr_setdefault(&pcbinfo->ipi_lock_attr);
561 lck_rw_init(&pcbinfo->ipi_lock, pcbinfo->ipi_lock_grp,
562 &pcbinfo->ipi_lock_attr);
563
564 if (tcp_tcbhashsize == 0) {
565 /* Set to default */
566 tcp_tcbhashsize = 512;
567 }
568
569 if (!powerof2(tcp_tcbhashsize)) {
570 int old_hash_size = tcp_tcbhashsize;
571 tcp_tcbhashsize = scale_to_powerof2(tcp_tcbhashsize);
572 /* Lower limit of 16 */
573 if (tcp_tcbhashsize < 16) {
574 tcp_tcbhashsize = 16;
575 }
576 printf("WARNING: TCB hash size not a power of 2, "
577 "scaled from %d to %d.\n",
578 old_hash_size,
579 tcp_tcbhashsize);
580 }
581
582 hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_hashbase,
583 tcbinfo.ipi_hashbase_count);
584 tcbinfo.ipi_hashmask = tcbinfo.ipi_hashbase_count - 1;
585 hashinit_counted_by(tcp_tcbhashsize, tcbinfo.ipi_porthashbase,
586 tcbinfo.ipi_porthashbase_count);
587 tcbinfo.ipi_porthashmask = tcbinfo.ipi_porthashbase_count - 1;
588 tcbinfo.ipi_zone = tcpcbzone;
589
590 tcbinfo.ipi_gc = tcp_gc;
591 tcbinfo.ipi_timer = tcp_itimer;
592 in_pcbinfo_attach(&tcbinfo);
593
594 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
595 if (max_protohdr < TCP_MINPROTOHDR) {
596 max_protohdr = (int)P2ROUNDUP(TCP_MINPROTOHDR, sizeof(uint32_t));
597 }
598 if (max_linkhdr + max_protohdr > MCLBYTES) {
599 panic("tcp_init");
600 }
601 #undef TCP_MINPROTOHDR
602
603 /* Initialize time wait and timer lists */
604 TAILQ_INIT(&tcp_tw_tailq);
605
606 bzero(&tcp_timer_list, sizeof(tcp_timer_list));
607 LIST_INIT(&tcp_timer_list.lhead);
608 /*
609 * allocate group and attribute for the tcp timer list
610 */
611 tcp_timer_list.mtx_grp = lck_grp_alloc_init("tcptimerlist",
612 LCK_GRP_ATTR_NULL);
613 lck_mtx_init(&tcp_timer_list.mtx, tcp_timer_list.mtx_grp,
614 LCK_ATTR_NULL);
615
616 tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL);
617 if (tcp_timer_list.call == NULL) {
618 panic("failed to allocate call entry 1 in tcp_init");
619 }
620
621 /* Initialize TCP Cache */
622 tcp_cache_init();
623
624 tcp_mpkl_log_object = MPKL_CREATE_LOGOBJECT("com.apple.xnu.tcp");
625 if (tcp_mpkl_log_object == NULL) {
626 panic("MPKL_CREATE_LOGOBJECT failed");
627 }
628
629 if (PE_parse_boot_argn("tcp_log", &tcp_log_enable_flags, sizeof(tcp_log_enable_flags))) {
630 os_log(OS_LOG_DEFAULT, "tcp_init: set tcp_log_enable_flags to 0x%x", tcp_log_enable_flags);
631 }
632
633 if (PE_parse_boot_argn("tcp_link_heuristics", &tcp_link_heuristics_flags, sizeof(tcp_link_heuristics_flags))) {
634 os_log(OS_LOG_DEFAULT, "tcp_init: set tcp_link_heuristics_flags to 0x%x", tcp_link_heuristics_flags);
635 }
636
637 /*
638 * If more than 4GB of actual memory is available, increase the
639 * maximum allowed receive and send socket buffer size.
640 */
641 if (mem_actual >= (1ULL << (GBSHIFT + 2))) {
642 if (serverperfmode) {
643 tcp_autorcvbuf_max = 8 * 1024 * 1024;
644 tcp_autosndbuf_max = 8 * 1024 * 1024;
645 } else {
646 tcp_autorcvbuf_max = 4 * 1024 * 1024;
647 tcp_autosndbuf_max = 4 * 1024 * 1024;
648 }
649
650 SYSCTL_SKMEM_UPDATE_FIELD(tcp.autorcvbufmax, tcp_autorcvbuf_max);
651 SYSCTL_SKMEM_UPDATE_FIELD(tcp.autosndbufmax, tcp_autosndbuf_max);
652 }
653
654 /* Initialize the TCP CCA array */
655 tcp_cc_init();
656
657 read_frandom(&isn_secret, sizeof(isn_secret));
658 }
659
660 /*
661 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
662 * tcp_template used to store this data in mbufs, but we now recopy it out
663 * of the tcpcb each time to conserve mbufs.
664 */
665 void
666 tcp_fillheaders(struct mbuf *m, struct tcpcb *tp, void *ip_ptr, void *tcp_ptr,
667 struct sockaddr *local, struct sockaddr *remote)
668 {
669 struct inpcb *inp = tp->t_inpcb;
670 struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
671
672 bool isipv6 = false;
673
674 if (local != NULL && remote != NULL) {
675 isipv6 = (local->sa_family == AF_INET6);
676 } else {
677 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
678 }
679
680 if (isipv6) {
681 struct ip6_hdr *ip6;
682
683 ip6 = (struct ip6_hdr *)ip_ptr;
684 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
685 (inp->inp_flow & IPV6_FLOWINFO_MASK);
686 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
687 (IPV6_VERSION & IPV6_VERSION_MASK);
688 ip6->ip6_plen = htons(sizeof(struct tcphdr));
689 ip6->ip6_nxt = IPPROTO_TCP;
690 ip6->ip6_hlim = 0;
691 if (local != NULL) {
692 ip6->ip6_src = SIN6(local)->sin6_addr;
693 } else {
694 ip6->ip6_src = inp->in6p_laddr;
695 }
696 if (remote != NULL) {
697 ip6->ip6_dst = SIN6(remote)->sin6_addr;
698 } else {
699 ip6->ip6_dst = inp->in6p_faddr;
700 }
701
702 if (m->m_flags & M_PKTHDR) {
703 uint32_t lifscope = IFSCOPE_NONE, fifscope = IFSCOPE_NONE;
704 if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
705 lifscope = inp->inp_lifscope;
706 } else if (SIN6(local)->sin6_scope_id != IFSCOPE_NONE) {
707 lifscope = SIN6(local)->sin6_scope_id;
708 }
709 if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
710 fifscope = inp->inp_fifscope;
711 } else if (SIN6(remote)->sin6_scope_id != IFSCOPE_NONE) {
712 fifscope = SIN6(remote)->sin6_scope_id;
713 }
714 ip6_output_setsrcifscope(m, lifscope, NULL);
715 ip6_output_setdstifscope(m, fifscope, NULL);
716 }
717 tcp_hdr->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
718 htonl(sizeof(struct tcphdr) + IPPROTO_TCP));
719 } else {
720 struct ip *ip = (struct ip *) ip_ptr;
721
722 ip->ip_vhl = IP_VHL_BORING;
723 ip->ip_tos = 0;
724 ip->ip_len = 0;
725 ip->ip_id = 0;
726 ip->ip_off = 0;
727 ip->ip_ttl = 0;
728 ip->ip_sum = 0;
729 ip->ip_p = IPPROTO_TCP;
730 if (local != NULL) {
731 ip->ip_src = SIN(local)->sin_addr;
732 } else {
733 ip->ip_src = inp->inp_laddr;
734 }
735 if (remote != NULL) {
736 ip->ip_dst = SIN(remote)->sin_addr;
737 } else {
738 ip->ip_dst = inp->inp_faddr;
739 }
740 tcp_hdr->th_sum =
741 in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
742 htons(sizeof(struct tcphdr) + IPPROTO_TCP));
743 }
744 if (local != NULL) {
745 tcp_hdr->th_sport = SIN(local)->sin_port;
746 } else {
747 tcp_hdr->th_sport = inp->inp_lport;
748 }
749 if (remote != NULL) {
750 tcp_hdr->th_dport = SIN(remote)->sin_port;
751 } else {
752 tcp_hdr->th_dport = inp->inp_fport;
753 }
754 tcp_hdr->th_seq = 0;
755 tcp_hdr->th_ack = 0;
756 tcp_hdr->th_x2 = 0;
757 tcp_hdr->th_off = 5;
758 tcp_hdr->th_flags = 0;
759 tcp_hdr->th_win = 0;
760 tcp_hdr->th_urp = 0;
761 }
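
/*
 * Note on the checksum fields above: th_sum is seeded with only the
 * pseudo-header sum (source and destination address, protocol, and
 * TCP length), not a full checksum. A later pass (checksum offload,
 * or the software checksum path) folds in the TCP header and payload,
 * so templates built here stay valid for whatever is sent with them.
 */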
762
763 static uint8_t
764 tcp_filloptions(struct tcpopt *peer_to, uint16_t thflags, uint16_t mss, uint8_t rcv_scale,
765 uint32_t ts_offset, u_char *__counted_by(TCP_MAXOLEN) optp)
766 {
767 uint8_t optlen = 0;
768 struct tcpopt to;
769
770 to.to_flags = 0;
771
772 if (thflags & TH_SYN) {
773 to.to_mss = mss;
774 to.to_flags = TOF_MSS;
775 if (peer_to->to_flags & TOF_SCALE) {
776 to.to_wscale = rcv_scale;
777 to.to_flags |= TOF_SCALE;
778 }
779 if (peer_to->to_flags & TOF_SACKPERM) {
780 to.to_flags |= TOF_SACKPERM;
781 }
782 }
783 if ((peer_to->to_flags & TOF_TS)) {
784 uint32_t tcp_now_local = os_access_once(tcp_now);
785 to.to_tsval = ts_offset + tcp_now_local;
786 to.to_tsecr = peer_to->to_tsval;
787 to.to_flags |= TOF_TS;
788 }
789 optlen = tcp_addoptions(&to, optp, optp + TCP_MAXOLEN);
790
791 return optlen;
792 }
793
794 /*
795 * Create template to be used to send tcp packets on a connection.
796 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
797 * use for this function is in keepalives, which use tcp_respond.
798 */
799 struct tcptemp *
800 tcp_maketemplate(struct tcpcb *tp, struct mbuf **mp,
801 struct sockaddr *local, struct sockaddr *remote)
802 {
803 struct mbuf *m;
804 struct tcptemp *n;
805
806 *mp = m = m_get(M_DONTWAIT, MT_HEADER);
807 if (m == NULL) {
808 return NULL;
809 }
810 m->m_len = sizeof(struct tcptemp);
811 n = mtod(m, struct tcptemp *);
812
813 tcp_fillheaders(m, tp, (void *)&n->tt_ipgen, (void *)&n->tt_t, local, remote);
814 return n;
815 }
816
817 /*
818 * Send a single message to the TCP at address specified by
819 * the given TCP/IP header. If m == 0, then we make a copy
820 * of the tcpiphdr at ti and send directly to the addressed host.
821 * This is used to force keep alive messages out using the TCP
822 * template for a connection. If flags are given then we send
823 * a message back to the TCP which originated the segment ti,
824 * and discard the mbuf containing it and any other attached mbufs.
825 *
826 * In any case the ack and sequence number of the transmitted
827 * segment are as specified by the parameters.
828 *
829 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
830 */
831 void
832 tcp_respond(struct tcpcb *tp, void *ipgen __sized_by(ipgen_size), size_t ipgen_size __unused,
833 struct tcphdr *th, struct mbuf *m, tcp_seq ack, tcp_seq seq, uint32_t rcv_win, uint16_t flags,
834 struct tcpopt *peer_to, uint16_t mss, uint8_t rcv_scale, uint32_t ts_offset,
835 struct tcp_respond_args *tra, bool send_syncookie)
836 {
837 uint16_t tlen;
838 uint8_t optlen = 0;
839 int win = 0;
840 struct route *ro = 0;
841 struct route sro;
842 struct ip *ip;
843 struct tcphdr *nth;
844 struct route_in6 *ro6 = 0;
845 struct route_in6 sro6;
846 struct ip6_hdr *ip6;
847 int isipv6;
848 struct ifnet *outif;
849 int sotc = SO_TC_UNSPEC;
850 bool check_qos_marking_again = FALSE;
851 uint32_t sifscope = IFSCOPE_NONE, fifscope = IFSCOPE_NONE;
852
853 isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
854 ip6 = ipgen;
855 ip = ipgen;
856
857 if (tp) {
858 check_qos_marking_again = tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE ? FALSE : TRUE;
859 sifscope = tp->t_inpcb->inp_lifscope;
860 fifscope = tp->t_inpcb->inp_fifscope;
861 if (!(flags & TH_RST)) {
862 win = tcp_sbspace(tp);
863 if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) {
864 win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
865 }
866 }
867 if (isipv6) {
868 ro6 = &tp->t_inpcb->in6p_route;
869 } else {
870 ro = &tp->t_inpcb->inp_route;
871 }
872 } else {
873 if (isipv6) {
874 ro6 = &sro6;
875 bzero(ro6, sizeof(*ro6));
876 } else {
877 ro = &sro;
878 bzero(ro, sizeof(*ro));
879 }
880 if (rcv_win != 0) {
881 /* Set TCP receive window if provided */
882 win = rcv_win;
883 }
884 }
885 if (m == 0) {
886 m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */
887 if (m == NULL) {
888 return;
889 }
890 tlen = 0;
891 m->m_data += max_linkhdr;
892 if (isipv6) {
893 VERIFY((MHLEN - max_linkhdr) >=
894 (sizeof(*ip6) + sizeof(*nth)));
895 bcopy((caddr_t)ip6, mtod(m, caddr_t),
896 sizeof(struct ip6_hdr));
897 ip6 = mtod(m, struct ip6_hdr *);
898 nth = (struct tcphdr *)(void *)(ip6 + 1);
899 } else {
900 VERIFY((MHLEN - max_linkhdr) >=
901 (sizeof(*ip) + sizeof(*nth)));
902 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
903 ip = mtod(m, struct ip *);
904 nth = (struct tcphdr *)(void *)(ip + 1);
905 }
906 bcopy(th, nth, sizeof(struct tcphdr));
907 #if MPTCP
908 if ((tp) && (tp->t_mpflags & TMPF_RESET)) {
909 flags = (TH_RST | TH_ACK);
910 } else if (!send_syncookie)
911 #endif
912 flags = TH_ACK;
913 } else {
914 m_freem(m->m_next);
915 m->m_next = 0;
916 m->m_data = (uintptr_t)ipgen;
917 /* m_len is set later */
918 tlen = 0;
919 #define xchg(a, b, type) { type t; t = a; a = b; b = t; }
920 if (isipv6) {
921 ip6_getsrcifaddr_info(m, &sifscope, NULL);
922 ip6_getdstifaddr_info(m, &fifscope, NULL);
923 if (!in6_embedded_scope) {
924 m->m_pkthdr.pkt_flags &= ~PKTF_IFAINFO;
925 }
926 /* Expect 32-bit aligned IP on strict-align platforms */
927 IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6);
928 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
929 nth = (struct tcphdr *)(void *)(ip6 + 1);
930 } else {
931 /* Expect 32-bit aligned IP on strict-align platforms */
932 IP_HDR_STRICT_ALIGNMENT_CHECK(ip);
933 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
934 nth = (struct tcphdr *)(void *)(ip + 1);
935 }
936 if (th != nth) {
937 /*
938 * This usually happens when an extension header
939 * sits between the IPv6 header and the
940 * TCP header.
941 */
942 nth->th_sport = th->th_sport;
943 nth->th_dport = th->th_dport;
944 }
945 xchg(nth->th_dport, nth->th_sport, n_short);
946 #undef xchg
947 }
948
949 if (peer_to != NULL) {
950 u_char *optp = (u_char *)(nth + 1);
951 optlen = tcp_filloptions(peer_to, flags, mss, rcv_scale, ts_offset, optp);
952 tlen += optlen;
953 }
954
955 if (isipv6) {
956 ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) +
957 tlen));
958 tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
959 ip6_output_setsrcifscope(m, sifscope, NULL);
960 ip6_output_setdstifscope(m, fifscope, NULL);
961 } else {
962 tlen += sizeof(struct tcpiphdr);
963 ip->ip_len = tlen;
964 ip->ip_ttl = (uint8_t)ip_defttl;
965 }
966 m->m_len = tlen;
967 m->m_pkthdr.len = tlen;
968 m->m_pkthdr.rcvif = 0;
969 if (tra->keep_alive) {
970 m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
971 }
972
973 nth->th_seq = htonl(seq);
974 nth->th_ack = htonl(ack);
975 nth->th_x2 = 0;
976 nth->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
977 tcp_set_flags(nth, flags);
978 if (tp) {
979 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
980 } else {
981 nth->th_win = htons((u_short)win);
982 }
983 nth->th_urp = 0;
984 if (isipv6) {
985 nth->th_sum = 0;
986 nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
987 htonl((tlen - sizeof(struct ip6_hdr)) + IPPROTO_TCP));
988 m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
989 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
990 ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
991 ro6 && ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
992 } else {
993 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
994 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
995 m->m_pkthdr.csum_flags = CSUM_TCP;
996 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
997 }
998 #if NECP
999 necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0, 0);
1000 #endif /* NECP */
1001
1002 #if IPSEC
1003 if (tp != NULL && tp->t_inpcb->inp_sp != NULL &&
1004 ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
1005 m_freem(m);
1006 return;
1007 }
1008 #endif
1009
1010 if (tp != NULL) {
1011 u_int32_t svc_flags = 0;
1012 if (isipv6) {
1013 svc_flags |= PKT_SCF_IPV6;
1014 }
1015 sotc = tp->t_inpcb->inp_socket->so_traffic_class;
1016 if ((flags & TH_RST) == 0) {
1017 set_packet_service_class(m, tp->t_inpcb->inp_socket,
1018 sotc, svc_flags);
1019 } else {
1020 m_set_service_class(m, MBUF_SC_BK_SYS);
1021 }
1022
1023 /* Embed flowhash and flow control flags */
1024 m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
1025 m->m_pkthdr.pkt_flowid = tp->t_inpcb->inp_flowhash;
1026 m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
1027 m->m_pkthdr.pkt_proto = IPPROTO_TCP;
1028 m->m_pkthdr.tx_tcp_pid = tp->t_inpcb->inp_socket->last_pid;
1029 m->m_pkthdr.tx_tcp_e_pid = tp->t_inpcb->inp_socket->e_pid;
1030
1031 if (flags & TH_RST) {
1032 m->m_pkthdr.comp_gencnt = tp->t_comp_ack_gencnt;
1033 }
1034 } else {
1035 if (flags & TH_RST) {
1036 m->m_pkthdr.comp_gencnt = TCP_ACK_COMPRESSION_DUMMY;
1037 m_set_service_class(m, MBUF_SC_BK_SYS);
1038 }
1039 }
1040
1041 if (isipv6) {
1042 struct ip6_out_args ip6oa;
1043 bzero(&ip6oa, sizeof(ip6oa));
1044 ip6oa.ip6oa_boundif = tra->ifscope;
1045 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
1046 ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
1047 ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1048
1049 if (tra->ifscope != IFSCOPE_NONE) {
1050 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
1051 }
1052 if (tra->nocell) {
1053 ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
1054 }
1055 if (tra->noexpensive) {
1056 ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
1057 }
1058 if (tra->noconstrained) {
1059 ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
1060 }
1061 if (tra->awdl_unrestricted) {
1062 ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;
1063 }
1064 if (tra->intcoproc_allowed) {
1065 ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED;
1066 }
1067 if (tra->management_allowed) {
1068 ip6oa.ip6oa_flags |= IP6OAF_MANAGEMENT_ALLOWED;
1069 }
1070 if (tra->ultra_constrained_allowed) {
1071 ip6oa.ip6oa_flags |= IP6OAF_ULTRA_CONSTRAINED_ALLOWED;
1072 }
1073 ip6oa.ip6oa_sotc = sotc;
1074 if (tp != NULL) {
1075 if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1076 ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
1077 }
1078 ip6oa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
1079 if (check_qos_marking_again) {
1080 ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY;
1081 }
1082 ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1083 }
1084 (void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
1085 NULL, &ip6oa);
1086
1087 if (check_qos_marking_again) {
1088 struct inpcb *inp = tp->t_inpcb;
1089 inp->inp_policyresult.results.qos_marking_gencount = ip6oa.qos_marking_gencount;
1090 if (ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) {
1091 inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1092 } else {
1093 inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1094 }
1095 }
1096
1097 if (tp != NULL && ro6 != NULL && ro6->ro_rt != NULL &&
1098 (outif = ro6->ro_rt->rt_ifp) !=
1099 tp->t_inpcb->in6p_last_outifp) {
1100 tp->t_inpcb->in6p_last_outifp = outif;
1101 #if SKYWALK
1102 if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1103 netns_set_ifnet(&tp->t_inpcb->inp_netns_token,
1104 tp->t_inpcb->in6p_last_outifp);
1105 }
1106 #endif /* SKYWALK */
1107 }
1108
1109 if (ro6 == &sro6) {
1110 ROUTE_RELEASE(ro6);
1111 }
1112 } else {
1113 struct ip_out_args ipoa;
1114 bzero(&ipoa, sizeof(ipoa));
1115 ipoa.ipoa_boundif = tra->ifscope;
1116 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
1117 ipoa.ipoa_sotc = SO_TC_UNSPEC;
1118 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1119
1120 if (tra->ifscope != IFSCOPE_NONE) {
1121 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1122 }
1123 if (tra->nocell) {
1124 ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
1125 }
1126 if (tra->noexpensive) {
1127 ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
1128 }
1129 if (tra->noconstrained) {
1130 ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
1131 }
1132 if (tra->awdl_unrestricted) {
1133 ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
1134 }
1135 if (tra->management_allowed) {
1136 ipoa.ipoa_flags |= IPOAF_MANAGEMENT_ALLOWED;
1137 }
1138 ipoa.ipoa_sotc = sotc;
1139 if (tp != NULL) {
1140 if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1141 ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1142 }
1143 if (!(tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
1144 ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
1145 }
1146 ipoa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
1147 ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1148 }
1149 if (ro != &sro) {
1150 /* Copy the cached route and take an extra reference */
1151 inp_route_copyout(tp->t_inpcb, &sro);
1152 }
1153 /*
1154 * For consistency, pass a local route copy.
1155 */
1156 (void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);
1157
1158 if (check_qos_marking_again) {
1159 struct inpcb *inp = tp->t_inpcb;
1160 inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount;
1161 if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) {
1162 inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1163 } else {
1164 inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1165 }
1166 }
1167 if (tp != NULL && sro.ro_rt != NULL &&
1168 (outif = sro.ro_rt->rt_ifp) !=
1169 tp->t_inpcb->inp_last_outifp) {
1170 tp->t_inpcb->inp_last_outifp = outif;
1171 #if SKYWALK
1172 if (NETNS_TOKEN_VALID(&tp->t_inpcb->inp_netns_token)) {
1173 netns_set_ifnet(&tp->t_inpcb->inp_netns_token, outif);
1174 }
1175 #endif /* SKYWALK */
1176 }
1177 if (ro != &sro) {
1178 /* Synchronize cached PCB route */
1179 inp_route_copyin(tp->t_inpcb, &sro);
1180 } else {
1181 ROUTE_RELEASE(&sro);
1182 }
1183 }
1184 }
1185
1186 /*
1187 * Create a new TCP control block, making an
1188 * empty reassembly queue and hooking it to the argument
1189 * protocol control block. The `inp' parameter must have
1190 * come from the zone allocator set up in tcp_init().
1191 */
1192 struct tcpcb *
1193 tcp_newtcpcb(struct inpcb *inp)
1194 {
1195 struct inp_tp *it;
1196 struct tcpcb *tp;
1197 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1198 uint32_t random_32;
1199
1200 calculate_tcp_clock();
1201
1202 it = (struct inp_tp *)(void *)inp;
1203 tp = &it->tcb;
1204
1205 bzero((char *) tp, sizeof(struct tcpcb));
1206 LIST_INIT(&tp->t_segq);
1207 tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
1208
1209 tp->t_flags = TF_REQ_SCALE | (tcp_do_timestamps ? TF_REQ_TSTMP : 0);
1210 tp->t_flagsext |= TF_SACK_ENABLE;
1211
1212 if (tcp_rack) {
1213 tp->t_flagsext |= TF_RACK_ENABLED;
1214 }
1215
1216 if (tcp_syncookie == 1) {
1217 tp->t_flagsext |= TF_SYN_COOKIE_ENABLED;
1218 } else if (tcp_syncookie == 2) {
1219 tp->t_flagsext |= TF_SYN_COOKIE_FORCE_ENABLED;
1220 }
1221
1222 TAILQ_INIT(&tp->snd_holes);
1223 SLIST_INIT(&tp->t_rxt_segments);
1224 TAILQ_INIT(&tp->t_segs_sent);
1225 RB_INIT(&tp->t_segs_sent_tree);
1226 TAILQ_INIT(&tp->t_segs_acked);
1227 TAILQ_INIT(&tp->seg_pool.free_segs);
1228 SLIST_INIT(&tp->t_notify_ack);
1229 tp->t_inpcb = inp;
1230 /*
1231 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1232 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
1233 * reasonable initial retransmit time.
1234 */
1235 tp->t_srtt = TCPTV_SRTTBASE;
1236 tp->t_rttvar =
1237 ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1238 tp->t_rttmin = tcp_TCPTV_MIN;
1239 tp->t_rxtcur = TCPTV_RTOBASE;
1240
1241 if (tcp_use_newreno) {
1242 /* use newreno by default */
1243 tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
1244 #if (DEVELOPMENT || DEBUG)
1245 } else if (tcp_use_ledbat) {
1246 /* use ledbat for testing */
1247 tp->tcp_cc_index = TCP_CC_ALGO_BACKGROUND_INDEX;
1248 #endif
1249 } else {
1250 /* Set L4S state even if ifp might be NULL */
1251 tcp_set_l4s(tp, inp->inp_last_outifp);
1252 if (tp->l4s_enabled) {
1253 tp->tcp_cc_index = TCP_CC_ALGO_PRAGUE_INDEX;
1254 } else {
1255 tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX;
1256 }
1257 }
1258
1259 tcp_cc_allocate_state(tp);
1260
1261 if (CC_ALGO(tp)->init != NULL) {
1262 CC_ALGO(tp)->init(tp);
1263 }
1264
1265 /* Initialize rledbat if we are using recv_bg */
1266 if (tcp_rledbat == 1 && TCP_RECV_BG(inp->inp_socket) &&
1267 tcp_cc_rledbat.init != NULL) {
1268 tcp_cc_rledbat.init(tp);
1269 }
1270
1271 tp->snd_cwnd = tcp_initial_cwnd(tp);
1272 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1273 tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1274 tp->t_rcvtime = tcp_now;
1275 tp->tentry.te_timer_start = tcp_now;
1276 tp->t_persist_timeout = tcp_max_persist_timeout;
1277 tp->t_persist_stop = 0;
1278 tp->t_rexmtthresh = (uint8_t)tcprexmtthresh;
1279 tp->rack.reo_wnd_multi = 1;
1280 tp->rfbuf_ts = tcp_now;
1281 tp->rfbuf_space = tcp_initial_cwnd(tp);
1282 tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1283 tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
1284
1285 /* Enable bandwidth measurement on this connection */
1286 tp->t_flagsext |= TF_MEASURESNDBW;
1287 if (tp->t_bwmeas == NULL) {
1288 tp->t_bwmeas = tcp_bwmeas_alloc(tp);
1289 if (tp->t_bwmeas == NULL) {
1290 tp->t_flagsext &= ~TF_MEASURESNDBW;
1291 }
1292 }
1293
1294 /* Clear time wait tailq entry */
1295 tp->t_twentry.tqe_next = NULL;
1296 tp->t_twentry.tqe_prev = NULL;
1297
1298 read_frandom(&random_32, sizeof(random_32));
1299 tp->t_comp_ack_gencnt = random_32;
1300 if (tp->t_comp_ack_gencnt <= TCP_ACK_COMPRESSION_DUMMY ||
1301 tp->t_comp_ack_gencnt > INT_MAX) {
1302 tp->t_comp_ack_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
1303 }
1304 tp->t_comp_ack_lastinc = tcp_now;
1305
1306 /* Initialize Accurate ECN state */
1307 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_feature_disabled;
1308 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_feature_disabled;
1309
1310 /*
1311 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
1312 * because the socket may be bound to an IPv6 wildcard address,
1313 * which may match an IPv4-mapped IPv6 address.
1314 */
1315 inp->inp_ip_ttl = (uint8_t)ip_defttl;
1316 inp->inp_ppcb = (caddr_t)tp;
1317 return tp; /* XXX */
1318 }
1319
1320 /*
1321 * Drop a TCP connection, reporting
1322 * the specified error. If connection is synchronized,
1323 * then send a RST to peer.
1324 */
1325 struct tcpcb *
1326 tcp_drop(struct tcpcb *tp, int errno)
1327 {
1328 struct socket *so = tp->t_inpcb->inp_socket;
1329 #if CONFIG_DTRACE
1330 struct inpcb *inp = tp->t_inpcb;
1331 #endif
1332
1333 if (TCPS_HAVERCVDSYN(tp->t_state)) {
1334 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1335 struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1336 TCP_LOG_STATE(tp, TCPS_CLOSED);
1337 tp->t_state = TCPS_CLOSED;
1338 (void) tcp_output(tp);
1339 tcpstat.tcps_drops++;
1340 } else {
1341 tcpstat.tcps_conndrops++;
1342 }
1343 if (errno == ETIMEDOUT && tp->t_softerror) {
1344 errno = tp->t_softerror;
1345 }
1346 so->so_error = (u_short)errno;
1347
1348 TCP_LOG_CONNECTION_SUMMARY(tp);
1349
1350 return tcp_close(tp);
1351 }
1352
1353 void
1354 tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt)
1355 {
1356 TCP_LOG_RTM_RTT(tp, rt);
1357
1358 if (rt->rt_rmx.rmx_rtt != 0 && tcp_init_rtt_from_cache != 0) {
1359 uint32_t rtt = rt->rt_rmx.rmx_rtt;
1360 uint32_t rttvar;
1361 /*
1362 * XXX the lock bit for RTT indicates that the value
1363 * is also a minimum value; this is subject to time.
1364 */
1365 if (rt->rt_rmx.rmx_locks & RTV_RTT) {
1366 tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ);
1367 } else {
1368 tp->t_rttmin = TCPTV_REXMTMIN;
1369 }
1370
1371 rtt = rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1372 tcpstat.tcps_usedrtt++;
1373
1374 if (rt->rt_rmx.rmx_rttvar) {
1375 rttvar = rt->rt_rmx.rmx_rttvar /
1376 (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1377 tcpstat.tcps_usedrttvar++;
1378 } else {
1379 /* default variation is +- 1 rtt */
1380 rttvar =
1381 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
1382 }
1383
1384 TCPT_RANGESET(tp->t_rxtcur,
1385 tcp_rto_formula(tp->t_rttmin, rtt, rttvar),
1386 tp->t_rttmin, TCPTV_REXMTMAX,
1387 TCP_ADD_REXMTSLOP(tp));
1388 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_srtt == 0 &&
1389 tp->t_rxtshift == 0) {
1390 struct ifnet *ifp = rt->rt_ifp;
1391
1392 if (ifp != NULL && (ifp->if_eflags & IFEF_AWDL) != 0) {
1393 /*
1394 * AWDL needs a special value for the default initial retransmission timeout
1395 */
1396 if (tcp_awdl_rtobase > tcp_TCPTV_MIN) {
1397 tp->t_rttvar = ((tcp_awdl_rtobase - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1398 } else {
1399 tp->t_rttvar = ((tcp_TCPTV_MIN - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1400 }
1401 TCPT_RANGESET(tp->t_rxtcur,
1402 TCP_REXMTVAL(tp),
1403 tp->t_rttmin, TCPTV_REXMTMAX,
1404 TCP_ADD_REXMTSLOP(tp));
1405 }
1406 }
1407
1408 TCP_LOG_RTT_INFO(tp);
1409 }
1410
1411 static inline void
1412 tcp_create_ifnet_stats_per_flow(struct tcpcb *tp,
1413 struct ifnet_stats_per_flow *ifs)
1414 {
1415 struct inpcb *inp;
1416 struct socket *so;
1417 if (tp == NULL || ifs == NULL) {
1418 return;
1419 }
1420
1421 bzero(ifs, sizeof(*ifs));
1422 inp = tp->t_inpcb;
1423 so = inp->inp_socket;
1424
1425 ifs->ipv4 = (inp->inp_vflag & INP_IPV6) ? 0 : 1;
1426 ifs->local = (tp->t_flags & TF_LOCAL) ? 1 : 0;
1427 ifs->connreset = (so->so_error == ECONNRESET) ? 1 : 0;
1428 ifs->conntimeout = (so->so_error == ETIMEDOUT) ? 1 : 0;
1429 ifs->ecn_flags = tp->ecn_flags;
1430 ifs->txretransmitbytes = tp->t_stat.txretransmitbytes;
1431 ifs->rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1432 ifs->rxmitpkts = tp->t_stat.rxmitpkts;
1433 ifs->rcvoopack = tp->t_rcvoopack;
1434 ifs->pawsdrop = tp->t_pawsdrop;
1435 ifs->sack_recovery_episodes = tp->t_sack_recovery_episode;
1436 ifs->reordered_pkts = tp->t_reordered_pkts;
1437 ifs->dsack_sent = tp->t_dsack_sent;
1438 ifs->dsack_recvd = tp->t_dsack_recvd;
1439 ifs->srtt = tp->t_srtt;
1440 ifs->rttupdated = tp->t_rttupdated;
1441 ifs->rttvar = tp->t_rttvar;
1442 ifs->rttmin = get_base_rtt(tp);
1443 if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_sndbw_max > 0) {
1444 ifs->bw_sndbw_max = tp->t_bwmeas->bw_sndbw_max;
1445 } else {
1446 ifs->bw_sndbw_max = 0;
1447 }
1448 if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_rcvbw_max > 0) {
1449 ifs->bw_rcvbw_max = tp->t_bwmeas->bw_rcvbw_max;
1450 } else {
1451 ifs->bw_rcvbw_max = 0;
1452 }
1453 ifs->bk_txpackets = so->so_tc_stats[MBUF_TC_BK].txpackets;
1454 ifs->txpackets = inp->inp_mstat.ms_total.ts_txpackets;
1455 ifs->rxpackets = inp->inp_mstat.ms_total.ts_rxpackets;
1456 }
1457
1458 static inline void
1459 tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
1460 struct if_tcp_ecn_perf_stat *stat)
1461 {
1462 u_int64_t curval, oldval;
1463 stat->total_txpkts += ifs->txpackets;
1464 stat->total_rxpkts += ifs->rxpackets;
1465 stat->total_rxmitpkts += ifs->rxmitpkts;
1466 stat->total_oopkts += ifs->rcvoopack;
1467 stat->total_reorderpkts += (ifs->reordered_pkts +
1468 ifs->pawsdrop + ifs->dsack_sent + ifs->dsack_recvd);
1469
1470 /* Average RTT */
1471 curval = ifs->srtt >> TCP_RTT_SHIFT;
1472 if (curval > 0 && ifs->rttupdated >= 16) {
1473 if (stat->rtt_avg == 0) {
1474 stat->rtt_avg = curval;
1475 } else {
1476 oldval = stat->rtt_avg;
1477 stat->rtt_avg = ((oldval << 4) - oldval + curval) >> 4;
1478 }
1479 }
1480
1481 /* RTT variance */
1482 curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1483 if (curval > 0 && ifs->rttupdated >= 16) {
1484 if (stat->rtt_var == 0) {
1485 stat->rtt_var = curval;
1486 } else {
1487 oldval = stat->rtt_var;
1488 stat->rtt_var =
1489 ((oldval << 4) - oldval + curval) >> 4;
1490 }
1491 }
1492
1493 /* SACK episodes */
1494 stat->sack_episodes += ifs->sack_recovery_episodes;
1495 if (ifs->connreset) {
1496 stat->rst_drop++;
1497 }
1498 }
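
/*
 * The running averages above are exponentially weighted moving
 * averages with a gain of 1/16: new = (15 * old + sample) / 16,
 * computed in integers as ((old << 4) - old + sample) >> 4.
 * For example, old = 100 and sample = 132 give
 * (1600 - 100 + 132) >> 4 = 102. tcp_flow_lim_stats() below applies
 * the same filter to its RTT averages.
 */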
1499
1500 static inline void
1501 tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
1502 struct if_lim_perf_stat *stat)
1503 {
1504 u_int64_t curval, oldval;
1505
1506 stat->lim_total_txpkts += ifs->txpackets;
1507 stat->lim_total_rxpkts += ifs->rxpackets;
1508 stat->lim_total_retxpkts += ifs->rxmitpkts;
1509 stat->lim_total_oopkts += ifs->rcvoopack;
1510
1511 if (ifs->bw_sndbw_max > 0) {
1512 /* convert from bytes per ms to bits per second */
1513 ifs->bw_sndbw_max *= 8000;
1514 stat->lim_ul_max_bandwidth = MAX(stat->lim_ul_max_bandwidth,
1515 ifs->bw_sndbw_max);
1516 }
1517
1518 if (ifs->bw_rcvbw_max > 0) {
1519 /* convert from bytes per ms to bits per second */
1520 ifs->bw_rcvbw_max *= 8000;
1521 stat->lim_dl_max_bandwidth = MAX(stat->lim_dl_max_bandwidth,
1522 ifs->bw_rcvbw_max);
1523 }
1524
1525 /* Average RTT */
1526 curval = ifs->srtt >> TCP_RTT_SHIFT;
1527 if (curval > 0 && ifs->rttupdated >= 16) {
1528 if (stat->lim_rtt_average == 0) {
1529 stat->lim_rtt_average = curval;
1530 } else {
1531 oldval = stat->lim_rtt_average;
1532 stat->lim_rtt_average =
1533 ((oldval << 4) - oldval + curval) >> 4;
1534 }
1535 }
1536
1537 /* RTT variance */
1538 curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1539 if (curval > 0 && ifs->rttupdated >= 16) {
1540 if (stat->lim_rtt_variance == 0) {
1541 stat->lim_rtt_variance = curval;
1542 } else {
1543 oldval = stat->lim_rtt_variance;
1544 stat->lim_rtt_variance =
1545 ((oldval << 4) - oldval + curval) >> 4;
1546 }
1547 }
1548
1549 if (stat->lim_rtt_min == 0) {
1550 stat->lim_rtt_min = ifs->rttmin;
1551 } else {
1552 stat->lim_rtt_min = MIN(stat->lim_rtt_min, ifs->rttmin);
1553 }
1554
1555 /* connection timeouts */
1556 stat->lim_conn_attempts++;
1557 if (ifs->conntimeout) {
1558 stat->lim_conn_timeouts++;
1559 }
1560
1561 /* bytes sent using background delay-based algorithms */
1562 stat->lim_bk_txpkts += ifs->bk_txpackets;
1563 }
1564
1565 static void
1566 tcp_free_reassq(struct tcpcb *tp)
1567 {
1568 struct tseg_qent *q;
1569
1570 while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
1571 struct mbuf *m;
1572
1573 LIST_REMOVE(q, tqe_q);
1574 m = tcp_destroy_reass_qent(tp, q);
1575 m_freem(m);
1576 }
1577 }
1578
1579 struct tseg_qent *
1580 tcp_create_reass_qent(struct tcpcb *tp, struct mbuf *m,
1581 struct tcphdr *th, int len)
1582 {
1583 struct tseg_qent *te;
1584 int size;
1585
1586 te = tcp_reass_qent_alloc(tp->t_inpcb->inp_socket->so_proto);
1587 if (te == NULL) {
1588 return NULL;
1589 }
1590
1591 tp->t_reassqlen++;
1592 OSIncrementAtomic(&tcp_reass_total_qlen);
1593
1594 size = m_chain_capacity(m);
1595 tcp_memacct_add(size);
1596 tp->t_reassq_mbcnt += size;
1597
1598 te->tqe_m = m;
1599 te->tqe_th = th;
1600 te->tqe_len = len;
1601
1602 return te;
1603 }
1604
1605 struct mbuf *
1606 tcp_destroy_reass_qent(struct tcpcb *tp, struct tseg_qent *q)
1607 {
1608 struct mbuf *m = q->tqe_m;
1609 int size;
1610
1611 size = m_chain_capacity(m);
1612 tcp_memacct_sub(size);
1613 tp->t_reassq_mbcnt -= size;
1614
1615 tp->t_reassqlen--;
1616 OSDecrementAtomic(&tcp_reass_total_qlen);
1617 tcp_reass_qent_free(tp->t_inpcb->inp_socket->so_proto, q);
1618
1619 return m;
1620 }
1621
1622 struct tseg_qent *
1623 tcp_reass_qent_alloc(struct protosw *proto)
1624 {
1625 struct tseg_qent *reass;
1626
1627 if (proto_memacct_hardlimit(proto)) {
1628 return NULL;
1629 }
1630 reass = zalloc_flags(tcp_reass_zone, Z_NOPAGEWAIT);
1631 if (reass == NULL) {
1632 return NULL;
1633 }
1634
1635 proto_memacct_add(proto, kalloc_type_size(tcp_reass_zone));
1636
1637 return reass;
1638 }
1639
1640 void
1641 tcp_reass_qent_free(struct protosw *proto, struct tseg_qent *te)
1642 {
1643 proto_memacct_sub(proto, kalloc_type_size(tcp_reass_zone));
1644 zfree(tcp_reass_zone, te);
1645 }
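
/*
 * Sketch of the intended pairing (illustrative): every reassembly queue
 * entry charged against the protocol's memory account at creation is
 * credited back on destruction, so the two paths must be used together:
 */
#if 0
	struct tseg_qent *te = tcp_create_reass_qent(tp, m, th, len);
	if (te != NULL) {
		/* ... queue te; later, when dequeuing: */
		m = tcp_destroy_reass_qent(tp, te);
		m_freem(m);
	}
#endif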
1646
1647 /*
1648 * Close a TCP control block:
1649 * discard all space held by the tcp
1650 * discard internet protocol block
1651 * wake up any sleepers
1652 */
1653 struct tcpcb *
1654 tcp_close(struct tcpcb *tp)
1655 {
1656 struct inpcb *inp = tp->t_inpcb;
1657 struct socket *so = inp->inp_socket;
1658 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1659 struct route *ro;
1660 struct rtentry *rt;
1661 int dosavessthresh;
1662 struct ifnet_stats_per_flow ifs;
1663
1664 /* tcp_close was called previously, bail */
1665 if (inp->inp_ppcb == NULL) {
1666 return NULL;
1667 }
1668
1669 tcp_del_fsw_flow(tp);
1670
1671 tcp_canceltimers(tp);
1672 KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
1673
1674 /*
1675 * If another thread for this tcp is currently in ip (indicated by
1676 * the TF_SENDINPROG flag), defer the cleanup until after it returns
1677 * back to tcp. This is done to serialize the close until after all
1678 * pending output is finished, in order to avoid having the PCB be
1679 * detached and the cached route cleaned, only for ip to cache the
1680 * route back into the PCB again. Note that we've cleared all the
1681 * timers at this point. Set TF_CLOSING to indicate to tcp_output()
1682 * that it should call us again once it returns from ip; at that
1683 * point both flags should be cleared and we can proceed further
1684 * with the cleanup.
1685 */
1686 if ((tp->t_flags & TF_CLOSING) ||
1687 inp->inp_sndinprog_cnt > 0) {
1688 tp->t_flags |= TF_CLOSING;
1689 return NULL;
1690 }
1691
1692 TCP_LOG_CONNECTION_SUMMARY(tp);
1693
1694 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1695 struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1696
1697 ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route);
1698 rt = ro->ro_rt;
1699 if (rt != NULL) {
1700 RT_LOCK_SPIN(rt);
1701 }
1702
1703 /*
1704 * If we got enough samples through the srtt filter,
1705 * save the rtt and rttvar in the routing entry.
1706 * 'Enough' is arbitrarily defined as 16 samples.
1707 * 16 samples is enough for the srtt filter to converge
1708 * to within 5% of the correct value; fewer samples and
1709 * we could save a very bogus rtt.
1710 *
1711 * Don't update the default route's characteristics and don't
1712 * update anything that the user "locked".
1713 */
1714 if (tp->t_rttupdated >= 16) {
1715 u_int32_t i = 0;
1716 bool log_rtt = false;
1717
1718 if (isipv6) {
1719 struct sockaddr_in6 *sin6;
1720
1721 if (rt == NULL) {
1722 goto no_valid_rt;
1723 }
1724 sin6 = SIN6(rt_key(rt));
1725 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1726 goto no_valid_rt;
1727 }
1728 } else if (ROUTE_UNUSABLE(ro) ||
1729 SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) {
1730 DTRACE_TCP4(state__change, void, NULL,
1731 struct inpcb *, inp, struct tcpcb *, tp,
1732 int32_t, TCPS_CLOSED);
1733 TCP_LOG_STATE(tp, TCPS_CLOSED);
1734 tp->t_state = TCPS_CLOSED;
1735 goto no_valid_rt;
1736 }
1737
1738 RT_LOCK_ASSERT_HELD(rt);
1739 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1740 i = tp->t_srtt *
1741 (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1742 if (rt->rt_rmx.rmx_rtt && i) {
1743 /*
1744 * filter this update to half the old & half
1745 * the new values, converting scale.
1746 * See route.h and tcp_var.h for a
1747 * description of the scaling constants.
1748 */
1749 rt->rt_rmx.rmx_rtt =
1750 (rt->rt_rmx.rmx_rtt + i) / 2;
1751 } else {
1752 rt->rt_rmx.rmx_rtt = i;
1753 }
1754 tcpstat.tcps_cachedrtt++;
1755 log_rtt = true;
1756 }
1757 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1758 i = tp->t_rttvar *
1759 (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1760 if (rt->rt_rmx.rmx_rttvar && i) {
1761 rt->rt_rmx.rmx_rttvar =
1762 (rt->rt_rmx.rmx_rttvar + i) / 2;
1763 } else {
1764 rt->rt_rmx.rmx_rttvar = i;
1765 }
1766 tcpstat.tcps_cachedrttvar++;
1767 log_rtt = true;
1768 }
1769 if (log_rtt) {
1770 TCP_LOG_RTM_RTT(tp, rt);
1771 TCP_LOG_RTT_INFO(tp);
1772 }
1773 /*
1774 * The old comment here said:
1775 * update the pipelimit (ssthresh) if it has been updated
1776 * already or if a pipesize was specified & the threshold
1777 * got below half the pipesize. I.e., wait for bad news
1778 * before we start updating, then update on both good
1779 * and bad news.
1780 *
1781 * But we want to save the ssthresh even if no pipesize is
1782 * specified explicitly in the route, because such
1783 * connections still have an implicit pipesize specified
1784 * by the global tcp_sendspace. In the absence of a reliable
1785 * way to calculate the pipesize, it will have to do.
1786 */
1787 i = tp->snd_ssthresh;
1788 if (rt->rt_rmx.rmx_sendpipe != 0) {
1789 dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
1790 } else {
1791 dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
1792 }
1793 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1794 i != 0 && rt->rt_rmx.rmx_ssthresh != 0) ||
1795 dosavessthresh) {
1796 /*
1797 * convert the limit from user data bytes to
1798 * packets then to packet data bytes.
1799 */
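/*
 * With illustrative numbers: snd_ssthresh = 32768 and
 * t_maxseg = 1448 give i = (32768 + 724) / 1448 = 23
 * packets, which the multiplication below scales back
 * up by the per-packet header overhead.
 */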
1800 i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
1801 if (i < 2) {
1802 i = 2;
1803 }
1804 i *= (u_int32_t)(tp->t_maxseg +
1805 (isipv6 ? sizeof(struct ip6_hdr) +
1806 sizeof(struct tcphdr) :
1807 sizeof(struct tcpiphdr)));
1808 if (rt->rt_rmx.rmx_ssthresh) {
1809 rt->rt_rmx.rmx_ssthresh =
1810 (rt->rt_rmx.rmx_ssthresh + i) / 2;
1811 } else {
1812 rt->rt_rmx.rmx_ssthresh = i;
1813 }
1814 tcpstat.tcps_cachedssthresh++;
1815 }
1816 }
1817
1818 /*
1819 * Mark route for deletion if no information is cached.
1820 */
1821 if (rt != NULL && (so->so_flags & SOF_OVERFLOW)) {
1822 if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
1823 rt->rt_rmx.rmx_rtt == 0) {
1824 rt->rt_flags |= RTF_DELCLONE;
1825 }
1826 }
1827
1828 no_valid_rt:
1829 if (rt != NULL) {
1830 RT_UNLOCK(rt);
1831 }
1832
1833 /* free the reassembly queue, if any */
1834 tcp_free_reassq(tp);
1835
1836 /* performance stats per interface */
1837 tcp_create_ifnet_stats_per_flow(tp, &ifs);
1838 tcp_update_stats_per_flow(&ifs, inp->inp_last_outifp);
1839
1840 tcp_free_sackholes(tp);
1841 tcp_notify_ack_free(tp);
1842
1843 inp_decr_sndbytes_allunsent(so, tp->snd_una);
1844
1845 if (tp->t_bwmeas != NULL) {
1846 tcp_bwmeas_free(tp);
1847 }
1848 tcp_rxtseg_clean(tp);
1849 tcp_segs_sent_clean(tp, true);
1850
1851 /* Free the packet list */
1852 if (tp->t_pktlist_head != NULL) {
1853 m_freem_list(tp->t_pktlist_head);
1854 }
1855 TCP_PKTLIST_CLEAR(tp);
1856
1857 TCP_LOG_STATE(tp, TCPS_CLOSED);
1858 tp->t_state = TCPS_CLOSED;
1859
1860 /*
1861 * Issue a wakeup before detach so that we don't miss
1862 * a wakeup
1863 */
1864 sodisconnectwakeup(so);
1865
1866 /*
1867 * Make sure to clear the TCP Keep Alive Offload as it is
1868 * ref counted on the interface
1869 */
1870 tcp_clear_keep_alive_offload(so);
1871
1872 /*
1873 * If this is a socket that does not want to wake up the device
1874 * for its traffic, the application might need to know that the
1875 * socket is closed, so send a notification.
1876 */
1877 if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
1878 inp->inp_state != INPCB_STATE_DEAD &&
1879 !(inp->inp_flags2 & INP2_TIMEWAIT)) {
1880 socket_post_kev_msg_closed(so);
1881 }
1882
1883 if (CC_ALGO(tp)->cleanup != NULL) {
1884 CC_ALGO(tp)->cleanup(tp);
1885 }
1886
1887 tp->tcp_cc_index = TCP_CC_ALGO_NONE;
1888
1889 if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.cleanup != NULL) {
1890 tcp_cc_rledbat.cleanup(tp);
1891 }
1892
1893 /* Can happen if we close the socket before receiving the third ACK */
1894 if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
1895 OSDecrementAtomic(&tcp_tfo_halfcnt);
1896
1897 /* Panic if something has gone terribly wrong. */
1898 VERIFY(tcp_tfo_halfcnt >= 0);
1899
1900 tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
1901 }
1902
1903 if (SOCK_CHECK_DOM(so, PF_INET6)) {
1904 in6_pcbdetach(inp);
1905 } else {
1906 in_pcbdetach(inp);
1907 }
1908
1909 /*
1910 * Call soisdisconnected after detach because it might unlock the socket
1911 */
1912 soisdisconnected(so);
1913 tcpstat.tcps_closed++;
1914 KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END,
1915 tcpstat.tcps_closed, 0, 0, 0, 0);
1916 return NULL;
1917 }
1918
1919 void
1920 tcp_drain(void)
1921 {
1922 struct inpcb *inp;
1923 struct tcpcb *tp;
1924
1925 if (!lck_rw_try_lock_exclusive(&tcbinfo.ipi_lock)) {
1926 return;
1927 }
1928
1929 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
1930 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
1931 WNT_STOPUSING) {
1932 socket_lock(inp->inp_socket, 1);
1933 if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
1934 == WNT_STOPUSING) {
1935 /* lost a race, try the next one */
1936 socket_unlock(inp->inp_socket, 1);
1937 continue;
1938 }
1939 tp = intotcpcb(inp);
1940
1941 so_drain_extended_bk_idle(inp->inp_socket);
1942
1943 socket_unlock(inp->inp_socket, 1);
1944 }
1945 }
1946 lck_rw_done(&tcbinfo.ipi_lock);
1947 }
1948
1949 /*
1950 * Notify a tcp user of an asynchronous error;
1951 * store the error as a soft error.
1952 *
1953 * Do not wake up the user: there is currently no mechanism for
1954 * reporting soft errors (yet - a kqueue filter that can select
1955 * for them may be added).
1956 */
1957 static void
1958 tcp_notify(struct inpcb *inp, int error)
1959 {
1960 struct tcpcb *tp;
1961
1962 if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) {
1963 return; /* pcb is gone already */
1964 }
1965 tp = (struct tcpcb *)inp->inp_ppcb;
1966
1967 VERIFY(tp != NULL);
1968 /*
1969 * Ignore some errors if we are hooked up.
1970 * If connection hasn't completed, has retransmitted several times,
1971 * and receives a second error, give up now. This is better
1972 * than waiting a long time to establish a connection that
1973 * can never complete.
1974 */
1975 if (tp->t_state == TCPS_ESTABLISHED &&
1976 (error == EHOSTUNREACH || error == ENETUNREACH ||
1977 error == EHOSTDOWN)) {
1978 if (inp->inp_route.ro_rt) {
1979 rtfree(inp->inp_route.ro_rt);
1980 inp->inp_route.ro_rt = (struct rtentry *)NULL;
1981 }
1982 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
1983 tp->t_softerror) {
1984 tcp_drop(tp, error);
1985 } else {
1986 tp->t_softerror = error;
1987 }
1988 }
1989
1990 struct bwmeas *
1991 tcp_bwmeas_alloc(struct tcpcb *tp)
1992 {
1993 struct bwmeas *elm;
1994 elm = zalloc_flags(tcp_bwmeas_zone, Z_ZERO | Z_WAITOK);
1995 elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE;
1996 elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg;
1997 return elm;
1998 }
1999
2000 void
2001 tcp_bwmeas_free(struct tcpcb *tp)
2002 {
2003 zfree(tcp_bwmeas_zone, tp->t_bwmeas);
2004 tp->t_bwmeas = NULL;
2005 tp->t_flagsext &= ~(TF_MEASURESNDBW);
2006 }
2007
2008 int
2009 get_tcp_inp_list(struct inpcb * __single *inp_list __counted_by(n), size_t n, inp_gen_t gencnt)
2010 {
2011 struct tcpcb *tp;
2012 struct inpcb *inp;
2013 int i = 0;
2014
2015 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
2016 if (i >= n) {
2017 break;
2018 }
2019 if (inp->inp_gencnt <= gencnt &&
2020 inp->inp_state != INPCB_STATE_DEAD) {
2021 inp_list[i++] = inp;
2022 }
2023 }
2024
2025 TAILQ_FOREACH(tp, &tcp_tw_tailq, t_twentry) {
2026 if (i >= n) {
2027 break;
2028 }
2029 inp = tp->t_inpcb;
2030 if (inp->inp_gencnt <= gencnt &&
2031 inp->inp_state != INPCB_STATE_DEAD) {
2032 inp_list[i++] = inp;
2033 }
2034 }
2035 return i;
2036 }
2037
2038 /*
2039 * tcpcb_to_otcpcb copies specific bits of a tcpcb into the otcpcb format.
2040 * The otcpcb data structure is passed to user space and must not change.
2041 */
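/*
 * Kernel pointers (t_segq, t_inpcb) are passed through
 * VM_KERNEL_ADDRHASH below, so user space sees hashed values
 * rather than raw kernel addresses.
 */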
2042 static void
2043 tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
2044 {
2045 otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
2046 otp->t_dupacks = tp->t_dupacks;
2047 otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2048 otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2049 otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2050 otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2051 otp->t_inpcb =
2052 (_TCPCB_PTR(struct inpcb *))VM_KERNEL_ADDRHASH(tp->t_inpcb);
2053 otp->t_state = tp->t_state;
2054 otp->t_flags = tp->t_flags;
2055 otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2056 otp->snd_una = tp->snd_una;
2057 otp->snd_max = tp->snd_max;
2058 otp->snd_nxt = tp->snd_nxt;
2059 otp->snd_up = tp->snd_up;
2060 otp->snd_wl1 = tp->snd_wl1;
2061 otp->snd_wl2 = tp->snd_wl2;
2062 otp->iss = tp->iss;
2063 otp->irs = tp->irs;
2064 otp->rcv_nxt = tp->rcv_nxt;
2065 otp->rcv_adv = tp->rcv_adv;
2066 otp->rcv_wnd = tp->rcv_wnd;
2067 otp->rcv_up = tp->rcv_up;
2068 otp->snd_wnd = tp->snd_wnd;
2069 otp->snd_cwnd = tp->snd_cwnd;
2070 otp->snd_ssthresh = tp->snd_ssthresh;
2071 otp->t_maxopd = tp->t_maxopd;
2072 otp->t_rcvtime = tp->t_rcvtime;
2073 otp->t_starttime = tp->t_starttime;
2074 otp->t_rtttime = tp->t_rtttime;
2075 otp->t_rtseq = tp->t_rtseq;
2076 otp->t_rxtcur = tp->t_rxtcur;
2077 otp->t_maxseg = tp->t_maxseg;
2078 otp->t_srtt = tp->t_srtt;
2079 otp->t_rttvar = tp->t_rttvar;
2080 otp->t_rxtshift = tp->t_rxtshift;
2081 otp->t_rttmin = tp->t_rttmin;
2082 otp->t_rttupdated = tp->t_rttupdated;
2083 otp->max_sndwnd = tp->max_sndwnd;
2084 otp->t_softerror = tp->t_softerror;
2085 otp->t_oobflags = tp->t_oobflags;
2086 otp->t_iobc = tp->t_iobc;
2087 otp->snd_scale = tp->snd_scale;
2088 otp->rcv_scale = tp->rcv_scale;
2089 otp->request_r_scale = tp->request_r_scale;
2090 otp->requested_s_scale = tp->requested_s_scale;
2091 otp->ts_recent = tp->ts_recent;
2092 otp->ts_recent_age = tp->ts_recent_age;
2093 otp->last_ack_sent = tp->last_ack_sent;
2094 otp->cc_send = 0;
2095 otp->cc_recv = 0;
2096 otp->snd_recover = tp->snd_recover;
2097 otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2098 otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2099 otp->t_badrxtwin = 0;
2100 }
2101
2102 static int
2103 tcp_pcblist SYSCTL_HANDLER_ARGS
2104 {
2105 #pragma unused(oidp, arg1, arg2)
2106 int error, i = 0, n, sz;
2107 struct inpcb **inp_list;
2108 inp_gen_t gencnt;
2109 struct xinpgen xig;
2110
2111 /*
2112 * The process of preparing the TCB list is too time-consuming and
2113 * resource-intensive to repeat twice on every request.
2114 */
2115 lck_rw_lock_shared(&tcbinfo.ipi_lock);
2116 if (req->oldptr == USER_ADDR_NULL) {
2117 n = tcbinfo.ipi_count;
2118 req->oldidx = 2 * (sizeof(xig))
2119 + (n + n / 8) * sizeof(struct xtcpcb);
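/*
 * The n / 8 slack presumably leaves headroom for PCBs
 * created between this size estimate and the copy-out
 * pass further below.
 */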
2120 lck_rw_done(&tcbinfo.ipi_lock);
2121 return 0;
2122 }
2123
2124 if (req->newptr != USER_ADDR_NULL) {
2125 lck_rw_done(&tcbinfo.ipi_lock);
2126 return EPERM;
2127 }
2128
2129 /*
2130 * OK, now we're committed to doing something.
2131 */
2132 gencnt = tcbinfo.ipi_gencnt;
2133 sz = n = tcbinfo.ipi_count;
2134
2135 bzero(&xig, sizeof(xig));
2136 xig.xig_len = sizeof(xig);
2137 xig.xig_count = n;
2138 xig.xig_gen = gencnt;
2139 xig.xig_sogen = so_gencnt;
2140 error = SYSCTL_OUT(req, &xig, sizeof(xig));
2141 if (error) {
2142 lck_rw_done(&tcbinfo.ipi_lock);
2143 return error;
2144 }
2145 /*
2146 * We are done if there is no pcb
2147 */
2148 if (n == 0) {
2149 lck_rw_done(&tcbinfo.ipi_lock);
2150 return 0;
2151 }
2152
2153 inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2154 if (inp_list == NULL) {
2155 lck_rw_done(&tcbinfo.ipi_lock);
2156 return ENOMEM;
2157 }
2158
2159 n = get_tcp_inp_list(inp_list, n, gencnt);
2160
2161 error = 0;
2162 for (i = 0; i < n; i++) {
2163 struct xtcpcb xt;
2164 caddr_t inp_ppcb __single;
2165 struct inpcb *inp;
2166
2167 inp = inp_list[i];
2168
2169 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2170 continue;
2171 }
2172 socket_lock(inp->inp_socket, 1);
2173 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2174 socket_unlock(inp->inp_socket, 1);
2175 continue;
2176 }
2177 if (inp->inp_gencnt > gencnt) {
2178 socket_unlock(inp->inp_socket, 1);
2179 continue;
2180 }
2181
2182 bzero(&xt, sizeof(xt));
2183 xt.xt_len = sizeof(xt);
2184 /* XXX should avoid extra copy */
2185 inpcb_to_compat(inp, &xt.xt_inp);
2186 inp_ppcb = inp->inp_ppcb;
2187 if (inp_ppcb != NULL) {
2188 tcpcb_to_otcpcb((struct tcpcb *)(void *)inp_ppcb,
2189 &xt.xt_tp);
2190 } else {
2191 bzero((char *) &xt.xt_tp, sizeof(xt.xt_tp));
2192 }
2193 if (inp->inp_socket) {
2194 sotoxsocket(inp->inp_socket, &xt.xt_socket);
2195 }
2196
2197 socket_unlock(inp->inp_socket, 1);
2198
2199 error = SYSCTL_OUT(req, &xt, sizeof(xt));
2200 }
2201 if (!error) {
2202 /*
2203 * Give the user an updated idea of our state.
2204 * If the generation differs from what we told
2205 * her before, she knows that something happened
2206 * while we were processing this request, and it
2207 * might be necessary to retry.
2208 */
2209 bzero(&xig, sizeof(xig));
2210 xig.xig_len = sizeof(xig);
2211 xig.xig_gen = tcbinfo.ipi_gencnt;
2212 xig.xig_sogen = so_gencnt;
2213 xig.xig_count = tcbinfo.ipi_count;
2214 error = SYSCTL_OUT(req, &xig, sizeof(xig));
2215 }
2216
2217 lck_rw_done(&tcbinfo.ipi_lock);
2218 kfree_type(struct inpcb *, sz, inp_list);
2219 return error;
2220 }
2221
2222 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2223 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2224 tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
2225
2226 #if XNU_TARGET_OS_OSX
2227
2228 static void
2229 tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
2230 {
2231 otp->t_segq = (uint32_t)VM_KERNEL_ADDRHASH(tp->t_segq.lh_first);
2232 otp->t_dupacks = tp->t_dupacks;
2233 otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2234 otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2235 otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2236 otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2237 otp->t_state = tp->t_state;
2238 otp->t_flags = tp->t_flags;
2239 otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2240 otp->snd_una = tp->snd_una;
2241 otp->snd_max = tp->snd_max;
2242 otp->snd_nxt = tp->snd_nxt;
2243 otp->snd_up = tp->snd_up;
2244 otp->snd_wl1 = tp->snd_wl1;
2245 otp->snd_wl2 = tp->snd_wl2;
2246 otp->iss = tp->iss;
2247 otp->irs = tp->irs;
2248 otp->rcv_nxt = tp->rcv_nxt;
2249 otp->rcv_adv = tp->rcv_adv;
2250 otp->rcv_wnd = tp->rcv_wnd;
2251 otp->rcv_up = tp->rcv_up;
2252 otp->snd_wnd = tp->snd_wnd;
2253 otp->snd_cwnd = tp->snd_cwnd;
2254 otp->snd_ssthresh = tp->snd_ssthresh;
2255 otp->t_maxopd = tp->t_maxopd;
2256 otp->t_rcvtime = tp->t_rcvtime;
2257 otp->t_starttime = tp->t_starttime;
2258 otp->t_rtttime = tp->t_rtttime;
2259 otp->t_rtseq = tp->t_rtseq;
2260 otp->t_rxtcur = tp->t_rxtcur;
2261 otp->t_maxseg = tp->t_maxseg;
2262 otp->t_srtt = tp->t_srtt;
2263 otp->t_rttvar = tp->t_rttvar;
2264 otp->t_rxtshift = tp->t_rxtshift;
2265 otp->t_rttmin = tp->t_rttmin;
2266 otp->t_rttupdated = tp->t_rttupdated;
2267 otp->max_sndwnd = tp->max_sndwnd;
2268 otp->t_softerror = tp->t_softerror;
2269 otp->t_oobflags = tp->t_oobflags;
2270 otp->t_iobc = tp->t_iobc;
2271 otp->snd_scale = tp->snd_scale;
2272 otp->rcv_scale = tp->rcv_scale;
2273 otp->request_r_scale = tp->request_r_scale;
2274 otp->requested_s_scale = tp->requested_s_scale;
2275 otp->ts_recent = tp->ts_recent;
2276 otp->ts_recent_age = tp->ts_recent_age;
2277 otp->last_ack_sent = tp->last_ack_sent;
2278 otp->cc_send = 0;
2279 otp->cc_recv = 0;
2280 otp->snd_recover = tp->snd_recover;
2281 otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2282 otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2283 otp->t_badrxtwin = 0;
2284 }
2285
2286
2287 static int
2288 tcp_pcblist64 SYSCTL_HANDLER_ARGS
2289 {
2290 #pragma unused(oidp, arg1, arg2)
2291 int error, i = 0, n, sz;
2292 struct inpcb **inp_list;
2293 inp_gen_t gencnt;
2294 struct xinpgen xig;
2295
2296 /*
2297 * The process of preparing the TCB list is too time-consuming and
2298 * resource-intensive to repeat twice on every request.
2299 */
2300 lck_rw_lock_shared(&tcbinfo.ipi_lock);
2301 if (req->oldptr == USER_ADDR_NULL) {
2302 n = tcbinfo.ipi_count;
2303 req->oldidx = 2 * (sizeof(xig))
2304 + (n + n / 8) * sizeof(struct xtcpcb64);
2305 lck_rw_done(&tcbinfo.ipi_lock);
2306 return 0;
2307 }
2308
2309 if (req->newptr != USER_ADDR_NULL) {
2310 lck_rw_done(&tcbinfo.ipi_lock);
2311 return EPERM;
2312 }
2313
2314 /*
2315 * OK, now we're committed to doing something.
2316 */
2317 gencnt = tcbinfo.ipi_gencnt;
2318 sz = n = tcbinfo.ipi_count;
2319
2320 bzero(&xig, sizeof(xig));
2321 xig.xig_len = sizeof(xig);
2322 xig.xig_count = n;
2323 xig.xig_gen = gencnt;
2324 xig.xig_sogen = so_gencnt;
2325 error = SYSCTL_OUT(req, &xig, sizeof(xig));
2326 if (error) {
2327 lck_rw_done(&tcbinfo.ipi_lock);
2328 return error;
2329 }
2330 /*
2331 * We are done if there is no pcb
2332 */
2333 if (n == 0) {
2334 lck_rw_done(&tcbinfo.ipi_lock);
2335 return 0;
2336 }
2337
2338 inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
2339 if (inp_list == NULL) {
2340 lck_rw_done(&tcbinfo.ipi_lock);
2341 return ENOMEM;
2342 }
2343
2344 n = get_tcp_inp_list(inp_list, n, gencnt);
2345
2346 error = 0;
2347 for (i = 0; i < n; i++) {
2348 struct xtcpcb64 xt;
2349 struct inpcb *inp;
2350
2351 inp = inp_list[i];
2352
2353 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2354 continue;
2355 }
2356 socket_lock(inp->inp_socket, 1);
2357 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2358 socket_unlock(inp->inp_socket, 1);
2359 continue;
2360 }
2361 if (inp->inp_gencnt > gencnt) {
2362 socket_unlock(inp->inp_socket, 1);
2363 continue;
2364 }
2365
2366 bzero(&xt, sizeof(xt));
2367 xt.xt_len = sizeof(xt);
2368 inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
2369 xt.xt_inpcb.inp_ppcb =
2370 (uint64_t)VM_KERNEL_ADDRHASH(inp->inp_ppcb);
2371 if (inp->inp_ppcb != NULL) {
2372 tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb,
2373 &xt);
2374 }
2375 if (inp->inp_socket) {
2376 sotoxsocket64(inp->inp_socket,
2377 &xt.xt_inpcb.xi_socket);
2378 }
2379
2380 socket_unlock(inp->inp_socket, 1);
2381
2382 error = SYSCTL_OUT(req, &xt, sizeof(xt));
2383 }
2384 if (!error) {
2385 /*
2386 * Give the user an updated idea of our state.
2387 * If the generation differs from what we told
2388 * her before, she knows that something happened
2389 * while we were processing this request, and it
2390 * might be necessary to retry.
2391 */
2392 bzero(&xig, sizeof(xig));
2393 xig.xig_len = sizeof(xig);
2394 xig.xig_gen = tcbinfo.ipi_gencnt;
2395 xig.xig_sogen = so_gencnt;
2396 xig.xig_count = tcbinfo.ipi_count;
2397 error = SYSCTL_OUT(req, &xig, sizeof(xig));
2398 }
2399
2400 lck_rw_done(&tcbinfo.ipi_lock);
2401 kfree_type(struct inpcb *, sz, inp_list);
2402 return error;
2403 }
2404
2405 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64,
2406 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2407 tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
2408
2409 #endif /* XNU_TARGET_OS_OSX */
2410
2411 static int
2412 tcp_pcblist_n SYSCTL_HANDLER_ARGS
2413 {
2414 #pragma unused(oidp, arg1, arg2)
2415 int error = 0;
2416
2417 error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo);
2418
2419 return error;
2420 }
2421
2422
2423 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n,
2424 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2425 tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
2426
2427 static int
2428 tcp_progress_probe_enable SYSCTL_HANDLER_ARGS
2429 {
2430 #pragma unused(oidp, arg1, arg2)
2431
2432 return ntstat_tcp_progress_enable(req);
2433 }
2434
2435 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, progress_enable,
2436 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
2437 tcp_progress_probe_enable, "S", "Enable/disable TCP keepalive probing on the specified link(s)");
2438
2439
2440 __private_extern__ void
2441 tcp_get_ports_used(ifnet_t ifp, int protocol, uint32_t flags,
2442 bitstr_t *__counted_by(bitstr_size(IP_PORTRANGE_SIZE)) bitfield)
2443 {
2444 inpcb_get_ports_used(ifp, protocol, flags, bitfield,
2445 &tcbinfo);
2446 }
2447
2448 __private_extern__ uint32_t
2449 tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
2450 {
2451 return inpcb_count_opportunistic(ifindex, &tcbinfo, flags);
2452 }
2453
2454 __private_extern__ uint32_t
2455 tcp_find_anypcb_byaddr(struct ifaddr *ifa)
2456 {
2457 #if SKYWALK
2458 if (netns_is_enabled()) {
2459 return netns_find_anyres_byaddr(ifa, IPPROTO_TCP);
2460 } else
2461 #endif /* SKYWALK */
2462 return inpcb_find_anypcb_byaddr(ifa, &tcbinfo);
2463 }
2464
2465 static void
2466 tcp_handle_msgsize(struct ip *ip, struct inpcb *inp)
2467 {
2468 struct rtentry *rt = NULL;
2469 u_short ifscope = IFSCOPE_NONE;
2470 int mtu;
2471 struct sockaddr_in icmpsrc = {
2472 .sin_len = sizeof(struct sockaddr_in),
2473 .sin_family = AF_INET, .sin_port = 0, .sin_addr = { .s_addr = 0 },
2474 .sin_zero = { 0, 0, 0, 0, 0, 0, 0, 0 }
2475 };
2476 struct icmp *icp = NULL;
2477
2478 icp = __container_of(ip, struct icmp, icmp_ip);
2479 icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
2480
2481 /*
2482 * MTU discovery:
2483 * If we got a needfrag and there is a host route to the
2484 * original destination, and the MTU is not locked, then
2485 * set the MTU in the route to the suggested new value
2486 * (if given) and then notify as usual. The ULPs will
2487 * notice that the MTU has changed and adapt accordingly.
2488 * If no new MTU was suggested, then we guess a new one
2489 * less than the current value. If the new MTU is
2490 * unreasonably small (defined by sysctl tcp_minmss), then
2491 * we reset the MTU to the interface value and enable the
2492 * lock bit, indicating that we are no longer doing MTU
2493 * discovery.
2494 */
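/*
 * With illustrative numbers: if the ICMP needfrag reports
 * icmp_nextmtu = 1400 while the host route caches
 * rmx_mtu = 1500, rmx_mtu drops to 1400 and the next
 * tcp_mtudisc() pass shrinks the MSS to match. Had the
 * report been below max(296, tcp_minmss + 40), the route's
 * MTU would instead be locked and discovery stopped.
 */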
2495 if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) {
2496 rt = inp->inp_route.ro_rt;
2497 }
2498
2499 /*
2500 * icmp6_mtudisc_update scopes the routing lookup
2501 * to the incoming interface (delivered from the mbuf
2502 * packet header).
2503 * That is mostly OK, but for asymmetric networks
2504 * it may be an issue.
2505 * Frag-needed or Packet-too-big really communicates
2506 * the MTU for the outbound data path.
2507 * Take the interface scope from the cached route or
2508 * the last outgoing interface of the inp.
2509 */
2510 if (rt != NULL) {
2511 ifscope = (rt->rt_ifp != NULL) ?
2512 rt->rt_ifp->if_index : IFSCOPE_NONE;
2513 } else {
2514 ifscope = (inp->inp_last_outifp != NULL) ?
2515 inp->inp_last_outifp->if_index : IFSCOPE_NONE;
2516 }
2517
2518 if ((rt == NULL) ||
2519 !(rt->rt_flags & RTF_HOST) ||
2520 (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING))) {
2521 rt = rtalloc1_scoped(SA(&icmpsrc), 0, RTF_CLONING | RTF_PRCLONING, ifscope);
2522 } else if (rt) {
2523 RT_LOCK(rt);
2524 rtref(rt);
2525 RT_UNLOCK(rt);
2526 }
2527
2528 if (rt != NULL) {
2529 RT_LOCK(rt);
2530 if ((rt->rt_flags & RTF_HOST) &&
2531 !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
2532 mtu = ntohs(icp->icmp_nextmtu);
2533 /*
2534 * XXX Stock BSD has changed the following
2535 * to compare with icp->icmp_ip.ip_len
2536 * to converge faster when sent packet
2537 * < route's MTU. We may want to adopt
2538 * that change.
2539 */
2540 if (mtu == 0) {
2541 mtu = ip_next_mtu(rt->rt_rmx.
2542 rmx_mtu, 1);
2543 }
2544 #if DEBUG_MTUDISC
2545 printf("MTU for %s reduced to %d\n",
2546 inet_ntop(AF_INET,
2547 &icmpsrc.sin_addr, ipv4str,
2548 sizeof(ipv4str)), mtu);
2549 #endif
2550 if (mtu < max(296, (tcp_minmss +
2551 sizeof(struct tcpiphdr)))) {
2552 rt->rt_rmx.rmx_locks |= RTV_MTU;
2553 } else if (rt->rt_rmx.rmx_mtu > mtu) {
2554 rt->rt_rmx.rmx_mtu = mtu;
2555 }
2556 }
2557 RT_UNLOCK(rt);
2558 rtfree(rt);
2559 }
2560 }
2561
2562 void
2563 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp)
2564 {
2565 tcp_seq icmp_tcp_seq;
2566 struct ipctlparam *ctl_param __single = vip;
2567 struct ip *ip = NULL;
2568 struct mbuf *m = NULL;
2569 struct in_addr faddr;
2570 struct inpcb *inp;
2571 struct tcpcb *tp;
2572 struct tcphdr *th;
2573 struct icmp *icp;
2574 size_t off;
2575 #if SKYWALK
2576 union sockaddr_in_4_6 sock_laddr;
2577 struct protoctl_ev_val prctl_ev_val;
2578 #endif /* SKYWALK */
2579 void (*notify)(struct inpcb *, int) = tcp_notify;
2580
2581 if (ctl_param != NULL) {
2582 ip = ctl_param->ipc_icmp_ip;
2583 icp = ctl_param->ipc_icmp;
2584 m = ctl_param->ipc_m;
2585 off = ctl_param->ipc_off;
2586 } else {
2587 ip = NULL;
2588 icp = NULL;
2589 m = NULL;
2590 off = 0;
2591 }
2592
2593 faddr = SIN(sa)->sin_addr;
2594 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
2595 return;
2596 }
2597
2598 if ((unsigned)cmd >= PRC_NCMDS) {
2599 return;
2600 }
2601
2602 /* Source quench is deprecated */
2603 if (cmd == PRC_QUENCH) {
2604 return;
2605 }
2606
2607 if (cmd == PRC_MSGSIZE) {
2608 notify = tcp_mtudisc;
2609 } else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2610 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2611 cmd == PRC_TIMXCEED_INTRANS) && ip) {
2612 notify = tcp_drop_syn_sent;
2613 }
2614 /*
2615 * Hostdead is ugly because it goes linearly through all PCBs.
2616 * XXX: We never get this from ICMP, otherwise it makes an
2617 * excellent DoS attack on machines with many connections.
2618 */
2619 else if (cmd == PRC_HOSTDEAD) {
2620 ip = NULL;
2621 } else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2622 return;
2623 }
2624
2625 #if SKYWALK
2626 bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2627 bzero(&sock_laddr, sizeof(sock_laddr));
2628 #endif /* SKYWALK */
2629
2630 if (ip == NULL) {
2631 in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
2632 #if SKYWALK
2633 protoctl_event_enqueue_nwk_wq_entry(ifp, NULL,
2634 sa, 0, 0, IPPROTO_TCP, cmd, NULL);
2635 #endif /* SKYWALK */
2636 return;
2637 }
2638
2639 /* Check if we can safely get the sport, dport and the sequence number from the tcp header. */
2640 if (m == NULL ||
2641 (m->m_len < off + (sizeof(unsigned short) + sizeof(unsigned short) + sizeof(tcp_seq)))) {
2642 /* Insufficient length */
2643 return;
2644 }
2645
2646 th = (struct tcphdr*)(void*)(mtod(m, uint8_t*) + off);
2647 icmp_tcp_seq = ntohl(th->th_seq);
2648
2649 inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
2650 ip->ip_src, th->th_sport, 0, NULL);
2651
2652 if (inp == NULL ||
2653 inp->inp_socket == NULL) {
2654 #if SKYWALK
2655 if (cmd == PRC_MSGSIZE) {
2656 prctl_ev_val.val = ntohs(icp->icmp_nextmtu);
2657 }
2658 prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2659
2660 sock_laddr.sin.sin_family = AF_INET;
2661 sock_laddr.sin.sin_len = sizeof(sock_laddr.sin);
2662 sock_laddr.sin.sin_addr = ip->ip_src;
2663
2664 protoctl_event_enqueue_nwk_wq_entry(ifp,
2665 SA(&sock_laddr), sa,
2666 th->th_sport, th->th_dport, IPPROTO_TCP,
2667 cmd, &prctl_ev_val);
2668 #endif /* SKYWALK */
2669 return;
2670 }
2671
2672 socket_lock(inp->inp_socket, 1);
2673 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2674 WNT_STOPUSING) {
2675 socket_unlock(inp->inp_socket, 1);
2676 return;
2677 }
2678
2679 if (PRC_IS_REDIRECT(cmd)) {
2680 /* signal EHOSTDOWN, as it flushes the cached route */
2681 (*notify)(inp, EHOSTDOWN);
2682 } else {
2683 tp = intotcpcb(inp);
2684 if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2685 SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2686 if (cmd == PRC_MSGSIZE) {
2687 tcp_handle_msgsize(ip, inp);
2688 }
2689
2690 (*notify)(inp, inetctlerrmap[cmd]);
2691 }
2692 }
2693 socket_unlock(inp->inp_socket, 1);
2694 }
2695
2696 void
2697 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
2698 {
2699 tcp_seq icmp_tcp_seq;
2700 struct in6_addr *dst;
2701 void (*notify)(struct inpcb *, int) = tcp_notify;
2702 struct ip6_hdr *ip6;
2703 struct mbuf *m;
2704 struct inpcb *inp;
2705 struct tcpcb *tp;
2706 struct icmp6_hdr *icmp6;
2707 struct ip6ctlparam *ip6cp = NULL;
2708 const struct sockaddr_in6 *sa6_src = NULL;
2709 unsigned int mtu;
2710 unsigned int off;
2711
2712 struct tcp_ports {
2713 uint16_t th_sport;
2714 uint16_t th_dport;
2715 } t_ports;
2716 #if SKYWALK
2717 union sockaddr_in_4_6 sock_laddr;
2718 struct protoctl_ev_val prctl_ev_val;
2719 #endif /* SKYWALK */
2720
2721 if (sa->sa_family != AF_INET6 ||
2722 sa->sa_len != sizeof(struct sockaddr_in6)) {
2723 return;
2724 }
2725
2726 /* Source quench is deprecated */
2727 if (cmd == PRC_QUENCH) {
2728 return;
2729 }
2730
2731 if ((unsigned)cmd >= PRC_NCMDS) {
2732 return;
2733 }
2734
2735 /* if the parameter is from icmp6, decode it. */
2736 if (d != NULL) {
2737 ip6cp = (struct ip6ctlparam *)d;
2738 icmp6 = ip6cp->ip6c_icmp6;
2739 m = ip6cp->ip6c_m;
2740 ip6 = ip6cp->ip6c_ip6;
2741 off = ip6cp->ip6c_off;
2742 sa6_src = ip6cp->ip6c_src;
2743 dst = ip6cp->ip6c_finaldst;
2744 } else {
2745 m = NULL;
2746 ip6 = NULL;
2747 off = 0; /* fool gcc */
2748 sa6_src = &sa6_any;
2749 dst = NULL;
2750 }
2751
2752 if (cmd == PRC_MSGSIZE) {
2753 notify = tcp_mtudisc;
2754 } else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2755 cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) &&
2756 ip6 != NULL) {
2757 notify = tcp_drop_syn_sent;
2758 }
2759 /*
2760 * Hostdead is ugly because it goes linearly through all PCBs.
2761 * XXX: We never get this from ICMP, otherwise it makes an
2762 * excellent DoS attack on machines with many connections.
2763 */
2764 else if (cmd == PRC_HOSTDEAD) {
2765 ip6 = NULL;
2766 } else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2767 return;
2768 }
2769
2770 #if SKYWALK
2771 bzero(&prctl_ev_val, sizeof(prctl_ev_val));
2772 bzero(&sock_laddr, sizeof(sock_laddr));
2773 #endif /* SKYWALK */
2774
2775 if (ip6 == NULL) {
2776 in6_pcbnotify(&tcbinfo, sa, 0, SA(sa6_src), 0, cmd, NULL, notify);
2777 #if SKYWALK
2778 protoctl_event_enqueue_nwk_wq_entry(ifp, NULL, sa,
2779 0, 0, IPPROTO_TCP, cmd, NULL);
2780 #endif /* SKYWALK */
2781 return;
2782 }
2783
2784 /* Check if we can safely get the ports from the tcp hdr */
2785 if (m == NULL ||
2786 (m->m_pkthdr.len <
2787 (int32_t) (off + sizeof(struct tcp_ports)))) {
2788 return;
2789 }
2790 bzero(&t_ports, sizeof(struct tcp_ports));
2791 m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
2792
2793 off += sizeof(struct tcp_ports);
2794 if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
2795 return;
2796 }
2797 m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
2798 icmp_tcp_seq = ntohl(icmp_tcp_seq);
2799
2800 if (cmd == PRC_MSGSIZE) {
2801 mtu = ntohl(icmp6->icmp6_mtu);
2802 /*
2803 * If no alternative MTU was proposed, or the proposed
2804 * MTU was too small, set to the min.
2805 */
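/*
 * The 8 bytes subtracted below presumably leave room for an
 * IPv6 fragment header, so a minimum-MTU packet can still
 * carry one.
 */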
2806 if (mtu < IPV6_MMTU) {
2807 mtu = IPV6_MMTU - 8;
2808 }
2809 }
2810
2811 inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, t_ports.th_dport, ip6_input_getdstifscope(m),
2812 &ip6->ip6_src, t_ports.th_sport, ip6_input_getsrcifscope(m), 0, NULL);
2813
2814 if (inp == NULL ||
2815 inp->inp_socket == NULL) {
2816 #if SKYWALK
2817 if (cmd == PRC_MSGSIZE) {
2818 prctl_ev_val.val = mtu;
2819 }
2820 prctl_ev_val.tcp_seq_number = icmp_tcp_seq;
2821
2822 sock_laddr.sin6.sin6_family = AF_INET6;
2823 sock_laddr.sin6.sin6_len = sizeof(sock_laddr.sin6);
2824 sock_laddr.sin6.sin6_addr = ip6->ip6_src;
2825
2826 protoctl_event_enqueue_nwk_wq_entry(ifp,
2827 SA(&sock_laddr), sa,
2828 t_ports.th_sport, t_ports.th_dport, IPPROTO_TCP,
2829 cmd, &prctl_ev_val);
2830 #endif /* SKYWALK */
2831 return;
2832 }
2833
2834 socket_lock(inp->inp_socket, 1);
2835 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2836 WNT_STOPUSING) {
2837 socket_unlock(inp->inp_socket, 1);
2838 return;
2839 }
2840
2841 if (PRC_IS_REDIRECT(cmd)) {
2842 /* signal EHOSTDOWN, as it flushes the cached route */
2843 (*notify)(inp, EHOSTDOWN);
2844 } else {
2845 tp = intotcpcb(inp);
2846 if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2847 SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2848 if (cmd == PRC_MSGSIZE) {
2849 /*
2850 * Only process the offered MTU if it
2851 * is smaller than the current one.
2852 */
2853 if (mtu < tp->t_maxseg +
2854 (sizeof(struct tcphdr) + sizeof(struct ip6_hdr))) {
2855 (*notify)(inp, inetctlerrmap[cmd]);
2856 }
2857 } else {
2858 (*notify)(inp, inetctlerrmap[cmd]);
2859 }
2860 }
2861 }
2862 socket_unlock(inp->inp_socket, 1);
2863 }
2864
2865
2866 /*
2867 * Following is where TCP initial sequence number generation occurs.
2868 *
2869 * There are two places where we must use initial sequence numbers:
2870 * 1. In SYN-ACK packets.
2871 * 2. In SYN packets.
2872 *
2873 * The ISNs in SYN-ACK packets have no monotonicity requirement,
2874 * and should be as unpredictable as possible to avoid the possibility
2875 * of spoofing and/or connection hijacking. To satisfy this
2876 * requirement, SYN-ACK ISNs are generated via the arc4random()
2877 * function. If exact RFC 1948 compliance is requested via sysctl,
2878 * these ISNs will be generated just like those in SYN packets.
2879 *
2880 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
2881 * depends on this property. In addition, these ISNs should be
2882 * unguessable so as to prevent connection hijacking. To satisfy
2883 * the requirements of this situation, the algorithm outlined in
2884 * RFC 9293 is used to generate sequence numbers.
2885 *
2886 * For more information on the theory of operation, please see
2887 * RFC 9293.
2888 *
2889 * Implementation details:
2890 *
2891 * Time is based off the system timer, and is corrected so that it
2892 * increases by one megabyte per second. This allows for proper
2893 * recycling on high speed LANs while still leaving over an hour
2894 * before rollover.
2895 *
2896 */
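/*
 * Schematically, the code below computes (a sketch, not a
 * normative formula):
 *
 *	ISN = MD5(lport, fport, laddr, faddr, isn_secret)
 *	    + (monotonic uptime in 128 ns units)
 *
 * i.e. a connection-specific random offset plus a slowly
 * advancing clock, in the spirit of RFC 9293.
 */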
2897
2898 #define ISN_BYTES_PER_SECOND 1048576
2899
2900 tcp_seq
2901 tcp_new_isn(struct tcpcb *tp)
2902 {
2903 uint32_t md5_buffer[4];
2904 tcp_seq new_isn;
2905 struct timespec timenow;
2906 MD5_CTX isn_ctx;
2907
2908 nanouptime(&timenow);
2909
2910 /* Compute the md5 hash and return the ISN. */
2911 MD5Init(&isn_ctx);
2912 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport,
2913 sizeof(u_short));
2914 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport,
2915 sizeof(u_short));
2916 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
2917 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
2918 sizeof(struct in6_addr));
2919 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
2920 sizeof(struct in6_addr));
2921 } else {
2922 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
2923 sizeof(struct in_addr));
2924 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
2925 sizeof(struct in_addr));
2926 }
2927 MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
2928 MD5Final((u_char *) &md5_buffer, &isn_ctx);
2929
2930 new_isn = (tcp_seq) md5_buffer[0];
2931
2932 /*
2933 * We use a 128ns clock, which is equivalent to 600 Mbps and wraps at
2934 * 549 seconds, thus safe for 2 MSL lifetime of TIME-WAIT-state.
2935 */
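/* 2^32 ticks * 128 ns/tick =~ 549.8 seconds, hence the figure above. */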
2936 new_isn += (timenow.tv_sec * NSEC_PER_SEC + timenow.tv_nsec) >> 7;
2937
2938 if (__probable(tcp_randomize_timestamps)) {
2939 tp->t_ts_offset = md5_buffer[1];
2940 }
2941 tp->t_latest_tx = tcp_now;
2942
2943 return new_isn;
2944 }
2945
2946
2947 /*
2948 * When a specific ICMP unreachable message is received and the
2949 * connection state is SYN-SENT, drop the connection. This behavior
2950 * is controlled by the icmp_may_rst sysctl.
2951 */
2952 void
2953 tcp_drop_syn_sent(struct inpcb *inp, int errno)
2954 {
2955 struct tcpcb *tp = intotcpcb(inp);
2956
2957 if (tp && tp->t_state == TCPS_SYN_SENT) {
2958 tcp_drop(tp, errno);
2959 }
2960 }
2961
2962 /*
2963 * Get effective MTU for redirect virtual interface. Redirect
2964 * virtual interface switches between multiple delegated interfaces.
2965 * For cases where redirect forwards packets to an ipsec interface,
2966 * the MTU should be adjusted to account for ESP encapsulation overhead.
2967 */
2968 uint32_t
2969 tcp_get_effective_mtu(struct rtentry *rt, uint32_t current_mtu)
2970 {
2971 ifnet_t ifp = NULL;
2972 ifnet_t delegated_ifp = NULL;
2973 ifnet_t outgoing_ifp = NULL;
2974 uint32_t min_mtu = 0;
2975 uint32_t outgoing_mtu = 0;
2976 uint32_t tunnel_overhead = 0;
2977
2978 if (rt == NULL || rt->rt_ifp == NULL) {
2979 return current_mtu;
2980 }
2981
2982 ifp = rt->rt_ifp;
2983 if (ifp->if_subfamily != IFNET_SUBFAMILY_REDIRECT) {
2984 return current_mtu;
2985 }
2986
2987 delegated_ifp = ifp->if_delegated.ifp;
2988 if (delegated_ifp == NULL || delegated_ifp->if_family != IFNET_FAMILY_IPSEC) {
2989 return current_mtu;
2990 }
2991
2992 min_mtu = MIN(delegated_ifp->if_mtu, current_mtu);
2993
2994 outgoing_ifp = delegated_ifp->if_delegated.ifp;
2995 if (outgoing_ifp == NULL) {
2996 return min_mtu;
2997 }
2998
2999 outgoing_mtu = outgoing_ifp->if_mtu;
3000 if (outgoing_mtu > 0) {
3001 tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
3002 if (outgoing_mtu > tunnel_overhead) {
3003 outgoing_mtu -= tunnel_overhead;
3004 }
3005 if (outgoing_mtu < min_mtu) {
3006 return outgoing_mtu;
3007 }
3008 }
3009
3010 return min_mtu;
3011 }
3012
3013 /*
3014 * When `need fragmentation' ICMP is received, update our idea of the MSS
3015 * based on the new value in the route. Also nudge TCP to send something,
3016 * since we know the packet we just sent was dropped.
3017 * This duplicates some code in the tcp_mss() function in tcp_input.c.
3018 */
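/*
 * With illustrative numbers: a new rmx_mtu of 1400 on an IPv6
 * path yields mss = 1400 - 60 = 1340 below; if timestamps are
 * in use, another TCPOLEN_TSTAMP_APPA (12) bytes come off,
 * leaving t_maxseg = 1328.
 */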
3019 void
3020 tcp_mtudisc(struct inpcb *inp, __unused int errno)
3021 {
3022 struct tcpcb *tp = intotcpcb(inp);
3023 struct rtentry *rt;
3024 struct socket *so = inp->inp_socket;
3025 int mss;
3026 u_int32_t mtu;
3027 u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr);
3028 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
3029
3030 /*
3031 * Nothing left to send after the socket is defunct or TCP is in the closed state
3032 */
3033 if ((so->so_state & SS_DEFUNCT) || (tp != NULL && tp->t_state == TCPS_CLOSED)) {
3034 return;
3035 }
3036
3037 if (isipv6) {
3038 protoHdrOverhead = sizeof(struct ip6_hdr) +
3039 sizeof(struct tcphdr);
3040 }
3041
3042 if (tp != NULL) {
3043 if (isipv6) {
3044 rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
3045 } else {
3046 rt = tcp_rtlookup(inp, IFSCOPE_NONE);
3047 }
3048 if (!rt || !rt->rt_rmx.rmx_mtu) {
3049 tp->t_maxopd = tp->t_maxseg =
3050 isipv6 ? tcp_v6mssdflt :
3051 tcp_mssdflt;
3052
3053 /* Route locked during lookup above */
3054 if (rt != NULL) {
3055 RT_UNLOCK(rt);
3056 }
3057 return;
3058 }
3059 mtu = rt->rt_rmx.rmx_mtu;
3060
3061 mtu = tcp_get_effective_mtu(rt, mtu);
3062
3063 /* Route locked during lookup above */
3064 RT_UNLOCK(rt);
3065
3066 #if NECP
3067 // Adjust MTU if necessary.
3068 mtu = necp_socket_get_effective_mtu(inp, mtu);
3069 #endif /* NECP */
3070 mss = mtu - protoHdrOverhead;
3071
3072 if (tp->t_maxopd) {
3073 mss = min(mss, tp->t_maxopd);
3074 }
3075 /*
3076 * XXX - The above conditional probably violates the TCP
3077 * spec. The problem is that, since we don't know the
3078 * other end's MSS, we are supposed to use a conservative
3079 * default. But, if we do that, then MTU discovery will
3080 * never actually take place, because the conservative
3081 * default is much less than the MTUs typically seen
3082 * on the Internet today. For the moment, we'll sweep
3083 * this under the carpet.
3084 *
3085 * The conservative default might not actually be a problem
3086 * if the only case this occurs is when sending an initial
3087 * SYN with options and data to a host we've never talked
3088 * to before. Then, they will reply with an MSS value which
3089 * will get recorded and the new parameters should get
3090 * recomputed. For Further Study.
3091 */
3092 if (tp->t_maxopd <= mss) {
3093 return;
3094 }
3095 tp->t_maxopd = mss;
3096
3097 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
3098 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) {
3099 mss -= TCPOLEN_TSTAMP_APPA;
3100 }
3101
3102 #if MPTCP
3103 mss -= mptcp_adj_mss(tp, TRUE);
3104 #endif
3105 if (so->so_snd.sb_hiwat < mss) {
3106 mss = so->so_snd.sb_hiwat;
3107 }
3108
3109 tp->t_maxseg = mss;
3110
3111 ASSERT(tp->t_maxseg);
3112
3113 /*
3114 * Reset the slow-start flight size, as it may depend on the
3115 * new MSS
3116 */
3117 if (CC_ALGO(tp)->cwnd_init != NULL) {
3118 CC_ALGO(tp)->cwnd_init(tp);
3119 }
3120
3121 if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.rwnd_init != NULL) {
3122 tcp_cc_rledbat.rwnd_init(tp);
3123 }
3124
3125 tcpstat.tcps_mturesent++;
3126 tp->t_rtttime = 0;
3127 tp->snd_nxt = tp->snd_una;
3128 tcp_output(tp);
3129 }
3130 }
3131
3132 /*
3133 * Look-up the routing entry to the peer of this inpcb. If no route
3134 * is found and one cannot be allocated, return NULL. This routine
3135 * is called by TCP routines that access the rmx structure and by tcp_mss
3136 * to get the interface MTU. If a route is found, this routine will
3137 * hold the rtentry lock; the caller is responsible for unlocking.
3138 */
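/*
 * Typical caller pattern (a sketch; tcp_mtudisc() above is a
 * real instance):
 *
 *	rt = tcp_rtlookup(inp, IFSCOPE_NONE);
 *	if (rt != NULL) {
 *		... consult rt->rt_rmx ...
 *		RT_UNLOCK(rt);
 *	}
 */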
3139 struct rtentry *
3140 tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope)
3141 {
3142 struct route *ro;
3143 struct rtentry *rt;
3144 struct tcpcb *tp;
3145
3146 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3147
3148 ro = &inp->inp_route;
3149 if ((rt = ro->ro_rt) != NULL) {
3150 RT_LOCK(rt);
3151 }
3152
3153 if (ROUTE_UNUSABLE(ro)) {
3154 if (rt != NULL) {
3155 RT_UNLOCK(rt);
3156 rt = NULL;
3157 }
3158 ROUTE_RELEASE(ro);
3159 /* No route yet, so try to acquire one */
3160 if (inp->inp_faddr.s_addr != INADDR_ANY) {
3161 unsigned int ifscope;
3162
3163 ro->ro_dst.sa_family = AF_INET;
3164 ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
3165 SIN(&ro->ro_dst)->sin_addr = inp->inp_faddr;
3166
3167 /*
3168 * If the socket was bound to an interface, then
3169 * the bound-to-interface takes precedence over
3170 * the inbound interface passed in by the caller
3171 * (if we get here as part of the output path then
3172 * input_ifscope is IFSCOPE_NONE).
3173 */
3174 ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3175 inp->inp_boundifp->if_index : input_ifscope;
3176
3177 rtalloc_scoped(ro, ifscope);
3178 if ((rt = ro->ro_rt) != NULL) {
3179 RT_LOCK(rt);
3180 }
3181 }
3182 }
3183 if (rt != NULL) {
3184 RT_LOCK_ASSERT_HELD(rt);
3185 }
3186
3187 /*
3188 * Update MTU discovery determination. Don't do it if:
3189 * 1) it is disabled via the sysctl
3190 * 2) the route isn't up
3191 * 3) the MTU is locked (if it is, then discovery has been
3192 * disabled)
3193 */
3194
3195 tp = intotcpcb(inp);
3196
3197 if (!path_mtu_discovery || ((rt != NULL) &&
3198 (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3199 tp->t_flags &= ~TF_PMTUD;
3200 } else {
3201 tp->t_flags |= TF_PMTUD;
3202 }
3203
3204 if (rt != NULL && rt->rt_ifp != NULL) {
3205 somultipages(inp->inp_socket,
3206 (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3207 tcp_set_tso(tp, rt->rt_ifp);
3208 soif2kcl(inp->inp_socket,
3209 (rt->rt_ifp->if_eflags & IFEF_2KCL));
3210 /* Don't do ECN and L4S for Loopback & Cellular (if L4S is default) */
3211 if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0 &&
3212 !(IFNET_IS_CELLULAR(rt->rt_ifp) && rt->rt_ifp->if_l4s_mode == IFRTYPE_L4S_DEFAULT)) {
3213 tcp_set_ecn(tp);
3214 tcp_set_l4s(tp, rt->rt_ifp);
3215 }
3216 if (inp->inp_last_outifp == NULL) {
3217 inp->inp_last_outifp = rt->rt_ifp;
3218 #if SKYWALK
3219 if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3220 netns_set_ifnet(&inp->inp_netns_token,
3221 inp->inp_last_outifp);
3222 }
3223 #endif /* SKYWALK */
3224 }
3225 }
3226
3227 /* Note if the peer is local */
3228 if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3229 (rt->rt_gateway->sa_family == AF_LINK ||
3230 rt->rt_ifp->if_flags & IFF_LOOPBACK ||
3231 in_localaddr(inp->inp_faddr))) {
3232 tp->t_flags |= TF_LOCAL;
3233 }
3234
3235 /*
3236 * Caller needs to call RT_UNLOCK(rt).
3237 */
3238 return rt;
3239 }
3240
3241 struct rtentry *
3242 tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope)
3243 {
3244 struct route_in6 *ro6;
3245 struct rtentry *rt;
3246 struct tcpcb *tp;
3247
3248 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
3249
3250 ro6 = &inp->in6p_route;
3251 if ((rt = ro6->ro_rt) != NULL) {
3252 RT_LOCK(rt);
3253 }
3254
3255 if (ROUTE_UNUSABLE(ro6)) {
3256 if (rt != NULL) {
3257 RT_UNLOCK(rt);
3258 rt = NULL;
3259 }
3260 ROUTE_RELEASE(ro6);
3261 /* No route yet, so try to acquire one */
3262 if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
3263 struct sockaddr_in6 *dst6;
3264 unsigned int ifscope;
3265
3266 dst6 = SIN6(&ro6->ro_dst);
3267 dst6->sin6_family = AF_INET6;
3268 dst6->sin6_len = sizeof(*dst6);
3269 dst6->sin6_addr = inp->in6p_faddr;
3270
3271 /*
3272 * If the socket was bound to an interface, then
3273 * the bound-to-interface takes precedence over
3274 * the inbound interface passed in by the caller
3275 * (if we get here as part of the output path then
3276 * input_ifscope is IFSCOPE_NONE).
3277 */
3278 ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3279 inp->inp_boundifp->if_index : input_ifscope;
3280
3281 rtalloc_scoped((struct route *)ro6, ifscope);
3282 if ((rt = ro6->ro_rt) != NULL) {
3283 RT_LOCK(rt);
3284 }
3285 }
3286 }
3287 if (rt != NULL) {
3288 RT_LOCK_ASSERT_HELD(rt);
3289 }
3290
3300 tp = intotcpcb(inp);
3301
3302 /*
3303 * Update MTU discovery determination. Don't do it if:
3304 * 1) it is disabled via the sysctl
3305 * 2) the route isn't up
3306 * 3) the MTU is locked (if it is, then discovery has been
3307 * disabled)
3308 */
3309
3310 if (!path_mtu_discovery || ((rt != NULL) &&
3311 (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3312 tp->t_flags &= ~TF_PMTUD;
3313 } else {
3314 tp->t_flags |= TF_PMTUD;
3315 }
3316
3317 if (rt != NULL && rt->rt_ifp != NULL) {
3318 somultipages(inp->inp_socket,
3319 (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3320 tcp_set_tso(tp, rt->rt_ifp);
3321 soif2kcl(inp->inp_socket,
3322 (rt->rt_ifp->if_eflags & IFEF_2KCL));
3323 /* Don't do ECN and L4S for Loopback & Cellular (if L4S is default) */
3324 if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0 &&
3325 !(IFNET_IS_CELLULAR(rt->rt_ifp) && rt->rt_ifp->if_l4s_mode == IFRTYPE_L4S_DEFAULT)) {
3326 tcp_set_ecn(tp);
3327 tcp_set_l4s(tp, rt->rt_ifp);
3328 }
3329 if (inp->inp_last_outifp == NULL) {
3330 inp->inp_last_outifp = rt->rt_ifp;
3331 #if SKYWALK
3332 if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
3333 netns_set_ifnet(&inp->inp_netns_token,
3334 inp->inp_last_outifp);
3335 }
3336 #endif /* SKYWALK */
3337 }
3338
3339 /* Note if the peer is local */
3340 if (!(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3341 (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
3342 IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
3343 rt->rt_gateway->sa_family == AF_LINK ||
3344 in6_localaddr(&inp->in6p_faddr))) {
3345 tp->t_flags |= TF_LOCAL;
3346 }
3347 }
3348
3349 /*
3350 * Caller needs to call RT_UNLOCK(rt).
3351 */
3352 return rt;
3353 }
3354
3355 #if IPSEC
3356 /* compute ESP/AH header size for TCP, including outer IP header. */
3357 size_t
3358 ipsec_hdrsiz_tcp(struct tcpcb *tp)
3359 {
3360 struct inpcb *inp;
3361 struct mbuf *m;
3362 size_t hdrsiz;
3363 struct ip *ip;
3364 struct ip6_hdr *ip6 = NULL;
3365 struct tcphdr *th;
3366
3367 if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) {
3368 return 0;
3369 }
3370 MGETHDR(m, M_DONTWAIT, MT_DATA); /* MAC-OK */
3371 if (!m) {
3372 return 0;
3373 }
3374
3375 if ((inp->inp_vflag & INP_IPV6) != 0) {
3376 ip6 = mtod(m, struct ip6_hdr *);
3377 th = (struct tcphdr *)(void *)(ip6 + 1);
3378 m->m_pkthdr.len = m->m_len =
3379 sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3380 tcp_fillheaders(m, tp, ip6, th, NULL, NULL);
3381 hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3382 } else {
3383 ip = mtod(m, struct ip *);
3384 th = (struct tcphdr *)(ip + 1);
3385 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
3386 tcp_fillheaders(m, tp, ip, th, NULL, NULL);
3387 hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3388 }
3389 m_free(m);
3390 return hdrsiz;
3391 }
3392 #endif /* IPSEC */
3393
3394 int
3395 tcp_lock(struct socket *so, int refcount, void *lr)
3396 {
3397 lr_ref_t lr_saved = TCP_INIT_LR_SAVED(lr);
3398
3399 retry:
3400 if (so->so_pcb != NULL) {
3401 if (so->so_flags & SOF_MP_SUBFLOW) {
3402 struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3403 struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3404
3405 socket_lock(mp_so, refcount);
3406
3407 /*
3408 * Check if we became non-MPTCP while waiting for the lock.
3409 * If yes, we have to retry to grab the right lock.
3410 */
3411 if (!(so->so_flags & SOF_MP_SUBFLOW)) {
3412 socket_unlock(mp_so, refcount);
3413 goto retry;
3414 }
3415 } else {
3416 lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3417
3418 if (so->so_flags & SOF_MP_SUBFLOW) {
3419 /*
3420 * While waiting for the lock, we might have
3421 * become MPTCP-enabled (see mptcp_subflow_socreate).
3422 */
3423 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3424 goto retry;
3425 }
3426 }
3427 } else {
3428 panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s",
3429 so, lr_saved, solockhistory_nr(so));
3430 /* NOTREACHED */
3431 }
3432
3433 if (so->so_usecount < 0) {
3434 panic("tcp_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s",
3435 so, so->so_pcb, lr_saved, so->so_usecount,
3436 solockhistory_nr(so));
3437 /* NOTREACHED */
3438 }
3439 if (refcount) {
3440 so->so_usecount++;
3441 }
3442 so->lock_lr[so->next_lock_lr] = lr_saved;
3443 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
3444 return 0;
3445 }
3446
3447 int
3448 tcp_unlock(struct socket *so, int refcount, void *lr)
3449 {
3450 lr_ref_t lr_saved = TCP_INIT_LR_SAVED(lr);
3451
3452
3453 #ifdef MORE_TCPLOCK_DEBUG
3454 printf("tcp_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x "
3455 "lr=0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so),
3456 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
3457 (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)),
3458 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
3459 #endif
3460 if (refcount) {
3461 so->so_usecount--;
3462 }
3463
3464 if (so->so_usecount < 0) {
3465 panic("tcp_unlock: so=%p usecount=%x lrh= %s",
3466 so, so->so_usecount, solockhistory_nr(so));
3467 /* NOTREACHED */
3468 }
3469 if (so->so_pcb == NULL) {
3470 panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s",
3471 so, so->so_usecount, lr_saved, solockhistory_nr(so));
3472 /* NOTREACHED */
3473 } else {
3474 so->unlock_lr[so->next_unlock_lr] = lr_saved;
3475 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
3476
3477 if (so->so_flags & SOF_MP_SUBFLOW) {
3478 struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3479 struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3480
3481 socket_lock_assert_owned(mp_so);
3482
3483 socket_unlock(mp_so, refcount);
3484 } else {
3485 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
3486 LCK_MTX_ASSERT_OWNED);
3487 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3488 }
3489 }
3490 return 0;
3491 }
3492
3493 lck_mtx_t *
3494 tcp_getlock(struct socket *so, int flags)
3495 {
3496 struct inpcb *inp = sotoinpcb(so);
3497
3498 if (so->so_pcb) {
3499 if (so->so_usecount < 0) {
3500 panic("tcp_getlock: so=%p usecount=%x lrh= %s",
3501 so, so->so_usecount, solockhistory_nr(so));
3502 }
3503
3504 if (so->so_flags & SOF_MP_SUBFLOW) {
3505 struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3506 struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3507
3508 return mp_so->so_proto->pr_getlock(mp_so, flags);
3509 } else {
3510 return &inp->inpcb_mtx;
3511 }
3512 } else {
3513 panic("tcp_getlock: so=%p NULL so_pcb %s",
3514 so, solockhistory_nr(so));
3515 return so->so_proto->pr_domain->dom_mtx;
3516 }
3517 }
3518
3519 /*
3520 * Determine if we can grow the receive socket buffer to avoid sending
3521 * a zero window update to the peer. We allow even socket buffers that
3522 * have fixed size (set by the application) to grow if the resource
3523 * constraints are met. They will also be trimmed after the application
3524 * reads data.
3525 */
3526 static void
3527 tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb)
3528 {
3529 u_int32_t rcvbufinc = tp->t_maxseg << 4;
3530 u_int32_t rcvbuf = sb->sb_hiwat;
3531 struct socket *so = tp->t_inpcb->inp_socket;
3532
3533 if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
3534 return;
3535 }
3536
3537 if (tcp_do_autorcvbuf == 1 &&
3538 (tp->t_flags & TF_SLOWLINK) == 0 &&
3539 (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
3540 (rcvbuf - sb->sb_cc) < rcvbufinc &&
3541 rcvbuf < tcp_autorcvbuf_max &&
3542 (sb->sb_idealsize > 0 &&
3543 sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
3544 sbreserve(sb,
3545 min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max));
3546 }
3547 }
3548
3549 int32_t
3550 tcp_sbspace(struct tcpcb *tp)
3551 {
3552 struct socket *so = tp->t_inpcb->inp_socket;
3553 struct sockbuf *sb = &so->so_rcv;
3554 u_int32_t rcvbuf;
3555 int32_t space;
3556 int32_t pending = 0;
3557
3558 if (so->so_flags & SOF_MP_SUBFLOW) {
3559 /* We still need to grow TCP's buffer to have a BDP-estimate */
3560 tcp_sbrcv_grow_rwin(tp, sb);
3561
3562 return mptcp_sbspace(tptomptp(tp));
3563 }
3564
3565 tcp_sbrcv_grow_rwin(tp, sb);
3566
3567 /* hiwat might have changed */
3568 rcvbuf = sb->sb_hiwat;
3569
3570 space = ((int32_t) imin((rcvbuf - sb->sb_cc),
3571 (sb->sb_mbmax - sb->sb_mbcnt)));
3572 if (space < 0) {
3573 space = 0;
3574 }
3575
3576 #if CONTENT_FILTER
3577 /* Compensate for data being processed by content filters */
3578 pending = cfil_sock_data_space(sb);
3579 #endif /* CONTENT_FILTER */
3580 if (pending > space) {
3581 space = 0;
3582 } else {
3583 space -= pending;
3584 }
3585
3586 /*
3587 * Avoid increasing window size if the current window
3588 * is already very low; we could be in "persist" mode and
3589 * we could break some apps (see rdar://5409343)
3590 */
3591
3592 if (space < tp->t_maxseg) {
3593 return space;
3594 }
3595
3596 /* Clip window size for slower link */
3597
3598 if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) {
3599 return imin(space, slowlink_wsize);
3600 }
3601
3602 return space;
3603 }
3604 /*
3605 * Checks TCP Segment Offloading capability for a given connection
3606 * and interface pair.
3607 */
3608 void
3609 tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
3610 {
3611 struct inpcb *inp;
3612 int isipv6;
3613 struct ifnet *tunnel_ifp = NULL;
3614 #define IFNET_TSO_MASK (IFNET_TSO_IPV6 | IFNET_TSO_IPV4)
3615
3616 tp->t_flags &= ~TF_TSO;
3617
3618 /*
3619 * Bail if there's a non-TSO-capable filter on the interface.
3620 */
3621 if (ifp == NULL || ifp->if_flt_no_tso_count > 0) {
3622 return;
3623 }
3624
3625 inp = tp->t_inpcb;
3626 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
3627
3628 #if MPTCP
3629 /*
3630 * We can't use TSO if this tcpcb belongs to an MPTCP session.
3631 */
3632 if (inp->inp_socket->so_flags & SOF_MP_SUBFLOW) {
3633 return;
3634 }
3635 #endif
3636 /*
3637 * We can't use TSO if the TSO capability of the tunnel interface does
3638 * not match the capability of the other interfaces known to TCP
3639 */
3640 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
3641 u_int tunnel_if_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
3642
3643 if (tunnel_if_index != 0) {
3644 ifnet_head_lock_shared();
3645 tunnel_ifp = ifindex2ifnet[tunnel_if_index];
3646 ifnet_head_done();
3647 }
3648
3649 if (tunnel_ifp == NULL) {
3650 return;
3651 }
3652
3653 if ((ifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3654 if (tso_debug > 0) {
3655 os_log(OS_LOG_DEFAULT,
3656 "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with ifp %s",
3657 __func__,
3658 ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3659 tunnel_ifp->if_xname, ifp->if_xname);
3660 }
3661 return;
3662 }
3663 if (inp->inp_last_outifp != NULL &&
3664 (inp->inp_last_outifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3665 if (tso_debug > 0) {
3666 os_log(OS_LOG_DEFAULT,
3667 "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_last_outifp %s",
3668 __func__,
3669 ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3670 tunnel_ifp->if_xname, inp->inp_last_outifp->if_xname);
3671 }
3672 return;
3673 }
3674 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp != NULL &&
3675 (inp->inp_boundifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
3676 if (tso_debug > 0) {
3677 os_log(OS_LOG_DEFAULT,
3678 "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_boundifp %s",
3679 __func__,
3680 ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3681 tunnel_ifp->if_xname, inp->inp_boundifp->if_xname);
3682 }
3683 return;
3684 }
3685 }
3686
3687 if (isipv6) {
3688 if (ifp->if_hwassist & IFNET_TSO_IPV6) {
3689 tp->t_flags |= TF_TSO;
3690 if (ifp->if_tso_v6_mtu != 0) {
3691 tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
3692 } else {
3693 tp->tso_max_segment_size = TCP_MAXWIN;
3694 }
3695 }
3696 } else {
3697 if (ifp->if_hwassist & IFNET_TSO_IPV4) {
3698 tp->t_flags |= TF_TSO;
3699 if (ifp->if_tso_v4_mtu != 0) {
3700 tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
3701 } else {
3702 tp->tso_max_segment_size = TCP_MAXWIN;
3703 }
3704 if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
3705 tp->tso_max_segment_size -=
3706 CLAT46_HDR_EXPANSION_OVERHD;
3707 }
3708 }
3709 }
3710
3711 if (tso_debug > 1) {
3712 os_log(OS_LOG_DEFAULT, "%s: %u > %u TSO %d ifp %s",
3713 __func__,
3714 ntohs(tp->t_inpcb->inp_lport),
3715 ntohs(tp->t_inpcb->inp_fport),
3716 (tp->t_flags & TF_TSO) != 0,
3717 ifp != NULL ? ifp->if_xname : "<NULL>");
3718 }
3719 }
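/*
 * Illustrative sketch (not built): the three tunnel checks above share one
 * idiom -- two interfaces "agree" on TSO only when their capability bits
 * are equal under IFNET_TSO_MASK. The DEMO_* stand-ins below are
 * hypothetical values, not the real IFNET_TSO_* bits.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define DEMO_TSO_IPV4 0x01u
#define DEMO_TSO_IPV6 0x02u
#define DEMO_TSO_MASK (DEMO_TSO_IPV4 | DEMO_TSO_IPV6)

static bool
demo_tso_agree(uint32_t hwassist_a, uint32_t hwassist_b)
{
	/* Masking first ignores every non-TSO hwassist capability */
	return (hwassist_a & DEMO_TSO_MASK) == (hwassist_b & DEMO_TSO_MASK);
}
#endif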
3720
3721 /*
3722 * Function to calculate the tcp clock. The tcp clock gets updated
3723 * at the boundaries of the tcp layer. This is done in three places:
3724 * 1. Right before processing an input tcp packet
3725 * 2. Whenever a connection wants to access the network using tcp_usrreqs
3726 * 3. When a tcp timer fires or before tcp slow timeout
3727 *
3728 */
3729 void
3730 calculate_tcp_clock(void)
3731 {
3732 uint32_t current_tcp_now;
3733 struct timeval now;
3734 uint32_t tmp;
3735
3736 microuptime(&now);
3737
3738 /*
3739 * Update coarse-grained networking timestamp (in sec.); the idea
3740 * is to update the counter returnable via net_uptime() when
3741 * we read time.
3742 */
3743 net_update_uptime_with_time(&now);
3744
3745 current_tcp_now = (uint32_t)now.tv_sec * 1000 + now.tv_usec / TCP_RETRANSHZ_TO_USEC;
3746
3747 tmp = os_atomic_load(&tcp_now, relaxed);
3748 if (tmp < current_tcp_now) {
3749 os_atomic_cmpxchg(&tcp_now, tmp, current_tcp_now, relaxed);
3750
3751 /*
3752 * No cmpxchg loop needed here. If someone else updated quicker,
3753 * we can take that value. The only requirement is that
3754 * tcp_now never decreases.
3755 */
3756 }
3757 }
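/*
 * Illustrative sketch (not built): the same single-attempt CAS idea in
 * portable C11 atomics. Losing the race is harmless because the winner
 * stored a value at least as new, so the clock never moves backward.
 * The demo_* names are hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t demo_now;

static void
demo_clock_advance(uint32_t current)
{
	uint32_t seen = atomic_load_explicit(&demo_now, memory_order_relaxed);

	if (seen < current) {
		/* One shot is enough: on failure, someone stored a newer value */
		(void)atomic_compare_exchange_strong_explicit(&demo_now, &seen,
		    current, memory_order_relaxed, memory_order_relaxed);
	}
}
#endif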
3758
3759 /*
3760 * Compute receive window scaling that we are going to request
3761 * for this connection based on sb_hiwat. Try to leave some
3762 * room to potentially increase the window size up to a maximum
3763 * defined by the constant tcp_autorcvbuf_max.
3764 */
3765 uint8_t
3766 tcp_get_max_rwinscale(struct tcpcb *tp, struct socket *so)
3767 {
3768 uint8_t rcv_wscale;
3769 uint32_t maxsockbufsize;
3770
3771 rcv_wscale = MAX((uint8_t)tcp_win_scale, tp->request_r_scale);
3772 maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
3773 so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
3774
3775 /*
3776 * Window scale should not exceed what is needed
3777 * to send the max receive window size; adding 1 to TCP_MAXWIN
3778 * ensures that.
3779 */
3780 while (rcv_wscale < TCP_MAX_WINSHIFT &&
3781 ((TCP_MAXWIN + 1) << rcv_wscale) < maxsockbufsize) {
3782 rcv_wscale++;
3783 }
3784 rcv_wscale = MIN(rcv_wscale, TCP_MAX_WINSHIFT);
3785
3786 return rcv_wscale;
3787 }
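/*
 * Illustrative sketch (not built): the window-scale loop above with
 * stand-in constants (65535 for TCP_MAXWIN, 14 for TCP_MAX_WINSHIFT).
 * The demo_* names are hypothetical.
 */
#if 0
#include <stdint.h>

#define DEMO_MAXWIN       65535u
#define DEMO_MAX_WINSHIFT 14

static uint8_t
demo_wscale(uint32_t maxsockbuf)
{
	uint8_t w = 0;

	while (w < DEMO_MAX_WINSHIFT &&
	    ((DEMO_MAXWIN + 1u) << w) < maxsockbuf) {
		w++;
	}
	/* e.g. a 4 MiB buffer yields w == 6, since 65536 << 6 == 4 MiB */
	return w;
}
#endif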
3788
3789 int
3790 tcp_notsent_lowat_check(struct socket *so)
3791 {
3792 struct inpcb *inp = sotoinpcb(so);
3793 struct tcpcb *tp = NULL;
3794 int notsent = 0;
3795
3796 if (inp != NULL) {
3797 tp = intotcpcb(inp);
3798 }
3799
3800 if (tp == NULL) {
3801 return 0;
3802 }
3803
3804 notsent = so->so_snd.sb_cc -
3805 (tp->snd_nxt - tp->snd_una);
3806
3807 /*
3808 * When we send a FIN or SYN, notsent can be negative.
3809 * In that case we still need to send a write event to the
3810 * process if it is waiting. In the FIN case, it will
3811 * get an error from send because cantsendmore will be set.
3812 */
3813 if (notsent <= tp->t_notsent_lowat) {
3814 return 1;
3815 }
3816
3817 /*
3818 * When Nagle's algorithm is not disabled, it is better
3819 * to keep waking up the client until there is at least
3820 * one maxseg of data to write.
3821 */
3822 if ((tp->t_flags & TF_NODELAY) == 0 &&
3823 notsent > 0 && notsent < tp->t_maxseg) {
3824 return 1;
3825 }
3826 return 0;
3827 }
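/*
 * Illustrative sketch (not built): the lowat test above in isolation.
 * Unsent data is what sits in the send buffer beyond snd_nxt; the writer
 * is woken when it drops to the threshold, plus the Nagle special case.
 * The demo_* names are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
demo_notsent_wakeup(int32_t sb_cc, uint32_t snd_nxt, uint32_t snd_una,
    int32_t lowat, int32_t maxseg, bool nodelay)
{
	int32_t notsent = sb_cc - (int32_t)(snd_nxt - snd_una);

	if (notsent <= lowat) {
		return true;    /* at/below the mark (or negative: FIN/SYN) */
	}
	/* With Nagle on, keep the writer going until a full maxseg queues */
	return !nodelay && notsent > 0 && notsent < maxseg;
}
#endif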
3828
3829 void
3830 tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3831 {
3832 struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
3833 uint16_t rxcount = 0;
3834
3835 if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3836 tp->t_dsack_lastuna = tp->snd_una;
3837 }
3838 /*
3839 * First check if a segment already exists for this
3840 * sequence space.
3841 */
3842
3843 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3844 if (SEQ_GT(rxseg->rx_start, start)) {
3845 break;
3846 }
3847 prev = rxseg;
3848 }
3849 next = rxseg;
3850
3851 /* check if prev seg is for this sequence */
3852 if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
3853 SEQ_GEQ(prev->rx_end, end)) {
3854 prev->rx_count++;
3855 return;
3856 }
3857
3858 /*
3859 * There are a few possibilities at this point:
3860 * 1. prev overlaps with the beginning of this sequence
3861 * 2. next overlaps with the end of this sequence
3862 * 3. there is no overlap.
3863 */
3864
3865 if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
3866 if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
3867 start = prev->rx_end + 1;
3868 prev->rx_count++;
3869 } else {
3870 prev->rx_end = (start - 1);
3871 rxcount = prev->rx_count;
3872 }
3873 }
3874
3875 if (next != NULL && SEQ_LT(next->rx_start, end)) {
3876 if (SEQ_LEQ(next->rx_end, end)) {
3877 end = next->rx_start - 1;
3878 next->rx_count++;
3879 } else {
3880 next->rx_start = end + 1;
3881 rxcount = next->rx_count;
3882 }
3883 }
3884 if (!SEQ_LT(start, end)) {
3885 return;
3886 }
3887
3888 if (tcp_rxt_seg_max > 0 && tp->t_rxt_seg_count >= tcp_rxt_seg_max) {
3889 rxseg = SLIST_FIRST(&tp->t_rxt_segments);
3890 if (prev == rxseg) {
3891 prev = NULL;
3892 }
3893 SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3894 tcp_rxt_seg, rx_link);
3895
3896 tcp_rxt_seg_drop++;
3897 tp->t_rxt_seg_drop++;
3898 zfree(tcp_rxt_seg_zone, rxseg);
3899 tcp_memacct_sub(kalloc_type_size(tcp_rxt_seg_zone));
3900
3901 tp->t_rxt_seg_count -= 1;
3902 }
3903
3904 rxseg = zalloc_flags(tcp_rxt_seg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3905 tcp_memacct_add(kalloc_type_size(tcp_rxt_seg_zone));
3906 rxseg->rx_start = start;
3907 rxseg->rx_end = end;
3908 rxseg->rx_count = rxcount + 1;
3909
3910 if (prev != NULL) {
3911 SLIST_INSERT_AFTER(prev, rxseg, rx_link);
3912 } else {
3913 SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
3914 }
3915 tp->t_rxt_seg_count += 1;
3916 }
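/*
 * Worked example (illustrative): with an existing record [10,20] and a new
 * retransmission of [15,30], the overlap handling above trims the old
 * record to [10,14] and creates [15,30] with the old record's rx_count
 * plus one, so per-range retransmit counts survive the split.
 */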
3917
3918 struct tcp_rxt_seg *
3919 tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3920 {
3921 struct tcp_rxt_seg *rxseg;
3922
3923 if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3924 return NULL;
3925 }
3926
3927 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3928 if (SEQ_LEQ(rxseg->rx_start, start) &&
3929 SEQ_GEQ(rxseg->rx_end, end)) {
3930 return rxseg;
3931 }
3932 if (SEQ_GT(rxseg->rx_start, start)) {
3933 break;
3934 }
3935 }
3936 return NULL;
3937 }
3938
3939 void
3940 tcp_rxtseg_set_spurious(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3941 {
3942 struct tcp_rxt_seg *rxseg;
3943
3944 if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3945 return;
3946 }
3947
3948 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3949 if (SEQ_GEQ(rxseg->rx_start, start) &&
3950 SEQ_LEQ(rxseg->rx_end, end)) {
3951 /*
3952 * If the segment was retransmitted only once, mark it as
3953 * spurious.
3954 */
3955 if (rxseg->rx_count == 1) {
3956 rxseg->rx_flags |= TCP_RXT_SPURIOUS;
3957 }
3958 }
3959
3960 if (SEQ_GEQ(rxseg->rx_start, end)) {
3961 break;
3962 }
3963 }
3964 return;
3965 }
3966
3967 void
3968 tcp_rxtseg_clean(struct tcpcb *tp)
3969 {
3970 struct tcp_rxt_seg *rxseg, *next;
3971
3972 SLIST_FOREACH_SAFE(rxseg, &tp->t_rxt_segments, rx_link, next) {
3973 SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3974 tcp_rxt_seg, rx_link);
3975 zfree(tcp_rxt_seg_zone, rxseg);
3976 tcp_memacct_sub(kalloc_type_size(tcp_rxt_seg_zone));
3977 }
3978 tp->t_rxt_seg_count = 0;
3979 tp->t_dsack_lastuna = tp->snd_max;
3980 }
3981
3982 boolean_t
3983 tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack)
3984 {
3985 boolean_t bad_rexmt;
3986 struct tcp_rxt_seg *rxseg;
3987
3988 if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3989 return FALSE;
3990 }
3991
3992 /*
3993 * If all of the segments in this window are not cumulatively
3994 * acknowledged, then there can still be undetected packet loss.
3995 * Do not restore congestion window in that case.
3996 */
3997 if (SEQ_LT(th_ack, tp->snd_recover)) {
3998 return FALSE;
3999 }
4000
4001 bad_rexmt = TRUE;
4002 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4003 if (!(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
4004 bad_rexmt = FALSE;
4005 break;
4006 }
4007 }
4008 return bad_rexmt;
4009 }
4010
4011 u_int32_t
4012 tcp_rxtseg_total_size(struct tcpcb *tp)
4013 {
4014 struct tcp_rxt_seg *rxseg;
4015 u_int32_t total_size = 0;
4016
4017 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
4018 total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
4019 }
4020 return total_size;
4021 }
4022
4023 static void tcp_rack_free_and_disable(struct tcpcb *tp);
4024
4025 int
4026 tcp_seg_cmp(const struct tcp_seg_sent *seg1, const struct tcp_seg_sent *seg2)
4027 {
4028 return (int)(seg1->end_seq - seg2->end_seq);
4029 }
4030
4031 RB_GENERATE(tcp_seg_sent_tree_head, tcp_seg_sent, seg_link, tcp_seg_cmp)
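/*
 * Illustrative sketch (not built): the comparator above relies on
 * serial-number arithmetic. Casting the unsigned 32-bit difference to a
 * signed int orders sequence numbers correctly across wraparound.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t a = 0xfffffff0u;       /* just before the wrap */
	uint32_t b = 0x00000010u;       /* just after the wrap  */

	assert((int)(b - a) > 0);       /* b still sorts after a */
	assert((int)(a - b) < 0);       /* and a before b        */
	return 0;
}
#endif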
4032
4033 uint32_t
4034 tcp_seg_len(struct tcp_seg_sent *seg)
4035 {
4036 if (SEQ_LT(seg->end_seq, seg->start_seq)) {
4037 os_log_error(OS_LOG_DEFAULT, "segment end(%u) can't be smaller "
4038 "than segment start(%u)", seg->end_seq, seg->start_seq);
4039 }
4040
4041 return seg->end_seq - seg->start_seq;
4042 }
4043
4044 static struct tcp_seg_sent *
4045 tcp_seg_alloc_init(struct tcpcb *tp)
4046 {
4047 struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->seg_pool.free_segs);
4048 if (seg != NULL) {
4049 TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4050 tp->seg_pool.free_segs_count--;
4051
4052 bzero(seg, sizeof(*seg));
4053 } else {
4054 if (tcp_memacct_hardlimit()) {
4055 return NULL;
4056 }
4057
4058 seg = zalloc_flags(tcp_seg_sent_zone, Z_NOPAGEWAIT | Z_ZERO);
4059 if (seg == NULL) {
4060 return NULL;
4061 }
4062 tcp_memacct_add(kalloc_type_size(tcp_seg_sent_zone));
4063 }
4064
4065 return seg;
4066 }
4067
4068 static void
4069 tcp_update_seg_after_rto(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4070 uint32_t xmit_ts, uint8_t flags)
4071 {
4072 tcp_rack_transmit_seg(tp, found_seg, found_seg->start_seq, found_seg->end_seq,
4073 xmit_ts, flags);
4074 struct tcp_seg_sent *seg = TAILQ_FIRST(&tp->t_segs_sent);
4075 if (found_seg == seg) {
4076 /* Move this segment to the end of the time-ordered list. */
4077 TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4078 TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4079 }
4080 }
4081
4082 static void
4083 tcp_process_rxmt_segs_after_rto(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start,
4084 uint32_t xmit_ts, uint8_t flags)
4085 {
4086 struct tcp_seg_sent segment = {};
4087
4088 while (seg != NULL) {
4089 if (SEQ_LEQ(seg->start_seq, start)) {
4090 tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4091 break;
4092 } else {
4093 /* The segment is a part of the total RTO retransmission */
4094 tcp_update_seg_after_rto(tp, seg, xmit_ts, flags);
4095
4096 /* Find the next segment ending at the start of current segment */
4097 segment.end_seq = seg->start_seq;
4098 seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4099 }
4100 }
4101 }
4102
4103 static struct tcp_seg_sent *
4104 tcp_seg_sent_insert_before(struct tcpcb *tp, struct tcp_seg_sent *before, tcp_seq start, tcp_seq end,
4105 uint32_t xmit_ts, uint8_t flags)
4106 {
4107 struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4108 if (seg == NULL) {
4109 tcp_rack_free_and_disable(tp);
4110 return NULL;
4111 }
4112 tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4113 struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4114 if (not_inserted) {
4115 TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4116 not_inserted->start_seq, not_inserted->end_seq);
4117 }
4118 TAILQ_INSERT_BEFORE(before, seg, tx_link);
4119
4120 return seg;
4121 }
4122
4123 static struct tcp_seg_sent *
4124 tcp_seg_rto_insert_end(struct tcpcb *tp, tcp_seq start, tcp_seq end,
4125 uint32_t xmit_ts, uint8_t flags)
4126 {
4127 struct tcp_seg_sent *seg = tcp_seg_alloc_init(tp);
4128 if (seg == NULL) {
4129 tcp_rack_free_and_disable(tp);
4130 return NULL;
4131 }
4132 /* segment MUST be allocated, there is no other fail-safe here */
4133 tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4134 struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4135 if (not_inserted) {
4136 TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4137 not_inserted->start_seq, not_inserted->end_seq);
4138 }
4139 TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4140
4141 return seg;
4142 }
4143
4144 void
4145 tcp_seg_sent_insert(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq start, tcp_seq end,
4146 uint32_t xmit_ts, uint8_t flags)
4147 {
4148 if (seg != NULL) {
4149 uint8_t seg_flags = seg->flags | flags;
4150 if (seg->end_seq == end) {
4151 /* Entire seg retransmitted in RACK recovery; start and end sequence don't change */
4152 if (seg->start_seq != start) {
4153 os_log_error(OS_LOG_DEFAULT, "Segment start (%u) is not same as retransmitted "
4154 "start sequence number (%u)", seg->start_seq, start);
4155 }
4156 tcp_rack_transmit_seg(tp, seg, seg->start_seq, seg->end_seq, xmit_ts, seg_flags);
4157 TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4158 TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4159 } else {
4160 /*
4161 * The original segment is partially retransmitted: advance its start_seq
4162 * past the retransmitted part and create a new segment for that part
4163 */
4164 struct tcp_seg_sent *partial_seg = tcp_seg_alloc_init(tp);
4165 if (partial_seg == NULL) {
4166 tcp_rack_free_and_disable(tp);
4167 return;
4168 }
4169 seg->start_seq += (end - start);
4170 tcp_rack_transmit_seg(tp, partial_seg, start, end, xmit_ts, seg_flags);
4171 struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head,
4172 &tp->t_segs_sent_tree, partial_seg);
4173 if (not_inserted) {
4174 TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4175 not_inserted->start_seq, not_inserted->end_seq);
4176 }
4177 TAILQ_INSERT_TAIL(&tp->t_segs_sent, partial_seg, tx_link);
4178 }
4179
4180 return;
4181 }
4182
4183 if ((flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE) == 0) {
4184 /* This is a new segment */
4185 seg = tcp_seg_alloc_init(tp);
4186 if (seg == NULL) {
4187 tcp_rack_free_and_disable(tp);
4188 return;
4189 }
4190
4191 tcp_rack_transmit_seg(tp, seg, start, end, xmit_ts, flags);
4192 struct tcp_seg_sent *not_inserted = RB_INSERT(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4193 if (not_inserted) {
4194 TCP_LOG(tp, "segment %p[%u %u) was not inserted in the RB tree", not_inserted,
4195 not_inserted->start_seq, not_inserted->end_seq);
4196 }
4197 TAILQ_INSERT_TAIL(&tp->t_segs_sent, seg, tx_link);
4198
4199 return;
4200 }
4201 /*
4202 * Retransmitted after either an RTO or a PTO.
4203 * During an RTO, the time-ordered list may lose its order.
4204 * If retransmitted after an RTO, check if the segment
4205 * already exists in the RB tree and update its xmit_ts. Also,
4206 * if this seg is at the head of the ordered list, move it
4207 * to the end.
4208 */
4209 struct tcp_seg_sent segment = {};
4210 struct tcp_seg_sent *found_seg = NULL, *rxmt_seg = NULL;
4211
4212 /* Set the end sequence to search for existing segment */
4213 segment.end_seq = end;
4214 found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4215 if (found_seg != NULL) {
4216 /* Found an exact match for retransmitted end sequence */
4217 tcp_process_rxmt_segs_after_rto(tp, found_seg, start, xmit_ts, flags);
4218 return;
4219 }
4220 /*
4221 * We get here when there is no exact match and the end of the segment
4222 * retransmitted after the RTO lies within an existing segment.
4223 */
4224 RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4225 if (SEQ_LT(end, found_seg->end_seq) && SEQ_GT(end, found_seg->start_seq)) {
4226 /*
4227 * This segment is partially retransmitted. We split this segment at the boundary of end
4228 * sequence. First insert the part being retransmitted at the end of time-ordered list.
4229 */
4230 struct tcp_seg_sent *inserted_seg = tcp_seg_rto_insert_end(tp, found_seg->start_seq, end, xmit_ts,
4231 found_seg->flags | flags);
4232 /* If segment is not allocated, RACK is already disabled and cleaned up */
4233 if (inserted_seg == NULL) {
4234 return;
4235 }
4236
4237 if (SEQ_LEQ(found_seg->start_seq, start)) {
4238 /*
4239 * We are done with the retransmitted part.
4240 * Move the start of existing segment
4241 */
4242 found_seg->start_seq = end;
4243 } else {
4244 /*
4245 * This retransmitted sequence covers more than one segment
4246 * Look for segments covered by this retransmission below this segment
4247 */
4248 segment.end_seq = found_seg->start_seq;
4249 rxmt_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &segment);
4250
4251 if (rxmt_seg != NULL) {
4252 /* rxmt_seg is just before the current segment */
4253 tcp_process_rxmt_segs_after_rto(tp, rxmt_seg, start, xmit_ts, flags);
4254 }
4255
4256 /* Move the start of existing segment */
4257 found_seg->start_seq = end;
4258 }
4259 return;
4260 }
4261 }
4262 }
4263
4264 static void
4265 tcp_seg_collect_acked_subtree(struct tcpcb *tp, struct tcp_seg_sent *seg,
4266 uint32_t acked_xmit_ts, uint32_t tsecr)
4267 {
4268 if (seg != NULL) {
4269 tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4270 tcp_seg_collect_acked_subtree(tp, RB_RIGHT(seg, seg_link), acked_xmit_ts, tsecr);
4271 TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4272 }
4273 }
4274
4275 /* Call this function with root of the rb tree */
4276 static void
4277 tcp_seg_collect_acked(struct tcpcb *tp, struct tcp_seg_sent *seg, tcp_seq th_ack,
4278 uint32_t acked_xmit_ts, uint32_t tsecr)
4279 {
4280 if (seg == NULL) {
4281 return;
4282 }
4283
4284 if (SEQ_GEQ(th_ack, seg->end_seq)) {
4285 /* Delete the entire left sub-tree */
4286 tcp_seg_collect_acked_subtree(tp, RB_LEFT(seg, seg_link), acked_xmit_ts, tsecr);
4287 /* Evaluate the right sub-tree */
4288 tcp_seg_collect_acked(tp, RB_RIGHT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4289 TAILQ_INSERT_TAIL(&tp->t_segs_acked, seg, ack_link);
4290 } else {
4291 /*
4292 * This ACK doesn't acknowledge the current root and its right sub-tree.
4293 * Evaluate the left sub-tree
4294 */
4295 tcp_seg_collect_acked(tp, RB_LEFT(seg, seg_link), th_ack, acked_xmit_ts, tsecr);
4296 }
4297 }
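/*
 * Worked example (illustrative): with nodes keyed by end_seq holding
 * [1,100), [100,200) and [200,300), an ACK for 200 satisfies
 * SEQ_GEQ(th_ack, end_seq) at the [100,200) node, so that node and its
 * whole left subtree are collected without visiting each node, and only
 * the right subtree ([200,300)) needs further evaluation.
 */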
4298
4299 static void
4300 tcp_seg_delete_acked(struct tcpcb *tp, uint32_t acked_xmit_ts, uint32_t tsecr)
4301 {
4302 struct tcp_seg_sent *acked_seg = NULL, *next = NULL;
4303
4304 TAILQ_FOREACH_SAFE(acked_seg, &tp->t_segs_acked, ack_link, next) {
4305 /* Advance RACK state if applicable */
4306 if (acked_seg->xmit_ts > acked_xmit_ts) {
4307 tcp_rack_update_segment_acked(tp, tsecr, acked_seg->xmit_ts, acked_seg->end_seq,
4308 !!(acked_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4309 }
4310 /* Check for reordering */
4311 tcp_rack_detect_reordering_acked(tp, acked_seg);
4312
4313 const uint32_t seg_len = tcp_seg_len(acked_seg);
4314 if (acked_seg->flags & TCP_SEGMENT_LOST) {
4315 if (tp->bytes_lost < seg_len) {
4316 os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller than already "
4317 "lost segment length (%u)", tp->bytes_lost, seg_len);
4318 }
4319 tp->bytes_lost -= seg_len;
4320 }
4321 if (acked_seg->flags & TCP_RACK_RETRANSMITTED) {
4322 if (tp->bytes_retransmitted < seg_len) {
4323 os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be smaller "
4324 "than already retransmited segment length (%u)",
4325 tp->bytes_retransmitted, seg_len);
4326 }
4327 tp->bytes_retransmitted -= seg_len;
4328 }
4329 if (acked_seg->flags & TCP_SEGMENT_SACKED) {
4330 if (tp->bytes_sacked < seg_len) {
4331 os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller than already "
4332 "SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4333 }
4334 tp->bytes_sacked -= seg_len;
4335 }
4336 TAILQ_REMOVE(&tp->t_segs_acked, acked_seg, ack_link);
4337 TAILQ_REMOVE(&tp->t_segs_sent, acked_seg, tx_link);
4338 RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, acked_seg);
4339 tcp_seg_delete(tp, acked_seg);
4340 }
4341 }
4342
4343 void
4344 tcp_segs_doack(struct tcpcb *tp, tcp_seq th_ack, struct tcpopt *to)
4345 {
4346 uint32_t tsecr = 0, acked_xmit_ts = 0;
4347 tcp_seq acked_seq = th_ack;
4348 bool was_retransmitted = false;
4349
4350 if (TAILQ_EMPTY(&tp->t_segs_sent)) {
4351 return;
4352 }
4353
4354 if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0)) {
4355 tsecr = to->to_tsecr;
4356 }
4357
4358 struct tcp_seg_sent seg = {};
4359 struct tcp_seg_sent *found_seg = NULL, *next = NULL;
4360
4361 found_seg = TAILQ_LAST(&tp->t_segs_sent, tcp_seg_sent_head);
4362
4363 if (tp->rack.segs_retransmitted == false) {
4364 if (SEQ_GEQ(th_ack, found_seg->end_seq)) {
4365 /*
4366 * ACK acknowledges the last sent segment completely (snd_max),
4367 * we can remove all segments from time ordered list.
4368 */
4369 acked_seq = found_seg->end_seq;
4370 acked_xmit_ts = found_seg->xmit_ts;
4371 was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4372 tcp_segs_sent_clean(tp, false);
4373
4374 /* Advance RACK state */
4375 tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4376 return;
4377 }
4378 }
4379 /*
4380 * If either not all segments are ACKed OR the time-ordered list contains retransmitted
4381 * segments, do an RB-tree search for the largest (completely) ACKed segment and remove that
4382 * segment and all segments left of it from both the RB tree and the time-ordered list.
4383 *
4384 * Set the end sequence to search for ACKed segment.
4385 */
4386 seg.end_seq = th_ack;
4387
4388 if ((found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg)) != NULL) {
4389 acked_seq = found_seg->end_seq;
4390 acked_xmit_ts = found_seg->xmit_ts;
4391 was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4392
4393 /*
4394 * Remove all segments that are ACKed by this ACK.
4395 * We defer self-balancing of RB tree to the end
4396 * by calling RB_REMOVE after collecting all ACKed segments.
4397 */
4398 tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4399 tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4400
4401 /* Advance RACK state */
4402 tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4403
4404 return;
4405 }
4406 /*
4407 * When TSO is enabled, it is possible that th_ack is less
4408 * than segment->end, hence we search the tree
4409 * until we find the largest (partially) ACKed segment.
4410 */
4411 RB_FOREACH_SAFE(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, next) {
4412 if (SEQ_LT(th_ack, found_seg->end_seq) && SEQ_GT(th_ack, found_seg->start_seq)) {
4413 acked_seq = th_ack;
4414 acked_xmit_ts = found_seg->xmit_ts;
4415 was_retransmitted = !!(found_seg->flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE);
4416
4417 /* Remove all segments completely ACKed by this ack */
4418 tcp_seg_collect_acked(tp, RB_ROOT(&tp->t_segs_sent_tree), th_ack, acked_xmit_ts, tsecr);
4419 tcp_seg_delete_acked(tp, acked_xmit_ts, tsecr);
4420 found_seg->start_seq = th_ack;
4421
4422 /* Advance RACK state */
4423 tcp_rack_update_segment_acked(tp, tsecr, acked_xmit_ts, acked_seq, was_retransmitted);
4424 break;
4425 }
4426 }
4427 }
4428
4429 static bool
4430 tcp_seg_mark_sacked(struct tcpcb *tp, struct tcp_seg_sent *seg, uint32_t *newbytes_sacked)
4431 {
4432 if (seg->flags & TCP_SEGMENT_SACKED) {
4433 return false;
4434 }
4435
4436 const uint32_t seg_len = tcp_seg_len(seg);
4437
4438 /* Check for reordering */
4439 tcp_rack_detect_reordering_acked(tp, seg);
4440
4441 if (seg->flags & TCP_RACK_RETRANSMITTED) {
4442 if (seg->flags & TCP_SEGMENT_LOST) {
4443 /*
4444 * If the segment is not considered lost, we don't clear
4445 * retransmitted, as it might still be in flight. The ONLY time
4446 * this can happen is when an RTO happens and the segment is
4447 * retransmitted and SACKed before RACK detects the loss.
4448 */
4449 seg->flags &= ~(TCP_SEGMENT_LOST | TCP_RACK_RETRANSMITTED);
4450 if (tp->bytes_lost < seg_len || tp->bytes_retransmitted < seg_len) {
4451 os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) and/or bytes_retransmitted (%u) "
4452 "can't be smaller than already lost/retransmitted segment length (%u)", tp->bytes_lost,
4453 tp->bytes_retransmitted, seg_len);
4454 }
4455 tp->bytes_lost -= seg_len;
4456 tp->bytes_retransmitted -= seg_len;
4457 }
4458 } else {
4459 if (seg->flags & TCP_SEGMENT_LOST) {
4460 seg->flags &= ~(TCP_SEGMENT_LOST);
4461 if (tp->bytes_lost < seg_len) {
4462 os_log_error(OS_LOG_DEFAULT, "bytes_lost (%u) can't be smaller "
4463 "than already lost segment length (%u)", tp->bytes_lost, seg_len);
4464 }
4465 tp->bytes_lost -= seg_len;
4466 }
4467 }
4468 *newbytes_sacked += seg_len;
4469 seg->flags |= TCP_SEGMENT_SACKED;
4470 tp->bytes_sacked += seg_len;
4471
4472 return true;
4473 }
4474
4475 static void
4476 tcp_segs_dosack_matched(struct tcpcb *tp, struct tcp_seg_sent *found_seg,
4477 tcp_seq sblk_start, uint32_t tsecr,
4478 uint32_t *newbytes_sacked)
4479 {
4480 struct tcp_seg_sent seg = {};
4481
4482 while (found_seg != NULL) {
4483 if (sblk_start == found_seg->start_seq) {
4484 /*
4485 * Covered the entire SACK block.
4486 * Record segment flags before they get erased.
4487 */
4488 uint8_t seg_flags = found_seg->flags;
4489 bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4490 if (newly_marked) {
4491 /* Advance RACK state */
4492 tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4493 found_seg->end_seq,
4494 !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4495 }
4496 break;
4497 } else if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4498 if ((found_seg->flags & TCP_SEGMENT_SACKED) != 0) {
4499 /* No need to process an already SACKED segment */
4500 break;
4501 }
4502 /*
4503 * This segment is partially ACKed by SACK block
4504 * as sblk_start > segment start. Since it is
4505 * partially SACKed, we should split the unSACKed and
4506 * SACKed parts.
4507 */
4508 /* First create a new segment for unSACKed part */
4509 struct tcp_seg_sent *inserted_seg = tcp_seg_sent_insert_before(tp, found_seg, found_seg->start_seq, sblk_start,
4510 found_seg->xmit_ts, found_seg->flags);
4511 /* If segment is not allocated, RACK is already disabled and cleaned up */
4512 if (inserted_seg == NULL) {
4513 return;
4514 }
4515 /* Now, update the SACKed part */
4516 found_seg->start_seq = sblk_start;
4517 /* Record seg flags before they get erased. */
4518 uint8_t seg_flags = found_seg->flags;
4519 bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4520 if (newly_marked) {
4521 /* Advance RACK state */
4522 tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4523 found_seg->end_seq,
4524 !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4525 }
4526 break;
4527 } else {
4528 /*
4529 * This segment lies within the SACK block
4530 * Record segment flags before they get erased.
4531 */
4532 uint8_t seg_flags = found_seg->flags;
4533 bool newly_marked = tcp_seg_mark_sacked(tp, found_seg, newbytes_sacked);
4534 if (newly_marked) {
4535 /* Advance RACK state */
4536 tcp_rack_update_segment_acked(tp, tsecr, found_seg->xmit_ts,
4537 found_seg->end_seq,
4538 !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4539 }
4540 /* Find the next segment ending at the start of current segment */
4541 seg.end_seq = found_seg->start_seq;
4542 found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4543 }
4544 }
4545 }
4546
4547 void
4548 tcp_segs_dosack(struct tcpcb *tp, tcp_seq sblk_start, tcp_seq sblk_end,
4549 uint32_t tsecr, uint32_t *newbytes_sacked)
4550 {
4551 /*
4552 * When we receive a SACK, min RTT is computed after SACK processing,
4553 * which means we use the min RTT from the previous ACK to advance RACK
4554 * state. This is ok as we track a windowed min-filtered estimate over a period.
4555 */
4556 struct tcp_seg_sent seg = {};
4557 struct tcp_seg_sent *found_seg = NULL, *sacked_seg = NULL;
4558
4559 /* Set the end sequence to search for SACKed segment */
4560 seg.end_seq = sblk_end;
4561 found_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4562
4563 if (found_seg != NULL) {
4564 /* We found an exact match for sblk_end */
4565 tcp_segs_dosack_matched(tp, found_seg, sblk_start, tsecr, newbytes_sacked);
4566 return;
4567 }
4568 /*
4569 * We get here when there is no exact match and sblk_end
4570 * lies within a segment. This happens only when TSO is used.
4571 */
4572 RB_FOREACH(found_seg, tcp_seg_sent_tree_head, &tp->t_segs_sent_tree) {
4573 if (SEQ_LT(sblk_end, found_seg->end_seq) && SEQ_GT(sblk_end, found_seg->start_seq)) {
4574 /*
4575 * This segment is partially SACKed. We split this segment at the boundary
4576 * of SACK block. First insert the newly SACKed part
4577 */
4578 tcp_seq start = SEQ_LEQ(sblk_start, found_seg->start_seq) ? found_seg->start_seq : sblk_start;
4579 struct tcp_seg_sent *newly_sacked = tcp_seg_sent_insert_before(tp, found_seg, start,
4580 sblk_end, found_seg->xmit_ts, found_seg->flags);
4581 /* If segment is not allocated, RACK is already disabled and cleaned up */
4582 if (newly_sacked == NULL) {
4583 return;
4584 }
4585 /* Record seg flags before they get erased. */
4586 uint8_t seg_flags = newly_sacked->flags;
4587 /* Mark the SACKed segment */
4588 tcp_seg_mark_sacked(tp, newly_sacked, newbytes_sacked);
4589
4590 /* Advance RACK state */
4591 tcp_rack_update_segment_acked(tp, tsecr, newly_sacked->xmit_ts,
4592 newly_sacked->end_seq, !!(seg_flags & TCP_SEGMENT_RETRANSMITTED_ATLEAST_ONCE));
4593
4594 if (sblk_start == found_seg->start_seq) {
4595 /*
4596 * We are done with this SACK block.
4597 * Move the start of existing segment
4598 */
4599 found_seg->start_seq = sblk_end;
4600 break;
4601 }
4602
4603 if (SEQ_GT(sblk_start, found_seg->start_seq)) {
4604 /* Insert the remaining unSACKed part before the SACKED segment inserted above */
4605 struct tcp_seg_sent *unsacked = tcp_seg_sent_insert_before(tp, newly_sacked, found_seg->start_seq,
4606 sblk_start, found_seg->xmit_ts, found_seg->flags);
4607 /* If segment is not allocated, RACK is already disabled and cleaned up */
4608 if (unsacked == NULL) {
4609 return;
4610 }
4611 /* Move the start of existing segment */
4612 found_seg->start_seq = sblk_end;
4613 break;
4614 } else {
4615 /*
4616 * This SACK block covers more than one segment
4617 * Look for segments SACKed below this segment
4618 */
4619 seg.end_seq = found_seg->start_seq;
4620 sacked_seg = RB_FIND(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, &seg);
4621
4622 if (sacked_seg != NULL) {
4623 /* We found an exact match for sblk_end */
4624 tcp_segs_dosack_matched(tp, sacked_seg, sblk_start, tsecr, newbytes_sacked);
4625 }
4626
4627 /*
4628 * RACK might have been disabled (if a segment allocation failed) and all associated
4629 * state freed. If RACK hasn't been disabled, move the start of existing segment.
4630 */
4631 if (TCP_RACK_ENABLED(tp)) {
4632 found_seg->start_seq = sblk_end;
4633 }
4634 }
4635 break;
4636 }
4637 }
4638 }
4639
4640 void
4641 tcp_segs_clear_sacked(struct tcpcb *tp)
4642 {
4643 struct tcp_seg_sent *seg = NULL;
4644
4645 TAILQ_FOREACH(seg, &tp->t_segs_sent, tx_link)
4646 {
4647 const uint32_t seg_len = tcp_seg_len(seg);
4648
4649 if (seg->flags & TCP_SEGMENT_SACKED) {
4650 seg->flags &= ~(TCP_SEGMENT_SACKED);
4651 if (tp->bytes_sacked < seg_len) {
4652 os_log_error(OS_LOG_DEFAULT, "bytes_sacked (%u) can't be smaller "
4653 "than already SACKed segment length (%u)", tp->bytes_sacked, seg_len);
4654 }
4655 tp->bytes_sacked -= seg_len;
4656 }
4657 }
4658 }
4659
4660 void
4661 tcp_mark_seg_lost(struct tcpcb *tp, struct tcp_seg_sent *seg)
4662 {
4663 const uint32_t seg_len = tcp_seg_len(seg);
4664
4665 if (seg->flags & TCP_SEGMENT_LOST) {
4666 if (seg->flags & TCP_RACK_RETRANSMITTED) {
4667 /* Retransmission was lost */
4668 seg->flags &= ~TCP_RACK_RETRANSMITTED;
4669 if (tp->bytes_retransmitted < seg_len) {
4670 os_log_error(OS_LOG_DEFAULT, "bytes_retransmitted (%u) can't be "
4671 "smaller than retransmited segment length (%u)",
4672 tp->bytes_retransmitted, seg_len);
4673 return;
4674 }
4675 tp->bytes_retransmitted -= seg_len;
4676 }
4677 } else {
4678 seg->flags |= TCP_SEGMENT_LOST;
4679 tp->bytes_lost += seg_len;
4680 }
4681 }
4682
4683 void
4684 tcp_seg_delete(struct tcpcb *tp, struct tcp_seg_sent *seg)
4685 {
4686 if (tp->seg_pool.free_segs_count >= TCP_SEG_POOL_MAX_ITEM_COUNT) {
4687 zfree(tcp_seg_sent_zone, seg);
4688 tcp_memacct_sub(kalloc_type_size(tcp_seg_sent_zone));
4689 } else {
4690 bzero(seg, sizeof(*seg));
4691 TAILQ_INSERT_TAIL(&tp->seg_pool.free_segs, seg, free_link);
4692 tp->seg_pool.free_segs_count++;
4693 }
4694 }
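/*
 * Illustrative sketch (not built): the alloc/delete pair above is a
 * bounded free-list -- recycle up to a cap, fall back to the allocator
 * beyond it. A generic userspace rendering with hypothetical demo_* names:
 */
#if 0
#include <stdlib.h>
#include <string.h>

#define DEMO_POOL_MAX 8

struct demo_node {
	struct demo_node *next;
};

static struct demo_node *demo_free_list;
static unsigned demo_free_count;

static void
demo_node_free(struct demo_node *n)
{
	if (demo_free_count >= DEMO_POOL_MAX) {
		free(n);                        /* pool is full: really free */
	} else {
		memset(n, 0, sizeof(*n));       /* scrub, as the code above does */
		n->next = demo_free_list;       /* park it for reuse */
		demo_free_list = n;
		demo_free_count++;
	}
}

static struct demo_node *
demo_node_alloc(void)
{
	struct demo_node *n = demo_free_list;

	if (n != NULL) {
		demo_free_list = n->next;       /* cheap path: reuse */
		demo_free_count--;
		return n;
	}
	return calloc(1, sizeof(*n));           /* pool empty: allocate */
}
#endif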
4695
4696 void
4697 tcp_segs_sent_clean(struct tcpcb *tp, bool free_segs)
4698 {
4699 struct tcp_seg_sent *seg = NULL, *next = NULL;
4700
4701 TAILQ_FOREACH_SAFE(seg, &tp->t_segs_sent, tx_link, next) {
4702 /* Check for reordering */
4703 tcp_rack_detect_reordering_acked(tp, seg);
4704
4705 TAILQ_REMOVE(&tp->t_segs_sent, seg, tx_link);
4706 RB_REMOVE(tcp_seg_sent_tree_head, &tp->t_segs_sent_tree, seg);
4707 tcp_seg_delete(tp, seg);
4708 }
4709 if (__improbable(!RB_EMPTY(&tp->t_segs_sent_tree))) {
4710 os_log_error(OS_LOG_DEFAULT, "RB tree still contains segments while "
4711 "time ordered list is already empty");
4712 }
4713 if (__improbable(!TAILQ_EMPTY(&tp->t_segs_acked))) {
4714 os_log_error(OS_LOG_DEFAULT, "Segment ACKed list shouldn't contain "
4715 "any segments as they are removed immediately after being ACKed");
4716 }
4717 /* Reset seg_retransmitted as we emptied the list */
4718 tcp_rack_reset_segs_retransmitted(tp);
4719 tp->bytes_lost = tp->bytes_sacked = tp->bytes_retransmitted = 0;
4720
4721 /* Empty the free segments pool */
4722 if (free_segs) {
4723 TAILQ_FOREACH_SAFE(seg, &tp->seg_pool.free_segs, free_link, next) {
4724 TAILQ_REMOVE(&tp->seg_pool.free_segs, seg, free_link);
4725 zfree(tcp_seg_sent_zone, seg);
4726 tcp_memacct_sub(kalloc_type_size(tcp_seg_sent_zone));
4727 }
4728 tp->seg_pool.free_segs_count = 0;
4729 }
4730 }
4731
4732 void
4733 tcp_rack_free_and_disable(struct tcpcb *tp)
4734 {
4735 TCP_LOG(tp, "not enough memory to allocate segment, disabling RACK");
4736 tcp_segs_sent_clean(tp, true);
4737 tp->t_flagsext &= ~TF_RACK_ENABLED;
4738 }
4739
4740 void
4741 tcp_get_connectivity_status(struct tcpcb *tp,
4742 struct tcp_conn_status *connstatus)
4743 {
4744 if (tp == NULL || connstatus == NULL) {
4745 return;
4746 }
4747 bzero(connstatus, sizeof(*connstatus));
4748 if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
4749 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
4750 connstatus->write_probe_failed = 1;
4751 } else {
4752 connstatus->conn_probe_failed = 1;
4753 }
4754 }
4755 if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) {
4756 connstatus->read_probe_failed = 1;
4757 }
4758 if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL &&
4759 (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) {
4760 connstatus->probe_activated = 1;
4761 }
4762 }
4763
4764 void
4765 tcp_disable_tfo(struct tcpcb *tp)
4766 {
4767 tp->t_flagsext &= ~TF_FASTOPEN;
4768 }
4769
4770 static struct mbuf *
4771 tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp,
4772 boolean_t is_probe)
4773 {
4774 struct inpcb *inp = tp->t_inpcb;
4775 struct tcphdr *th;
4776 caddr_t data;
4777 int win = 0;
4778 struct mbuf *m;
4779
4780 /*
4781 * The code assumes the IP + TCP headers fit in an mbuf packet header
4782 */
4783 static_assert(sizeof(struct ip) + sizeof(struct tcphdr) <= _MHLEN);
4784 static_assert(sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= _MHLEN);
4785
4786 MGETHDR(m, M_WAIT, MT_HEADER);
4787 if (m == NULL) {
4788 return NULL;
4789 }
4790 m->m_pkthdr.pkt_proto = IPPROTO_TCP;
4791
4792 data = m_mtod_lower_bound(m);
4793
4794 if (inp->inp_vflag & INP_IPV4) {
4795 bzero(data, sizeof(struct ip) + sizeof(struct tcphdr));
4796 th = (struct tcphdr *)(void *) (data + sizeof(struct ip));
4797 m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
4798 m->m_pkthdr.len = m->m_len;
4799 } else {
4800 VERIFY(inp->inp_vflag & INP_IPV6);
4801
4802 bzero(data, sizeof(struct ip6_hdr)
4803 + sizeof(struct tcphdr));
4804 th = (struct tcphdr *)(void *)(data + sizeof(struct ip6_hdr));
4805 m->m_len = sizeof(struct ip6_hdr) +
4806 sizeof(struct tcphdr);
4807 m->m_pkthdr.len = m->m_len;
4808 }
4809
4810 tcp_fillheaders(m, tp, data, th, NULL, NULL);
4811
4812 if (inp->inp_vflag & INP_IPV4) {
4813 struct ip *ip;
4814
4815 ip = (__typeof__(ip))(void *)data;
4816
4817 ip->ip_id = rfc6864 ? 0 : ip_randomid((uint64_t)m);
4818 ip->ip_off = htons(IP_DF);
4819 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
4820 ip->ip_ttl = inp->inp_ip_ttl;
4821 ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
4822 ip->ip_sum = in_cksum_hdr(ip);
4823 } else {
4824 struct ip6_hdr *ip6;
4825
4826 ip6 = (__typeof__(ip6))(void *)data;
4827
4828 ip6->ip6_plen = htons(sizeof(struct tcphdr));
4829 ip6->ip6_hlim = in6_selecthlim(inp, ifp);
4830 ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
4831
4832 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
4833 ip6->ip6_src.s6_addr16[1] = 0;
4834 }
4835 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
4836 ip6->ip6_dst.s6_addr16[1] = 0;
4837 }
4838 }
4839 th->th_flags = TH_ACK;
4840
4841 win = tcp_sbspace(tp);
4842 if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) {
4843 win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
4844 }
4845 th->th_win = htons((u_short) (win >> tp->rcv_scale));
4846
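/*
 * A probe uses snd_una - 1: one byte below the peer's cumulative ACK
 * point. The peer drops the stale byte but must answer with an ACK,
 * which is the classic TCP keepalive technique.
 */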
4847 if (is_probe) {
4848 th->th_seq = htonl(tp->snd_una - 1);
4849 } else {
4850 th->th_seq = htonl(tp->snd_una);
4851 }
4852 th->th_ack = htonl(tp->rcv_nxt);
4853
4854 /* Force recompute TCP checksum to be the final value */
4855 th->th_sum = 0;
4856 if (inp->inp_vflag & INP_IPV4) {
4857 th->th_sum = inet_cksum(m, IPPROTO_TCP,
4858 sizeof(struct ip), sizeof(struct tcphdr));
4859 } else {
4860 th->th_sum = inet6_cksum(m, IPPROTO_TCP,
4861 sizeof(struct ip6_hdr), sizeof(struct tcphdr));
4862 }
4863
4864 return m;
4865 }
4866
4867 void
4868 tcp_fill_keepalive_offload_frames(ifnet_t ifp,
4869 struct ifnet_keepalive_offload_frame *frames_array __counted_by(frames_array_count),
4870 u_int32_t frames_array_count, size_t frame_data_offset,
4871 u_int32_t *used_frames_count)
4872 {
4873 struct inpcb *inp;
4874 inp_gen_t gencnt;
4875 u_int32_t frame_index = *used_frames_count;
4876
4877 /* Validation of the parameters */
4878 if (ifp == NULL || frames_array == NULL ||
4879 frames_array_count == 0 ||
4880 frame_index >= frames_array_count ||
4881 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4882 return;
4883 }
4884
4885 /* Fast exit when no process is using the socket option TCP_KEEPALIVE_OFFLOAD */
4886 if (ifp->if_tcp_kao_cnt == 0) {
4887 return;
4888 }
4889
4890 /*
4891 * This function is called outside the regular TCP processing
4892 * so we need to update the TCP clock.
4893 */
4894 calculate_tcp_clock();
4895
4896 lck_rw_lock_shared(&tcbinfo.ipi_lock);
4897 gencnt = tcbinfo.ipi_gencnt;
4898 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
4899 struct socket *so;
4900 struct ifnet_keepalive_offload_frame *frame;
4901 struct mbuf *m = NULL;
4902 struct tcpcb *tp = intotcpcb(inp);
4903
4904 if (frame_index >= frames_array_count) {
4905 break;
4906 }
4907
4908 if (inp->inp_gencnt > gencnt ||
4909 inp->inp_state == INPCB_STATE_DEAD) {
4910 continue;
4911 }
4912
4913 if ((so = inp->inp_socket) == NULL ||
4914 (so->so_state & SS_DEFUNCT)) {
4915 continue;
4916 }
4917 /*
4918 * Check for the keepalive offload flag without taking the
4919 * socket lock, to avoid a deadlock
4920 */
4921 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
4922 continue;
4923 }
4924
4925 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
4926 continue;
4927 }
4928 if (inp->inp_ppcb == NULL ||
4929 in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
4930 continue;
4931 }
4932 socket_lock(so, 1);
4933 /* Release the want count */
4934 if (inp->inp_ppcb == NULL ||
4935 (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
4936 socket_unlock(so, 1);
4937 continue;
4938 }
4939 if ((inp->inp_vflag & INP_IPV4) &&
4940 (inp->inp_laddr.s_addr == INADDR_ANY ||
4941 inp->inp_faddr.s_addr == INADDR_ANY)) {
4942 socket_unlock(so, 1);
4943 continue;
4944 }
4945 if ((inp->inp_vflag & INP_IPV6) &&
4946 (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
4947 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
4948 socket_unlock(so, 1);
4949 continue;
4950 }
4951 if (inp->inp_lport == 0 || inp->inp_fport == 0) {
4952 socket_unlock(so, 1);
4953 continue;
4954 }
4955 if (inp->inp_last_outifp == NULL ||
4956 inp->inp_last_outifp->if_index != ifp->if_index) {
4957 socket_unlock(so, 1);
4958 continue;
4959 }
4960 if ((inp->inp_vflag & INP_IPV4) && frame_data_offset +
4961 sizeof(struct ip) + sizeof(struct tcphdr) >
4962 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4963 socket_unlock(so, 1);
4964 continue;
4965 } else if (!(inp->inp_vflag & INP_IPV4) && frame_data_offset +
4966 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) >
4967 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4968 socket_unlock(so, 1);
4969 continue;
4970 }
4971 /*
4972 * There is no point in waking up the device for connections
4973 * that are not established. Long-lived connections are meant
4974 * for processes that will send and receive data.
4975 */
4976 if (tp->t_state != TCPS_ESTABLISHED) {
4977 socket_unlock(so, 1);
4978 continue;
4979 }
4980 /*
4981 * This inp has all the information that is needed to
4982 * generate an offload frame.
4983 */
4984 frame = &frames_array[frame_index];
4985 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP;
4986 frame->ether_type = (inp->inp_vflag & INP_IPV4) ?
4987 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 :
4988 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
4989 frame->interval = (uint16_t)(tp->t_keepidle > 0 ? tp->t_keepidle :
4990 tcp_keepidle);
4991 frame->keep_cnt = (uint8_t)TCP_CONN_KEEPCNT(tp);
4992 frame->keep_retry = (uint16_t)TCP_CONN_KEEPINTVL(tp);
4993 if (so->so_options & SO_NOWAKEFROMSLEEP) {
4994 frame->flags |=
4995 IFNET_KEEPALIVE_OFFLOAD_FLAG_NOWAKEFROMSLEEP;
4996 }
4997 frame->local_port = ntohs(inp->inp_lport);
4998 frame->remote_port = ntohs(inp->inp_fport);
4999 frame->local_seq = tp->snd_nxt;
5000 frame->remote_seq = tp->rcv_nxt;
5001 if (inp->inp_vflag & INP_IPV4) {
5002 ASSERT(frame_data_offset + sizeof(struct ip) + sizeof(struct tcphdr) <= UINT8_MAX);
5003 frame->length = (uint8_t)(frame_data_offset +
5004 sizeof(struct ip) + sizeof(struct tcphdr));
5005 frame->reply_length = frame->length;
5006
5007 frame->addr_length = sizeof(struct in_addr);
5008 bcopy(&inp->inp_laddr, frame->local_addr,
5009 sizeof(struct in_addr));
5010 bcopy(&inp->inp_faddr, frame->remote_addr,
5011 sizeof(struct in_addr));
5012 } else {
5013 struct in6_addr *ip6;
5014
5015 ASSERT(frame_data_offset + sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= UINT8_MAX);
5016 frame->length = (uint8_t)(frame_data_offset +
5017 sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
5018 frame->reply_length = frame->length;
5019
5020 frame->addr_length = sizeof(struct in6_addr);
5021 ip6 = (struct in6_addr *)(void *)frame->local_addr;
5022 bcopy(&inp->in6p_laddr, ip6, sizeof(struct in6_addr));
5023 if (IN6_IS_SCOPE_EMBED(ip6)) {
5024 ip6->s6_addr16[1] = 0;
5025 }
5026
5027 ip6 = (struct in6_addr *)(void *)frame->remote_addr;
5028 bcopy(&inp->in6p_faddr, ip6, sizeof(struct in6_addr));
5029 if (IN6_IS_SCOPE_EMBED(ip6)) {
5030 ip6->s6_addr16[1] = 0;
5031 }
5032 }
5033
5034 /*
5035 * First the probe
5036 */
5037 m = tcp_make_keepalive_frame(tp, ifp, TRUE);
5038 if (m == NULL) {
5039 socket_unlock(so, 1);
5040 continue;
5041 }
5042 bcopy(m_mtod_current(m), frame->data + frame_data_offset, m->m_len);
5043 m_freem(m);
5044
5045 /*
5046 * Now the response packet to incoming probes
5047 */
5048 m = tcp_make_keepalive_frame(tp, ifp, FALSE);
5049 if (m == NULL) {
5050 socket_unlock(so, 1);
5051 continue;
5052 }
5053 bcopy(m_mtod_current(m), frame->reply_data + frame_data_offset,
5054 m->m_len);
5055 m_freem(m);
5056
5057 frame_index++;
5058 socket_unlock(so, 1);
5059 }
5060 lck_rw_done(&tcbinfo.ipi_lock);
5061 *used_frames_count = frame_index;
5062 }
5063
5064 static bool
5065 inp_matches_kao_frame(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frame,
5066 struct inpcb *inp)
5067 {
5068 if (inp->inp_ppcb == NULL) {
5069 return false;
5070 }
5071 /* Release the want count */
5072 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
5073 return false;
5074 }
5075 if (inp->inp_last_outifp == NULL ||
5076 inp->inp_last_outifp->if_index != ifp->if_index) {
5077 return false;
5078 }
5079 if (frame->local_port != ntohs(inp->inp_lport) ||
5080 frame->remote_port != ntohs(inp->inp_fport)) {
5081 return false;
5082 }
5083 if (inp->inp_vflag & INP_IPV4) {
5084 if (memcmp(&inp->inp_laddr, frame->local_addr,
5085 sizeof(struct in_addr)) != 0 ||
5086 memcmp(&inp->inp_faddr, frame->remote_addr,
5087 sizeof(struct in_addr)) != 0) {
5088 return false;
5089 }
5090 } else if (inp->inp_vflag & INP_IPV6) {
5091 if (memcmp(&inp->inp_laddr, frame->local_addr,
5092 sizeof(struct in6_addr)) != 0 ||
5093 memcmp(&inp->inp_faddr, frame->remote_addr,
5094 sizeof(struct in6_addr)) != 0) {
5095 return false;
5096 }
5097 } else {
5098 return false;
5099 }
5100 return true;
5101 }
5102
5103 int
5104 tcp_notify_kao_timeout(ifnet_t ifp,
5105 struct ifnet_keepalive_offload_frame *frame)
5106 {
5107 struct inpcb *inp = NULL;
5108 struct socket *so = NULL;
5109 bool found = false;
5110
5111 /*
5112 * Unlock the list before posting event on the matching socket
5113 */
5114 lck_rw_lock_shared(&tcbinfo.ipi_lock);
5115
5116 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
5117 if ((so = inp->inp_socket) == NULL ||
5118 (so->so_state & SS_DEFUNCT)) {
5119 continue;
5120 }
5121 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
5122 continue;
5123 }
5124 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
5125 continue;
5126 }
5127 if (inp->inp_ppcb == NULL ||
5128 in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
5129 continue;
5130 }
5131 socket_lock(so, 1);
5132 if (inp_matches_kao_frame(ifp, frame, inp)) {
5133 /*
5134 * Keep the matching socket locked
5135 */
5136 found = true;
5137 break;
5138 }
5139 socket_unlock(so, 1);
5140 }
5141 lck_rw_done(&tcbinfo.ipi_lock);
5142
5143 if (found) {
5144 ASSERT(inp != NULL);
5145 ASSERT(so != NULL);
5146 ASSERT(so == inp->inp_socket);
5147 /*
5148 * Drop the TCP connection like tcptimers() does
5149 */
5150 tcpcb_ref_t tp = inp->inp_ppcb;
5151
5152 tcpstat.tcps_keepdrops++;
5153 soevent(so,
5154 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
5155 tp = tcp_drop(tp, ETIMEDOUT);
5156
5157 tcpstat.tcps_ka_offload_drops++;
5158 os_log_info(OS_LOG_DEFAULT, "%s: dropped lport %u fport %u\n",
5159 __func__, frame->local_port, frame->remote_port);
5160
5161 socket_unlock(so, 1);
5162 }
5163
5164 return 0;
5165 }
5166
5167 errno_t
5168 tcp_notify_ack_id_valid(struct tcpcb *tp, struct socket *so,
5169 u_int32_t notify_id)
5170 {
5171 struct tcp_notify_ack_marker *elm;
5172
5173 if (so->so_snd.sb_cc == 0) {
5174 return ENOBUFS;
5175 }
5176
5177 SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5178 /* Duplicate id is not allowed */
5179 if (elm->notify_id == notify_id) {
5180 return EINVAL;
5181 }
5182 /* Duplicate position is not allowed */
5183 if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) {
5184 return EINVAL;
5185 }
5186 }
5187 return 0;
5188 }
5189
5190 errno_t
5191 tcp_add_notify_ack_marker(struct tcpcb *tp, u_int32_t notify_id)
5192 {
5193 struct tcp_notify_ack_marker *nm, *elm = NULL;
5194 struct socket *so = tp->t_inpcb->inp_socket;
5195
5196 nm = kalloc_type(struct tcp_notify_ack_marker, M_WAIT | Z_ZERO);
5197 if (nm == NULL) {
5198 return ENOMEM;
5199 }
5200 nm->notify_id = notify_id;
5201 nm->notify_snd_una = tp->snd_una + so->so_snd.sb_cc;
5202
5203 SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5204 if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una)) {
5205 break;
5206 }
5207 }
5208
5209 if (elm == NULL) {
5210 VERIFY(SLIST_EMPTY(&tp->t_notify_ack));
5211 SLIST_INSERT_HEAD(&tp->t_notify_ack, nm, notify_next);
5212 } else {
5213 SLIST_INSERT_AFTER(elm, nm, notify_next);
5214 }
5215 tp->t_notify_ack_count++;
5216 return 0;
5217 }
5218
5219 void
5220 tcp_notify_ack_free(struct tcpcb *tp)
5221 {
5222 struct tcp_notify_ack_marker *elm, *next;
5223 if (SLIST_EMPTY(&tp->t_notify_ack)) {
5224 return;
5225 }
5226
5227 SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
5228 SLIST_REMOVE(&tp->t_notify_ack, elm, tcp_notify_ack_marker,
5229 notify_next);
5230 kfree_type(struct tcp_notify_ack_marker, elm);
5231 }
5232 SLIST_INIT(&tp->t_notify_ack);
5233 tp->t_notify_ack_count = 0;
5234 }
5235
5236 inline void
5237 tcp_notify_acknowledgement(struct tcpcb *tp, struct socket *so)
5238 {
5239 struct tcp_notify_ack_marker *elm;
5240
5241 elm = SLIST_FIRST(&tp->t_notify_ack);
5242 if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5243 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOTIFY_ACK);
5244 }
5245 }
5246
5247 void
5248 tcp_get_notify_ack_count(struct tcpcb *tp,
5249 struct tcp_notify_ack_complete *retid)
5250 {
5251 struct tcp_notify_ack_marker *elm;
5252 uint32_t complete = 0;
5253
5254 SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
5255 if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5256 ASSERT(complete < UINT32_MAX);
5257 complete++;
5258 } else {
5259 break;
5260 }
5261 }
5262 retid->notify_pending = tp->t_notify_ack_count - complete;
5263 retid->notify_complete_count = min(TCP_MAX_NOTIFY_ACK, complete);
5264 }
5265
5266 void
5267 tcp_get_notify_ack_ids(struct tcpcb *tp,
5268 struct tcp_notify_ack_complete *retid)
5269 {
5270 size_t i = 0;
5271 struct tcp_notify_ack_marker *elm, *next;
5272
5273 SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
5274 if (i >= retid->notify_complete_count) {
5275 break;
5276 }
5277 if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5278 retid->notify_complete_id[i++] = elm->notify_id;
5279 SLIST_REMOVE(&tp->t_notify_ack, elm,
5280 tcp_notify_ack_marker, notify_next);
5281 kfree_type(struct tcp_notify_ack_marker, elm);
5282 tp->t_notify_ack_count--;
5283 } else {
5284 break;
5285 }
5286 }
5287 }
5288
5289 bool
5290 tcp_notify_ack_active(struct socket *so)
5291 {
5292 if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
5293 SOCK_TYPE(so) == SOCK_STREAM) {
5294 struct tcpcb *tp = intotcpcb(sotoinpcb(so));
5295
5296 if (!SLIST_EMPTY(&tp->t_notify_ack)) {
5297 struct tcp_notify_ack_marker *elm;
5298 elm = SLIST_FIRST(&tp->t_notify_ack);
5299 if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
5300 return true;
5301 }
5302 }
5303 }
5304 return false;
5305 }
5306
5307 inline int32_t
5308 inp_get_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
5309 {
5310 struct inpcb *inp = sotoinpcb(so);
5311 struct tcpcb *tp = intotcpcb(inp);
5312
5313 if ((so->so_snd.sb_flags & SB_SNDBYTE_CNT) &&
5314 so->so_snd.sb_cc > 0) {
5315 int32_t unsent, sent;
5316 sent = tp->snd_max - th_ack;
5317 if (tp->t_flags & TF_SENTFIN) {
5318 sent--;
5319 }
5320 unsent = so->so_snd.sb_cc - sent;
5321 return unsent;
5322 }
5323 return 0;
5324 }
5325
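/*
 * Assemble the 3-bit ACE field used by Accurate ECN: ECE is bit 0,
 * CWR is bit 1, and AE (carried in th_x2, above the low 8 header
 * flag bits) is bit 2.
 */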
uint8_t
tcp_get_ace(struct tcphdr *th)
{
	uint8_t ace = 0;
	if (th->th_flags & TH_ECE) {
		ace += 1;
	}
	if (th->th_flags & TH_CWR) {
		ace += 2;
	}
	if (th->th_x2 & (TH_AE >> 8)) {
		ace += 4;
	}

	return ace;
}

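/*
 * Helpers to bump the IPv4 or IPv6 flavor of a per-interface counter
 * and to test whether ECN was successfully negotiated on a flow.
 */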
#define IFP_PER_FLOW_STAT(_ipv4_, _stat_) { \
	if (_ipv4_) { \
		ifp->if_ipv4_stat->_stat_++; \
	} else { \
		ifp->if_ipv6_stat->_stat_++; \
	} \
}

#define FLOW_ECN_ENABLED(_flags_) \
    ((_flags_ & (TE_ECN_ON)) == (TE_ECN_ON))

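/*
 * Fold the per-flow statistics in ifs into the per-interface ECN and
 * link-quality counters of ifp when a flow closes.  ECN negotiation
 * outcome and CE/ECE observations are recorded for every flow;
 * performance, fallback and link-heuristics statistics are recorded
 * for non-local connections only.
 */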
void
tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
    struct ifnet *ifp)
{
	if (ifp == NULL || !ifnet_is_fully_attached(ifp)) {
		return;
	}

	ifnet_lock_shared(ifp);
	if (ifs->ecn_flags & TE_SETUPSENT) {
		if (ifs->ecn_flags & TE_CLIENT_SETUP) {
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_client_setup);
			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_client_success);
			} else if (ifs->ecn_flags & TE_LOST_SYN) {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_syn_lost);
			} else {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_peer_nosupport);
			}
		} else {
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_server_setup);
			if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_server_success);
			} else if (ifs->ecn_flags & TE_LOST_SYN) {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_synack_lost);
			} else {
				IFP_PER_FLOW_STAT(ifs->ipv4,
				    ecn_peer_nosupport);
			}
		}
	} else {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off_conn);
	}
	if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
		if (ifs->ecn_flags & TE_RECV_ECN_CE) {
			tcpstat.tcps_ecn_conn_recv_ce++;
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ce);
		}
		if (ifs->ecn_flags & TE_RECV_ECN_ECE) {
			tcpstat.tcps_ecn_conn_recv_ece++;
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ece);
		}
		if (ifs->ecn_flags & (TE_RECV_ECN_CE | TE_RECV_ECN_ECE)) {
			if (ifs->txretransmitbytes > 0 ||
			    ifs->rxoutoforderbytes > 0) {
				tcpstat.tcps_ecn_conn_pl_ce++;
				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plce);
			} else {
				tcpstat.tcps_ecn_conn_nopl_ce++;
				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_noplce);
			}
		} else {
			if (ifs->txretransmitbytes > 0 ||
			    ifs->rxoutoforderbytes > 0) {
				tcpstat.tcps_ecn_conn_plnoce++;
				IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plnoce);
			}
		}
	}

	/* Other stats are interesting for non-local connections only */
	if (ifs->local) {
		ifnet_lock_done(ifp);
		return;
	}

	if (ifs->ipv4) {
		ifp->if_ipv4_stat->timestamp = net_uptime();
		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_on);
		} else {
			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_off);
		}
	} else {
		ifp->if_ipv6_stat->timestamp = net_uptime();
		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_on);
		} else {
			tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_off);
		}
	}

	if (ifs->rxmit_drop) {
		if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_on.rxmit_drop);
		} else {
			IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop);
		}
	}
	if (ifs->ecn_fallback_synloss) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss);
	}
	if (ifs->ecn_fallback_droprst) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst);
	}
	if (ifs->ecn_fallback_droprxmt) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt);
	}
	if (ifs->ecn_fallback_ce) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce);
	}
	if (ifs->ecn_fallback_reorder) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder);
	}
	if (ifs->ecn_recv_ce > 0) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce);
	}
	if (ifs->ecn_recv_ece > 0) {
		IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece);
	}

	tcp_flow_lim_stats(ifs, &ifp->if_lim_stat);

	/*
	 * Link heuristics are updated here only for NECP client flows,
	 * when they close; socket flows are updated live.
	 */
	os_atomic_add(&ifp->if_tcp_stat->linkheur_noackpri, ifs->linkheur_noackpri, relaxed);
	os_atomic_add(&ifp->if_tcp_stat->linkheur_comprxmt, ifs->linkheur_comprxmt, relaxed);
	os_atomic_add(&ifp->if_tcp_stat->linkheur_synrxmt, ifs->linkheur_synrxmt, relaxed);
	os_atomic_add(&ifp->if_tcp_stat->linkheur_rxmtfloor, ifs->linkheur_rxmtfloor, relaxed);

	ifnet_lock_done(ifp);
}

#if SKYWALK

#include <skywalk/core/skywalk_var.h>
#include <skywalk/nexus/flowswitch/nx_flowswitch.h>

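/*
 * Register a fully-connected TCP flow with the flowswitch instance on
 * ifp so that its receive traffic can be aggregated.  On success the
 * flowswitch and flow UUIDs are saved in the tcpcb so the flow can be
 * torn down later by tcp_del_fsw_flow().
 */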
void
tcp_add_fsw_flow(struct tcpcb *tp, struct ifnet *ifp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	uuid_t fsw_uuid;
	struct nx_flow_req nfr;
	int err;

	if (!NX_FSW_TCP_RX_AGG_ENABLED()) {
		return;
	}

	if (ifp == NULL || kern_nexus_get_flowswitch_instance(ifp, fsw_uuid)) {
		TCP_LOG_FSW_FLOW(tp, "skip ifp no fsw");
		return;
	}

	memset(&nfr, 0, sizeof(nfr));

	if (inp->inp_vflag & INP_IPV4) {
		ASSERT(!(inp->inp_laddr.s_addr == INADDR_ANY ||
		    inp->inp_faddr.s_addr == INADDR_ANY ||
		    IN_MULTICAST(ntohl(inp->inp_laddr.s_addr)) ||
		    IN_MULTICAST(ntohl(inp->inp_faddr.s_addr))));
		nfr.nfr_saddr.sin.sin_len = sizeof(struct sockaddr_in);
		nfr.nfr_saddr.sin.sin_family = AF_INET;
		nfr.nfr_saddr.sin.sin_port = inp->inp_lport;
		memcpy(&nfr.nfr_saddr.sin.sin_addr, &inp->inp_laddr,
		    sizeof(struct in_addr));
		nfr.nfr_daddr.sin.sin_len = sizeof(struct sockaddr_in);
		nfr.nfr_daddr.sin.sin_family = AF_INET;
		nfr.nfr_daddr.sin.sin_port = inp->inp_fport;
		memcpy(&nfr.nfr_daddr.sin.sin_addr, &inp->inp_faddr,
		    sizeof(struct in_addr));
	} else {
		ASSERT(!(IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) ||
		    IN6_IS_ADDR_MULTICAST(&inp->in6p_laddr) ||
		    IN6_IS_ADDR_MULTICAST(&inp->in6p_faddr)));
		nfr.nfr_saddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		nfr.nfr_saddr.sin6.sin6_family = AF_INET6;
		nfr.nfr_saddr.sin6.sin6_port = inp->inp_lport;
		memcpy(&nfr.nfr_saddr.sin6.sin6_addr, &inp->in6p_laddr,
		    sizeof(struct in6_addr));
		nfr.nfr_daddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		nfr.nfr_daddr.sin6.sin6_family = AF_INET6;
		nfr.nfr_daddr.sin6.sin6_port = inp->inp_fport;
		memcpy(&nfr.nfr_daddr.sin6.sin6_addr, &inp->in6p_faddr,
		    sizeof(struct in6_addr));
		/* clear embedded scope ID */
		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_saddr.sin6.sin6_addr)) {
			nfr.nfr_saddr.sin6.sin6_addr.s6_addr16[1] = 0;
		}
		if (IN6_IS_SCOPE_EMBED(&nfr.nfr_daddr.sin6.sin6_addr)) {
			nfr.nfr_daddr.sin6.sin6_addr.s6_addr16[1] = 0;
		}
	}

	nfr.nfr_nx_port = 1;
	nfr.nfr_ip_protocol = IPPROTO_TCP;
	nfr.nfr_transport_protocol = IPPROTO_TCP;
	nfr.nfr_flags = NXFLOWREQF_ASIS;
	nfr.nfr_epid = (so != NULL ? so->last_pid : 0);
	if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
		nfr.nfr_port_reservation = inp->inp_netns_token;
		nfr.nfr_flags |= NXFLOWREQF_EXT_PORT_RSV;
	}
	ASSERT(inp->inp_flowhash != 0);
	nfr.nfr_inp_flowhash = inp->inp_flowhash;

	uuid_generate_random(nfr.nfr_flow_uuid);
	err = kern_nexus_flow_add(kern_nexus_shared_controller(), fsw_uuid,
	    &nfr, sizeof(nfr));

	if (err == 0) {
		uuid_copy(tp->t_fsw_uuid, fsw_uuid);
		uuid_copy(tp->t_flow_uuid, nfr.nfr_flow_uuid);
	}

	TCP_LOG_FSW_FLOW(tp, "add err %d\n", err);
}

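/*
 * Remove the flowswitch flow registered by tcp_add_fsw_flow(), if
 * any, and clear the saved UUIDs.  ENOENT and ENXIO are tolerated
 * since the flow or its nexus may already be gone by the time the
 * connection is closed.
 */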
void
tcp_del_fsw_flow(struct tcpcb *tp)
{
	if (uuid_is_null(tp->t_fsw_uuid) || uuid_is_null(tp->t_flow_uuid)) {
		return;
	}

	struct nx_flow_req nfr;
	uuid_copy(nfr.nfr_flow_uuid, tp->t_flow_uuid);

	/* It's possible for this call to fail if the nexus has detached */
	int err = kern_nexus_flow_del(kern_nexus_shared_controller(),
	    tp->t_fsw_uuid, &nfr, sizeof(nfr));
	VERIFY(err == 0 || err == ENOENT || err == ENXIO);

	uuid_clear(tp->t_fsw_uuid);
	uuid_clear(tp->t_flow_uuid);

	TCP_LOG_FSW_FLOW(tp, "del err %d\n", err);
}

#endif /* SKYWALK */