/*
 * Copyright (c) 2015-2025 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* TCP-cache to store and retrieve TCP-related information */

#include <net/flowhash.h>
#include <net/route.h>
#include <net/necp.h>
#include <netinet/in_pcb.h>
#include <netinet/mptcp.h>
#include <netinet/mptcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <dev/random/randomdev.h>
#include <net/sockaddr_utils.h>

#include <IOKit/IOBSD.h>

struct tcp_heuristic {
    SLIST_ENTRY(tcp_heuristic) list;

    uint32_t th_last_access;

    struct tcp_heuristic_key th_key;

#define th_val_start th_tfo_data_loss
    uint8_t th_tfo_data_loss;      /* The number of times a SYN+data has been lost */
    uint8_t th_tfo_req_loss;       /* The number of times a SYN+cookie-req has been lost */
    uint8_t th_tfo_data_rst;       /* The number of times a SYN+data has received a RST */
    uint8_t th_tfo_req_rst;        /* The number of times a SYN+cookie-req has received a RST */
    uint8_t th_mptcp_loss;         /* The number of times a SYN+MP_CAPABLE has been lost */
    uint8_t th_mptcp_success;      /* The number of times MPTCP-negotiation has been successful */
    uint8_t th_ecn_loss;           /* The number of times a SYN+ecn was likely dropped */
    uint8_t th_ecn_aggressive;     /* The number of times we did an aggressive fallback */
    uint8_t th_ecn_droprst;        /* The number of times ECN connections received a RST after first data pkt */
    uint8_t th_ecn_synrst;         /* Number of times RST was received in response to an ECN enabled SYN */
    uint32_t th_tfo_enabled_time;  /* The moment when we reenabled TFO after backing off */
    uint32_t th_tfo_backoff_until; /* Time until when we should not try out TFO */
    uint32_t th_tfo_backoff;       /* Current backoff timer */
    uint32_t th_mptcp_backoff;     /* Time until when we should not try out MPTCP */
    uint32_t th_ecn_backoff;       /* Time until when we should not try out ECN */

    uint8_t th_tfo_in_backoff:1,       /* Are we avoiding TFO due to the backoff timer? */
        th_mptcp_in_backoff:1,         /* Are we avoiding MPTCP due to the backoff timer? */
        th_mptcp_heuristic_disabled:1; /* Are heuristics disabled? */
    /* N.B.: we may sometimes erase ALL values from th_val_start to the end of the structure. */
};


struct tcp_heuristics_head {
    SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics;

    /* Per-hashbucket lock to avoid lock-contention */
    lck_mtx_t thh_mtx;
};

struct tcp_cache {
    SLIST_ENTRY(tcp_cache) list;

    uint32_t tc_last_access;

    struct tcp_cache_key tc_key;

    uint8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
    uint8_t tc_tfo_cookie_len;

    uint8_t tc_mptcp_version_confirmed:1;
    uint8_t tc_mptcp_version;           /* Version to use right now */
    uint32_t tc_mptcp_next_version_try; /* Time until we try the preferred version again */
};

struct tcp_cache_head {
    SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches;

    /* Per-hashbucket lock to avoid lock-contention */
    lck_mtx_t tch_mtx;
};

struct tcp_cache_key_src {
    struct ifnet *ifp;
    in_4_6_addr laddr;
    in_4_6_addr faddr;
    int af;
};

static uint32_t tcp_cache_hash_seed;

static size_t tcp_cache_size;
static size_t tcp_heuristics_size;
/*
 * The maximum depth of the hash-bucket. This way we limit the tcp_cache to
 * TCP_CACHE_BUCKET_SIZE * tcp_cache_size and have "natural" garbage collection
 */
#define TCP_CACHE_BUCKET_SIZE 5

static struct tcp_cache_head *__counted_by(tcp_cache_size) tcp_cache;

static LCK_ATTR_DECLARE(tcp_cache_mtx_attr, 0, 0);
static LCK_GRP_DECLARE(tcp_cache_mtx_grp, "tcpcache");

static struct tcp_heuristics_head *__counted_by(tcp_heuristics_size) tcp_heuristics;

static LCK_ATTR_DECLARE(tcp_heuristic_mtx_attr, 0, 0);
static LCK_GRP_DECLARE(tcp_heuristic_mtx_grp, "tcpheuristic");

static uint32_t tcp_backoff_maximum = 65536;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO");

static uint32_t tcp_ecn_timeout = 5;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_ecn_timeout, 5, "Initial minutes to wait before re-trying ECN");

static int disable_tcp_heuristics = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)");

static uint32_t mptcp_version_timeout = 24 * 60;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mptcp_version_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_version_timeout, 24 * 60, "Initial minutes to wait before re-trying MPTCP's preferred version");


static uint32_t
tcp_min_to_hz(uint32_t minutes)
{
    if (minutes > 65536) {
        return (uint32_t)65536 * 60 * TCP_RETRANSHZ;
    }

    return minutes * 60 * TCP_RETRANSHZ;
}

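/*
 * For illustration (assuming TCP_RETRANSHZ == 100, i.e. 100 ticks per
 * second): tcp_min_to_hz(5) == 5 * 60 * 100 == 30000 ticks, which is how
 * five minutes are expressed on the tcp_now timescale used by the backoff
 * fields above. The clamp caps the argument at 65536 minutes.
 */
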
/*
 * This number is coupled with tcp_ecn_timeout, because we want to prevent
 * integer overflow. Need to find an inexpensive way to prevent integer
 * overflow while still allowing a dynamic sysctl.
 */
#define TCP_CACHE_OVERFLOW_PROTECT 9

/* Number of SYN-losses we accept */
#define TFO_MAX_COOKIE_LOSS 2
#define MPTCP_MAX_SYN_LOSS 2
#define MPTCP_SUCCESS_TRIGGER 10
#define MPTCP_VERSION_MAX_FAIL 2
#define ECN_MAX_SYN_LOSS 5
#define ECN_MAX_DROPRST 1
#define ECN_MAX_SYNRST 4
#define ECN_MAX_CE_AGGRESSIVE 1

/* Flags for setting/unsetting loss-heuristics, limited to 4 bytes */
#define TCPCACHE_F_TFO_REQ        0x01
#define TCPCACHE_F_TFO_DATA       0x02
#define TCPCACHE_F_ECN            0x04
#define TCPCACHE_F_MPTCP          0x08
#define TCPCACHE_F_ECN_DROPRST    0x10
#define TCPCACHE_F_ECN_AGGRESSIVE 0x20
#define TCPCACHE_F_TFO_REQ_RST    0x40
#define TCPCACHE_F_TFO_DATA_RST   0x80
#define TCPCACHE_F_ECN_SYNRST     0x100
#define TCPCACHE_F_ECN_SYN_LOSS   0x200

/* Always retry ECN after backing off to this level for some heuristics */
#define ECN_RETRY_LIMIT 9

#define TCP_CACHE_INC_IFNET_STAT(_ifp_, _af_, _stat_) { \
    if ((_ifp_) != NULL) { \
        if ((_af_) == AF_INET6) { \
            (_ifp_)->if_ipv6_stat->_stat_++; \
        } else { \
            (_ifp_)->if_ipv4_stat->_stat_++; \
        } \
    } \
}

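/*
 * For illustration, TCP_CACHE_INC_IFNET_STAT(ifp, AF_INET6, ecn_fallback_synloss)
 * expands to a NULL-checked increment of ifp->if_ipv6_stat->ecn_fallback_synloss
 * (or of the if_ipv4_stat counterpart for any other address family).
 */
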
/*
 * Round up to the next higher power of 2. See "Bit Twiddling Hacks".
 *
 * Might be worth moving this to a library so that others
 * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop.
 */
static uint32_t
tcp_cache_roundup2(uint32_t a)
{
    a--;
    a |= a >> 1;
    a |= a >> 2;
    a |= a >> 4;
    a |= a >> 8;
    a |= a >> 16;
    a++;

    return a;
}

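/*
 * Worked examples: tcp_cache_roundup2(5) == 8, tcp_cache_roundup2(1024) ==
 * 1024 (powers of two map to themselves), tcp_cache_roundup2(1025) == 2048.
 * The decrement/increment around the bit-smearing ORs is what keeps exact
 * powers of two unchanged.
 */
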
static void
tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
    struct ifnet *ifp = tcks->ifp;
    uint8_t len = sizeof(key->thk_net_signature);
    uint16_t flags;

    if (tcks->af == AF_INET6) {
        int ret;

        key->thk_family = AF_INET6;
        ret = ifnet_get_netsignature(ifp, AF_INET6, &len, &flags,
            key->thk_net_signature);

        /*
         * ifnet_get_netsignature only returns EINVAL if ifp is NULL
         * (we made sure that in the other cases it does not). So,
         * in this case we should take the connection's address.
         */
        if (ret == ENOENT || ret == EINVAL) {
            memcpy(&key->thk_ip.addr6, &tcks->laddr.addr6, sizeof(struct in6_addr));
        }
    } else {
        int ret;

        key->thk_family = AF_INET;
        ret = ifnet_get_netsignature(ifp, AF_INET, &len, &flags,
            key->thk_net_signature);

        /*
         * ifnet_get_netsignature only returns EINVAL if ifp is NULL
         * (we made sure that in the other cases it does not). So,
         * in this case we should take the connection's address.
         */
        if (ret == ENOENT || ret == EINVAL) {
            memcpy(&key->thk_ip.addr, &tcks->laddr.addr, sizeof(struct in_addr));
        }
    }
}

static uint16_t
tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key)
{
    uint32_t hash;

    bzero(key, sizeof(struct tcp_cache_key));

    tcp_cache_hash_src(tcks, &key->tck_src);

    if (tcks->af == AF_INET6) {
        key->tck_family = AF_INET6;
        memcpy(&key->tck_dst.addr6, &tcks->faddr.addr6,
            sizeof(struct in6_addr));
    } else {
        key->tck_family = AF_INET;
        memcpy(&key->tck_dst.addr, &tcks->faddr.addr,
            sizeof(struct in_addr));
    }

    hash = net_flowhash(key, sizeof(struct tcp_cache_key),
        tcp_cache_hash_seed);

    return (uint16_t)(hash & (tcp_cache_size - 1));
}

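/*
 * The final mask works because tcp_cache_size is always a power of two
 * (see tcp_cache_init()): (hash & (tcp_cache_size - 1)) is then equivalent
 * to hash % tcp_cache_size, selecting a bucket without a division.
 */
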
static void
tcp_cache_unlock(struct tcp_cache_head *head)
{
    lck_mtx_unlock(&head->tch_mtx);
}

/*
 * Make sure that everything that happens after tcp_getcache_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and copies out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_cache_unlock().
 */
static struct tcp_cache *
tcp_getcache_with_lock(struct tcp_cache_key_src *tcks,
    int create, struct tcp_cache_head **headarg)
{
    struct tcp_cache *__single tpcache = NULL;
    struct tcp_cache_head *__single head;
    struct tcp_cache_key key;
    uint16_t hash;
    int i = 0;

    hash = tcp_cache_hash(tcks, &key);
    head = &tcp_cache[hash];

    lck_mtx_lock(&head->tch_mtx);

    /*** First step: Look for the tcp_cache in our bucket ***/
    SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
        if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0) {
            break;
        }

        i++;
    }

    /*** Second step: If it's not there, create/recycle it ***/
    if ((tpcache == NULL) && create) {
        if (i >= TCP_CACHE_BUCKET_SIZE) {
            struct tcp_cache *oldest_cache = NULL;
            uint32_t max_age = 0;

            /* Look for the oldest tcp_cache in the bucket */
            SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
                uint32_t age = tcp_now - tpcache->tc_last_access;
                if (age >= max_age) {
                    max_age = age;
                    oldest_cache = tpcache;
                }
            }
            VERIFY(oldest_cache != NULL);

            tpcache = oldest_cache;

            /* We recycle, thus let's indicate that there is no cookie */
            tpcache->tc_tfo_cookie_len = 0;
        } else {
            /* Create a new cache and add it to the list */
            tpcache = kalloc_type(struct tcp_cache, Z_NOPAGEWAIT | Z_ZERO);
            if (tpcache == NULL) {
                os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__);
                goto out_null;
            }

            tpcache->tc_mptcp_version = (uint8_t)mptcp_preferred_version;
            tpcache->tc_mptcp_next_version_try = tcp_now;

            SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list);
        }

        memcpy(&tpcache->tc_key, &key, sizeof(key));
    }

    if (tpcache == NULL) {
        goto out_null;
    }

    /* Update timestamp for garbage collection purposes */
    tpcache->tc_last_access = tcp_now;
    *headarg = head;

    return tpcache;

out_null:
    tcp_cache_unlock(head);
    return NULL;
}

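/*
 * A minimal usage sketch of the lookup contract described above
 * (hypothetical caller, for illustration only):
 *
 *     struct tcp_cache_head *head;
 *     struct tcp_cache *tpcache;
 *
 *     tpcache = tcp_getcache_with_lock(&tcks, 1, &head);
 *     if (tpcache != NULL) {
 *         ... short, bucket-lock-held work on tpcache ...
 *         tcp_cache_unlock(head);
 *     }
 *     // on NULL, the bucket lock has already been dropped
 */
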
static void
tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks)
{
    struct inpcb *inp = tp->t_inpcb;
    memset(tcks, 0, sizeof(*tcks));

    tcks->ifp = inp->inp_last_outifp;

    if (inp->inp_vflag & INP_IPV6) {
        memcpy(&tcks->laddr.addr6, &inp->in6p_laddr, sizeof(struct in6_addr));
        memcpy(&tcks->faddr.addr6, &inp->in6p_faddr, sizeof(struct in6_addr));
        tcks->af = AF_INET6;
    } else {
        memcpy(&tcks->laddr.addr, &inp->inp_laddr, sizeof(struct in_addr));
        memcpy(&tcks->faddr.addr, &inp->inp_faddr, sizeof(struct in_addr));
        tcks->af = AF_INET;
    }

    return;
}

static void
mptcp_version_cache_key_src_init(struct sockaddr *dst, struct tcp_cache_key_src *tcks)
{
    memset(tcks, 0, sizeof(*tcks));

    if (dst->sa_family == AF_INET) {
        memcpy(&tcks->faddr.addr, &SIN(dst)->sin_addr, sizeof(struct in_addr));
        tcks->af = AF_INET;
    } else {
        memcpy(&tcks->faddr.addr6, &SIN6(dst)->sin6_addr, sizeof(struct in6_addr));
        tcks->af = AF_INET6;
    }

    return;
}

static void
tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *__counted_by(len) cookie, uint8_t len)
{
    struct tcp_cache_head *__single head;
    struct tcp_cache *__single tpcache;

    /* Call lookup/create function */
    tpcache = tcp_getcache_with_lock(tcks, 1, &head);
    if (tpcache == NULL) {
        return;
    }

    tpcache->tc_tfo_cookie_len = len > TFO_COOKIE_LEN_MAX ?
        TFO_COOKIE_LEN_MAX : len;
    memcpy(tpcache->tc_tfo_cookie, cookie, tpcache->tc_tfo_cookie_len);

    tcp_cache_unlock(head);
}

void
tcp_cache_set_cookie(struct tcpcb *tp, u_char *__counted_by(len) cookie, uint8_t len)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_cache_set_cookie_common(&tcks, cookie, len);
}

static int
tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks,
    u_char *__counted_by(maxlen) cookie, uint8_t maxlen, uint8_t *len)
{
#pragma unused(maxlen)
    struct tcp_cache_head *__single head;
    struct tcp_cache *__single tpcache;

    /* Call lookup/create function */
    tpcache = tcp_getcache_with_lock(tcks, 1, &head);
    if (tpcache == NULL) {
        return 0;
    }

    if (tpcache->tc_tfo_cookie_len == 0) {
        tcp_cache_unlock(head);
        return 0;
    }

    /*
     * Not enough space - this should never happen as it has been checked
     * in tcp_tfo_check. So, fail here!
     */
    VERIFY(tpcache->tc_tfo_cookie_len <= *len);

    memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len);
    *len = tpcache->tc_tfo_cookie_len;

    tcp_cache_unlock(head);

    return 1;
}

/*
 * Get the cookie related to 'tp', and copy it into 'cookie', provided that
 * there is enough space ('*len' designates the available memory on input).
 * Upon return, '*len' is set to the cookie's length.
 *
 * Returns 0 if we should request a cookie.
 * Returns 1 if the cookie has been found and written.
 */
int
tcp_cache_get_cookie(struct tcpcb *tp, u_char *__counted_by(maxlen) cookie,
    uint8_t maxlen, uint8_t *len)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    return tcp_cache_get_cookie_common(&tcks, cookie, maxlen, len);
}

static unsigned int
tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_cache_head *__single head;
    struct tcp_cache *__single tpcache;
    unsigned int cookie_len;

    /* Call lookup/create function */
    tpcache = tcp_getcache_with_lock(tcks, 1, &head);
    if (tpcache == NULL) {
        return 0;
    }

    cookie_len = tpcache->tc_tfo_cookie_len;

    tcp_cache_unlock(head);

    return cookie_len;
}

unsigned int
tcp_cache_get_cookie_len(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    return tcp_cache_get_cookie_len_common(&tcks);
}

/*
 * @return:
 *  0 MPTCP_VERSION_0
 *  1 MPTCP_VERSION_1
 */
uint8_t
tcp_cache_get_mptcp_version(struct sockaddr *dst)
{
    struct tcp_cache_key_src tcks;
    mptcp_version_cache_key_src_init(dst, &tcks);
    uint8_t version = (uint8_t)mptcp_preferred_version;

    struct tcp_cache_head *__single head;
    struct tcp_cache *__single tpcache;

    /* Call lookup/create function */
    tpcache = tcp_getcache_with_lock(&tcks, 1, &head);
    if (tpcache == NULL) {
        return version;
    }

    version = tpcache->tc_mptcp_version;

    /* Let's see if we should try the preferred version again */
    if (!tpcache->tc_mptcp_version_confirmed &&
        version != mptcp_preferred_version &&
        TSTMP_GEQ(tcp_now, tpcache->tc_mptcp_next_version_try)) {
        version = (uint8_t)mptcp_preferred_version;
    }

    tcp_cache_unlock(head);
    return version;
}

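/*
 * Probing example, for illustration (assuming mptcp_preferred_version ==
 * MPTCP_VERSION_1): a fresh cache entry starts at the preferred version
 * with tc_mptcp_next_version_try == tcp_now. If version 1 fails,
 * tcp_cache_update_mptcp_version() below switches the entry to version 0
 * and arms a retry mptcp_version_timeout minutes (24h by default) in the
 * future; until that retry fires, tcp_cache_get_mptcp_version() keeps
 * returning version 0. Once the preferred version succeeds, the entry is
 * confirmed and stays sticky.
 */
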
void
tcp_cache_update_mptcp_version(struct tcpcb *tp, boolean_t succeeded)
{
    uint8_t version = tptomptp(tp)->mpt_version;
    struct inpcb *inp = tp->t_inpcb;
    struct tcp_cache_key_src tcks;
    struct tcp_cache_head *__single head;
    struct tcp_cache *__single tpcache;

    if (inp->inp_vflag & INP_IPV6) {
        struct sockaddr_in6 dst = {
            .sin6_len = sizeof(struct sockaddr_in6),
            .sin6_family = AF_INET6,
            .sin6_addr = inp->in6p_faddr,
        };
        mptcp_version_cache_key_src_init(SA(&dst), &tcks);
    } else {
        struct sockaddr_in dst = {
            .sin_len = sizeof(struct sockaddr_in),
            .sin_family = AF_INET,
            .sin_addr = inp->inp_faddr,
        };
        mptcp_version_cache_key_src_init(SA(&dst), &tcks);
    }

    /* Call lookup/create function */
    tpcache = tcp_getcache_with_lock(&tcks, 1, &head);
    if (tpcache == NULL) {
        return;
    }

    /* Nothing to update once the version is confirmed - we are no longer probing */
    if (tpcache->tc_mptcp_version_confirmed) {
        goto exit;
    }

    if (succeeded) {
        if (version == (uint8_t)mptcp_preferred_version) {
            /* Preferred version succeeded - make it sticky */
            tpcache->tc_mptcp_version_confirmed = true;
            tpcache->tc_mptcp_version = version;
        } else {
            /*
             * If we are past the next version try, set it
             * so that we try the preferred version again in 24h.
             */
            if (TSTMP_GEQ(tcp_now, tpcache->tc_mptcp_next_version_try)) {
                tpcache->tc_mptcp_next_version_try = tcp_now + tcp_min_to_hz(mptcp_version_timeout);
            }
        }
    } else {
        if (version == (uint8_t)mptcp_preferred_version) {
            /* Preferred version failed - try the other version */
            tpcache->tc_mptcp_version = version == MPTCP_VERSION_0 ? MPTCP_VERSION_1 : MPTCP_VERSION_0;
        }
        /*
         * The connection failed - make sure we give the preferred version
         * another shot in 24h.
         */
        if (TSTMP_GEQ(tcp_now, tpcache->tc_mptcp_next_version_try)) {
            tpcache->tc_mptcp_next_version_try = tcp_now + tcp_min_to_hz(mptcp_version_timeout);
        }
    }

exit:
    tcp_cache_unlock(head);
}

static uint16_t
tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
    uint32_t hash;

    bzero(key, sizeof(struct tcp_heuristic_key));

    tcp_cache_hash_src(tcks, key);

    hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
        tcp_cache_hash_seed);

    return (uint16_t)(hash & (tcp_cache_size - 1));
}

static void
tcp_heuristic_unlock(struct tcp_heuristics_head *head)
{
    lck_mtx_unlock(&head->thh_mtx);
}

/*
 * Make sure that everything that happens after tcp_getheuristic_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and copies out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_heuristic_unlock().
 *
 *
 * ToDo - way too much code-duplication. We should create an interface to handle
 * bucketized hashtables with recycling of the oldest element.
 */
static struct tcp_heuristic *
tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks,
    int create, struct tcp_heuristics_head **headarg)
{
    struct tcp_heuristic *__single tpheur = NULL;
    struct tcp_heuristics_head *__single head;
    struct tcp_heuristic_key key;
    uint16_t hash;
    int i = 0;

    hash = tcp_heuristics_hash(tcks, &key);
    head = &tcp_heuristics[hash];

    lck_mtx_lock(&head->thh_mtx);

    /*** First step: Look for the tcp_heur in our bucket ***/
    SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
        if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0) {
            break;
        }

        i++;
    }

    /*** Second step: If it's not there, create/recycle it ***/
    if ((tpheur == NULL) && create) {
        if (i >= TCP_CACHE_BUCKET_SIZE) {
            struct tcp_heuristic *__single oldest_heur = NULL;
            uint32_t max_age = 0;

            /* Look for the oldest tcp_heur in the bucket */
            SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
                uint32_t age = tcp_now - tpheur->th_last_access;
                if (age >= max_age) {
                    max_age = age;
                    oldest_heur = tpheur;
                }
            }
            VERIFY(oldest_heur != NULL);

            tpheur = oldest_heur;

            /* We recycle - set everything to 0 */
            uint8_t *ptr = (uint8_t *)(struct tcp_heuristic *__indexable)tpheur;
            const size_t preamble = offsetof(struct tcp_heuristic, th_val_start);
            const size_t size = sizeof(struct tcp_heuristic) - preamble;
            bzero(ptr + preamble, size);
        } else {
            /* Create a new heuristic and add it to the list */
            tpheur = kalloc_type(struct tcp_heuristic, Z_NOPAGEWAIT | Z_ZERO);
            if (tpheur == NULL) {
                os_log_error(OS_LOG_DEFAULT, "%s could not allocate heuristic", __func__);
                goto out_null;
            }

            SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list);
        }

        /*
         * Set to tcp_now, to make sure it won't be > than tcp_now in the
         * near future.
         */
        tpheur->th_ecn_backoff = tcp_now;
        tpheur->th_tfo_backoff_until = tcp_now;
        tpheur->th_mptcp_backoff = tcp_now;
        tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);

        memcpy(&tpheur->th_key, &key, sizeof(key));
    }

    if (tpheur == NULL) {
        goto out_null;
    }

    /* Update timestamp for garbage collection purposes */
    tpheur->th_last_access = tcp_now;
    *headarg = head;

    return tpheur;

out_null:
    tcp_heuristic_unlock(head);
    return NULL;
}

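/*
 * A sketch of what the recycling bzero above preserves: only the fields
 * before th_val_start survive, i.e. the SLIST linkage, th_last_access and
 * th_key. Everything from th_val_start onward (loss counters, backoff
 * timestamps, flag bits) is reset before the entry is reused:
 *
 *     offsetof(struct tcp_heuristic, th_val_start)  // first erased byte
 *     sizeof(struct tcp_heuristic) - preamble       // number of erased bytes
 */
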
static void
tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, uint8_t flags)
{
    struct tcp_heuristics_head *__single head;
    struct tcp_heuristic *__single tpheur;

    /*
     * Always create heuristics here because MPTCP needs to write success
     * into it. Thus, we always end up creating them.
     */
    tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
    if (tpheur == NULL) {
        return;
    }

    if (flags & TCPCACHE_F_TFO_DATA) {
        if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) {
            os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-data loss to 0 from %u on heur %lx\n",
                __func__, tpheur->th_tfo_data_loss, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
        }
        tpheur->th_tfo_data_loss = 0;
    }

    if (flags & TCPCACHE_F_TFO_REQ) {
        if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) {
            os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-req loss to 0 from %u on heur %lx\n",
                __func__, tpheur->th_tfo_req_loss, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
        }
        tpheur->th_tfo_req_loss = 0;
    }

    if (flags & TCPCACHE_F_TFO_DATA_RST) {
        if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) {
            os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-data RST to 0 from %u on heur %lx\n",
                __func__, tpheur->th_tfo_data_rst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
        }
        tpheur->th_tfo_data_rst = 0;
    }

    if (flags & TCPCACHE_F_TFO_REQ_RST) {
        if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) {
            os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-req RST to 0 from %u on heur %lx\n",
                __func__, tpheur->th_tfo_req_rst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
        }
        tpheur->th_tfo_req_rst = 0;
    }

    if (flags & TCPCACHE_F_ECN) {
        tpheur->th_ecn_loss = 0;
        tpheur->th_ecn_aggressive = 0;
        tpheur->th_ecn_synrst = 0;
        tpheur->th_ecn_droprst = 0;
    }

    if (flags & TCPCACHE_F_MPTCP) {
        tpheur->th_mptcp_loss = 0;
        if (tpheur->th_mptcp_success < MPTCP_SUCCESS_TRIGGER) {
            tpheur->th_mptcp_success++;

            if (tpheur->th_mptcp_success == MPTCP_SUCCESS_TRIGGER) {
                os_log(mptcp_log_handle, "%s disabling heuristics for 12 hours", __func__);
                tpheur->th_mptcp_heuristic_disabled = 1;
                /* Disable heuristics for 12 hours */
                tpheur->th_mptcp_backoff = tcp_now + tcp_min_to_hz(tcp_ecn_timeout * 12);
            }
        }
    }

    tcp_heuristic_unlock(head);
}

void
tcp_heuristic_tfo_success(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;
    uint8_t flag = 0;

    tcp_cache_key_src_create(tp, &tcks);

    if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
        flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ |
            TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
    }
    if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
        flag = (TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
    }

    tcp_heuristic_reset_counters(&tcks, flag);
}

void
tcp_heuristic_mptcp_success(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_MPTCP);
}

void
tcp_heuristic_ecn_success(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
}

static void
__tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur)
{
    if (tpheur->th_tfo_in_backoff) {
        return;
    }

    tpheur->th_tfo_in_backoff = 1;

    if (tpheur->th_tfo_enabled_time) {
        uint32_t old_backoff = tpheur->th_tfo_backoff;

        tpheur->th_tfo_backoff -= (tcp_now - tpheur->th_tfo_enabled_time);
        if (tpheur->th_tfo_backoff > old_backoff) {
            tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
        }
    }

    tpheur->th_tfo_backoff_until = tcp_now + tpheur->th_tfo_backoff;

    /* Then, increase the backoff time */
    tpheur->th_tfo_backoff *= 2;

    if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum)) {
        tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
    }

    os_log(OS_LOG_DEFAULT, "%s disable TFO until %u now %u on %lx\n", __func__,
        tpheur->th_tfo_backoff_until, tcp_now, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
}

static void
tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_heuristics_head *__single head;
    struct tcp_heuristic *__single tpheur;

    tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
    if (tpheur == NULL) {
        return;
    }

    __tcp_heuristic_tfo_middlebox_common(tpheur);

    tcp_heuristic_unlock(head);
}

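/*
 * Backoff arithmetic, for illustration (assuming tcp_ecn_timeout == 5 and
 * tcp_backoff_maximum == 65536): the first middlebox event backs TFO off
 * for th_tfo_backoff == tcp_min_to_hz(5), i.e. 5 minutes. Each subsequent
 * event doubles the next backoff (5, 10, 20, 40, ... minutes) until it
 * would exceed tcp_min_to_hz(tcp_backoff_maximum), at which point it wraps
 * back to the initial 5 minutes. Time spent successfully using TFO (via
 * th_tfo_enabled_time) is credited against the next backoff.
 */
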
static void
tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
    uint32_t flags)
{
    struct tcp_heuristics_head *__single head;
    struct tcp_heuristic *__single tpheur;

    tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
    if (tpheur == NULL) {
        return;
    }

    /* Limit to prevent integer-overflow during exponential backoff */
    if ((flags & TCPCACHE_F_TFO_DATA) && tpheur->th_tfo_data_loss < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_tfo_data_loss++;

        if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) {
            __tcp_heuristic_tfo_middlebox_common(tpheur);
        }
    }

    if ((flags & TCPCACHE_F_TFO_REQ) && tpheur->th_tfo_req_loss < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_tfo_req_loss++;

        if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) {
            __tcp_heuristic_tfo_middlebox_common(tpheur);
        }
    }

    if ((flags & TCPCACHE_F_TFO_DATA_RST) && tpheur->th_tfo_data_rst < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_tfo_data_rst++;

        if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) {
            __tcp_heuristic_tfo_middlebox_common(tpheur);
        }
    }

    if ((flags & TCPCACHE_F_TFO_REQ_RST) && tpheur->th_tfo_req_rst < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_tfo_req_rst++;

        if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) {
            __tcp_heuristic_tfo_middlebox_common(tpheur);
        }
    }

    if ((flags & TCPCACHE_F_MPTCP) &&
        tpheur->th_mptcp_loss < TCP_CACHE_OVERFLOW_PROTECT &&
        tpheur->th_mptcp_heuristic_disabled == 0) {
        tpheur->th_mptcp_loss++;
        if (tpheur->th_mptcp_loss >= MPTCP_MAX_SYN_LOSS) {
            /*
             * Yes, we take tcp_ecn_timeout, to avoid adding yet
             * another sysctl that is just used for testing.
             */
            tpheur->th_mptcp_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_mptcp_loss - MPTCP_MAX_SYN_LOSS));
            tpheur->th_mptcp_in_backoff = 1;

            os_log(OS_LOG_DEFAULT, "%s disable MPTCP until %u now %u on %lx\n",
                __func__, tpheur->th_mptcp_backoff, tcp_now,
                (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
        }
    }

    if ((flags & TCPCACHE_F_ECN_SYN_LOSS) &&
        tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT &&
        TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
        tpheur->th_ecn_loss++;
        if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS) {
            tcpstat.tcps_ecn_fallback_synloss++;
            TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, ecn_fallback_synloss);
            tpheur->th_ecn_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_ecn_loss - ECN_MAX_SYN_LOSS));
        }
    }

    if ((flags & TCPCACHE_F_ECN_AGGRESSIVE) &&
        tpheur->th_ecn_aggressive < TCP_CACHE_OVERFLOW_PROTECT &&
        TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
        tpheur->th_ecn_aggressive++;
        if (tpheur->th_ecn_aggressive >= ECN_MAX_CE_AGGRESSIVE) {
            tcpstat.tcps_ecn_fallback_ce++;
            TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, ecn_fallback_ce);
            tpheur->th_ecn_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_ecn_aggressive - ECN_MAX_CE_AGGRESSIVE));
        }
    }

    if ((flags & TCPCACHE_F_ECN_DROPRST) &&
        tpheur->th_ecn_droprst < TCP_CACHE_OVERFLOW_PROTECT &&
        TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
        tpheur->th_ecn_droprst++;
        if (tpheur->th_ecn_droprst >= ECN_MAX_DROPRST) {
            tcpstat.tcps_ecn_fallback_droprst++;
            TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
                ecn_fallback_droprst);
            tpheur->th_ecn_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_ecn_droprst - ECN_MAX_DROPRST));
        }
    }

    if ((flags & TCPCACHE_F_ECN_SYNRST) &&
        tpheur->th_ecn_synrst < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_ecn_synrst++;
        if (tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) {
            tcpstat.tcps_ecn_fallback_synrst++;
            TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
                ecn_fallback_synrst);
            tpheur->th_ecn_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_ecn_synrst - ECN_MAX_SYNRST));
        }
    }
    tcp_heuristic_unlock(head);
}

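/*
 * Example of the exponential ECN backoff above (assuming tcp_ecn_timeout
 * == 5): once th_ecn_loss reaches ECN_MAX_SYN_LOSS (5), the backoff is
 * tcp_min_to_hz(5) << 0 == 5 minutes; the next loss makes it 5 << 1 == 10
 * minutes, then 20 minutes, and so on. Because the counters saturate at
 * TCP_CACHE_OVERFLOW_PROTECT (9), the shift stays small and the product
 * cannot overflow.
 */
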
void
tcp_heuristic_tfo_loss(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;
    uint32_t flag = 0;

    if (symptoms_is_wifi_lossy() &&
        IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
        return;
    }

    tcp_cache_key_src_create(tp, &tcks);

    if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
        flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ);
    }
    if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
        flag = TCPCACHE_F_TFO_REQ;
    }

    tcp_heuristic_inc_counters(&tcks, flag);
}

void
tcp_heuristic_tfo_rst(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;
    uint32_t flag = 0;

    tcp_cache_key_src_create(tp, &tcks);

    if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
        flag = (TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
    }
    if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
        flag = TCPCACHE_F_TFO_REQ_RST;
    }

    tcp_heuristic_inc_counters(&tcks, flag);
}

void
tcp_heuristic_mptcp_loss(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    if (symptoms_is_wifi_lossy() &&
        IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
        return;
    }

    tcp_cache_key_src_create(tp, &tcks);

    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_MPTCP);
}

void
tcp_heuristic_ecn_loss(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    if (symptoms_is_wifi_lossy() &&
        IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
        return;
    }

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYN_LOSS);
}

void
tcp_heuristic_ecn_droprst(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
}

void
tcp_heuristic_ecn_synrst(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
}

void
tcp_heuristic_ecn_aggressive(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_AGGRESSIVE);
}

void
tcp_heuristic_tfo_middlebox(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tp->t_tfo_flags |= TFO_F_HEURISTIC_DONE;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_tfo_middlebox_common(&tcks);
}

static boolean_t
tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_heuristics_head *__single head;
    struct tcp_heuristic *__single tpheur;

    if (disable_tcp_heuristics) {
        return TRUE;
    }

    /* Get the tcp-heuristic. */
    tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
    if (tpheur == NULL) {
        return TRUE;
    }

    if (tpheur->th_tfo_in_backoff == 0) {
        goto tfo_ok;
    }

    if (TSTMP_GT(tcp_now, tpheur->th_tfo_backoff_until)) {
        tpheur->th_tfo_in_backoff = 0;
        tpheur->th_tfo_enabled_time = tcp_now;

        goto tfo_ok;
    }

    tcp_heuristic_unlock(head);
    return FALSE;

tfo_ok:
    tcp_heuristic_unlock(head);
    return TRUE;
}

boolean_t
tcp_heuristic_do_tfo(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    if (tcp_heuristic_do_tfo_common(&tcks)) {
        return TRUE;
    }

    return FALSE;
}

/*
 * @return:
 *  0 Enable MPTCP (we are still discovering middleboxes)
 * -1 Enable MPTCP (heuristics have been temporarily disabled)
 *  1 Disable MPTCP
 */
int
tcp_heuristic_do_mptcp(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;
    struct tcp_heuristics_head *__single head = NULL;
    struct tcp_heuristic *__single tpheur;
    int ret = 0;

    if (disable_tcp_heuristics ||
        (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE)) {
        return 0;
    }

    tcp_cache_key_src_create(tp, &tcks);

    /* Get the tcp-heuristic. */
    tpheur = tcp_getheuristic_with_lock(&tcks, 0, &head);
    if (tpheur == NULL) {
        return 0;
    }

    if (tpheur->th_mptcp_in_backoff == 0 ||
        tpheur->th_mptcp_heuristic_disabled == 1) {
        goto mptcp_ok;
    }

    if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now)) {
        goto fallback;
    }

    tpheur->th_mptcp_in_backoff = 0;

mptcp_ok:
    if (tpheur->th_mptcp_heuristic_disabled) {
        ret = -1;

        if (TSTMP_GT(tcp_now, tpheur->th_mptcp_backoff)) {
            tpheur->th_mptcp_heuristic_disabled = 0;
            tpheur->th_mptcp_success = 0;
        }
    }

    tcp_heuristic_unlock(head);
    return ret;

fallback:
    if (head) {
        tcp_heuristic_unlock(head);
    }

    if (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FIRSTPARTY) {
        tcpstat.tcps_mptcp_fp_heuristic_fallback++;
    } else {
        tcpstat.tcps_mptcp_heuristic_fallback++;
    }

    return 1;
}

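/*
 * A minimal usage sketch of the return convention above (hypothetical
 * caller, for illustration only):
 *
 *     switch (tcp_heuristic_do_mptcp(tp)) {
 *     case 0:   // still discovering middleboxes
 *     case -1:  // heuristics temporarily disabled
 *         // negotiate MPTCP on this connection
 *         break;
 *     case 1:   // too many SYN+MP_CAPABLE losses
 *         // fall back to plain TCP
 *         break;
 *     }
 */
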
static boolean_t
tcp_heuristic_do_ecn_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_heuristics_head *__single head;
    struct tcp_heuristic *__single tpheur;
    boolean_t ret = TRUE;

    if (disable_tcp_heuristics) {
        return TRUE;
    }

    /* Get the tcp-heuristic. */
    tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
    if (tpheur == NULL) {
        return ret;
    }

    if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now)) {
        ret = FALSE;
    } else {
        /* Reset the following counters to start re-evaluating */
        if (tpheur->th_ecn_droprst >= ECN_RETRY_LIMIT) {
            tpheur->th_ecn_droprst = 0;
        }

        if (tpheur->th_ecn_synrst >= ECN_RETRY_LIMIT) {
            tpheur->th_ecn_synrst = 0;
        }

        /* Make sure it follows along */
        tpheur->th_ecn_backoff = tcp_now;
    }

    tcp_heuristic_unlock(head);

    return ret;
}

boolean_t
tcp_heuristic_do_ecn(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    return tcp_heuristic_do_ecn_common(&tcks);
}

boolean_t
tcp_heuristic_do_ecn_with_address(struct ifnet *ifp,
    union sockaddr_in_4_6 *local_address)
{
    struct tcp_cache_key_src tcks;

    memset(&tcks, 0, sizeof(tcks));
    tcks.ifp = ifp;

    calculate_tcp_clock();

    if (local_address->sa.sa_family == AF_INET6) {
        memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
        tcks.af = AF_INET6;
    } else if (local_address->sa.sa_family == AF_INET) {
        memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
        tcks.af = AF_INET;
    }

    return tcp_heuristic_do_ecn_common(&tcks);
}

void
tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer,
    struct ifnet *ifp, union sockaddr_in_4_6 *local_address)
{
    struct tcp_cache_key_src tcks;

    memset(&tcks, 0, sizeof(tcks));
    tcks.ifp = ifp;

    calculate_tcp_clock();

    if (local_address->sa.sa_family == AF_INET6) {
        memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
        tcks.af = AF_INET6;
    } else if (local_address->sa.sa_family == AF_INET) {
        memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
        tcks.af = AF_INET;
    }

    if (necp_buffer->necp_tcp_ecn_heuristics_success) {
        tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_loss) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYN_LOSS);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rst) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_syn_rst) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_aggressive) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_AGGRESSIVE);
    }

    return;
}

boolean_t
tcp_heuristic_do_tfo_with_address(struct ifnet *ifp,
    union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address,
    uint8_t *__counted_by(maxlen) cookie, uint8_t maxlen, uint8_t *cookie_len)
{
    struct tcp_cache_key_src tcks;

    memset(&tcks, 0, sizeof(tcks));
    tcks.ifp = ifp;

    calculate_tcp_clock();

    if (remote_address->sa.sa_family == AF_INET6) {
        memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
        memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr));
        tcks.af = AF_INET6;
    } else if (remote_address->sa.sa_family == AF_INET) {
        memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
        memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr));
        tcks.af = AF_INET;
    }

    if (tcp_heuristic_do_tfo_common(&tcks)) {
        if (!tcp_cache_get_cookie_common(&tcks, cookie, maxlen, cookie_len)) {
            *cookie_len = 0;
        }
        return TRUE;
    }

    return FALSE;
}

void
tcp_heuristics_tfo_update(struct necp_tcp_tfo_cache *necp_buffer,
    struct ifnet *ifp, union sockaddr_in_4_6 *local_address,
    union sockaddr_in_4_6 *remote_address)
{
    struct tcp_cache_key_src tcks;

    memset(&tcks, 0, sizeof(tcks));
    tcks.ifp = ifp;

    calculate_tcp_clock();

    if (remote_address->sa.sa_family == AF_INET6) {
        memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
        memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr));
        tcks.af = AF_INET6;
    } else if (remote_address->sa.sa_family == AF_INET) {
        memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
        memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr));
        tcks.af = AF_INET;
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_success) {
        tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA |
            TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_success_req) {
        tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_loss) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_loss_req) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_rst_data) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_rst_req) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_middlebox) {
        tcp_heuristic_tfo_middlebox_common(&tcks);
    }

    if (necp_buffer->necp_tcp_tfo_cookie_len != 0) {
        tcp_cache_set_cookie_common(&tcks,
            necp_buffer->necp_tcp_tfo_cookie, necp_buffer->necp_tcp_tfo_cookie_len);
    }

    return;
}

#if (DEVELOPMENT || DEBUG)
/*
 * This test sysctl forces the hash table to be full which will force us to
 * erase portions of it.
 */
static int
sysctl_fill_hashtable SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = 0, val;

    val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr) {
        return error;
    }
    if (val == 1) {
        struct necp_tcp_tfo_cache necp_buffer = {};
        union sockaddr_in_4_6 local_address = {}, remote_address = {};

        necp_buffer.necp_tcp_tfo_heuristics_success = 1;
        necp_buffer.necp_tcp_tfo_heuristics_loss = 1;
        necp_buffer.necp_tcp_tfo_heuristics_middlebox = 1;

        for (unsigned i = 0; i < 1024; i++) {
            local_address.sin.sin_family = AF_INET;
            local_address.sin.sin_len = sizeof(struct sockaddr_in);
            local_address.sin.sin_port = random() % UINT16_MAX;
            local_address.sin.sin_addr.s_addr = random();

            remote_address.sin.sin_family = AF_INET;
            remote_address.sin.sin_len = sizeof(struct sockaddr_in);
            remote_address.sin.sin_port = random() % UINT16_MAX;
            remote_address.sin.sin_addr.s_addr = random();

            tcp_heuristics_tfo_update(&necp_buffer, lo_ifp,
                &local_address,
                &remote_address);
        }
    }

    return error;
}

static int fill_hash_table = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, test_cache, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_LOCKED, &fill_hash_table, 0, &sysctl_fill_hashtable, "I",
    "Tests the hash table erasing procedures");
#endif /* DEVELOPMENT || DEBUG */

static void
sysctl_cleartfocache(void)
{
    int i;

    for (i = 0; i < tcp_cache_size; i++) {
        struct tcp_cache_head *__single head = &tcp_cache[i];
        struct tcp_cache *__single tpcache, *__single tmp;
        struct tcp_heuristics_head *__single hhead = &tcp_heuristics[i];
        struct tcp_heuristic *__single tpheur, *__single htmp;

        lck_mtx_lock(&head->tch_mtx);
        SLIST_FOREACH_SAFE(tpcache, &head->tcp_caches, list, tmp) {
            SLIST_REMOVE(&head->tcp_caches, tpcache, tcp_cache, list);
            kfree_type(struct tcp_cache, tpcache);
        }
        lck_mtx_unlock(&head->tch_mtx);

        lck_mtx_lock(&hhead->thh_mtx);
        SLIST_FOREACH_SAFE(tpheur, &hhead->tcp_heuristics, list, htmp) {
            SLIST_REMOVE(&hhead->tcp_heuristics, tpheur, tcp_heuristic, list);
            kfree_type(struct tcp_heuristic, tpheur);
        }
        lck_mtx_unlock(&hhead->thh_mtx);
    }
}

/* This sysctl is useful for testing purposes only */
static int tcpcleartfo = 0;

static int
sysctl_cleartfo SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = 0, val, oldval = tcpcleartfo;

    val = oldval;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr) {
        if (error) {
            os_log_error(OS_LOG_DEFAULT, "%s could not parse int: %d", __func__, error);
        }
        return error;
    }

    /*
     * The actual value does not matter. If the value is set, it triggers
     * the clearing of the TFO cache. If a future implementation does not
     * use the route entry to hold the TFO cache, replace the route sysctl.
     */

    if (val != oldval) {
        sysctl_cleartfocache();
    }

    tcpcleartfo = val;

    return error;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I",
    "Toggle to clear the TFO destination based heuristic cache");

static int
sysctl_tcp_heuristics_list SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = 0;
    size_t total_entries = 0;
    size_t total_size;
    bool entitled = false;

    if (tcp_heuristics == NULL || tcp_heuristics_size == 0) {
        return ENOENT;
    }

    if (IOCurrentTaskHasEntitlement(TCP_HEURISTICS_LIST_ENTITLEMENT)) {
        entitled = true;
    }

    /* First pass: count total number of heuristic entries across all buckets */
    for (size_t i = 0; i < tcp_heuristics_size; i++) {
        struct tcp_heuristics_head *head = &tcp_heuristics[i];
        struct tcp_heuristic *tpheur;

        lck_mtx_lock(&head->thh_mtx);
        SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
            total_entries++;
        }
        lck_mtx_unlock(&head->thh_mtx);
    }

    total_size = total_entries * sizeof(struct tcp_heuristics_data);

    if (req->oldptr == USER_ADDR_NULL) {
        /* Just return the size needed */
        return SYSCTL_OUT(req, NULL, total_size);
    }

    if (req->oldlen < total_size) {
        return ENOMEM;
    }

    /* Second pass: copy out all heuristic entries */
    for (size_t i = 0; i < tcp_heuristics_size; i++) {
        struct tcp_heuristics_head *head = &tcp_heuristics[i];
        struct tcp_heuristic *tpheur;

        lck_mtx_lock(&head->thh_mtx);
        SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
            struct tcp_heuristics_data heur_data;

            /* Copy data from tcp_heuristic to tcp_heuristics_data (excluding list field) */
            heur_data.th_last_access = tpheur->th_last_access;
            if (entitled) {
                heur_data.th_key = tpheur->th_key;
            } else {
                heur_data.th_key.thk_family = tpheur->th_key.thk_family;
            }
            heur_data.th_tfo_data_loss = tpheur->th_tfo_data_loss;
            heur_data.th_tfo_req_loss = tpheur->th_tfo_req_loss;
            heur_data.th_tfo_data_rst = tpheur->th_tfo_data_rst;
            heur_data.th_tfo_req_rst = tpheur->th_tfo_req_rst;
            heur_data.th_mptcp_loss = tpheur->th_mptcp_loss;
            heur_data.th_mptcp_success = tpheur->th_mptcp_success;
            heur_data.th_ecn_droprst = tpheur->th_ecn_droprst;
            heur_data.th_ecn_synrst = tpheur->th_ecn_synrst;
            heur_data.th_tfo_enabled_time = tpheur->th_tfo_enabled_time;
            heur_data.th_tfo_backoff_until = tpheur->th_tfo_backoff_until;
            heur_data.th_tfo_backoff = tpheur->th_tfo_backoff;
            heur_data.th_mptcp_backoff = tpheur->th_mptcp_backoff;
            heur_data.th_ecn_backoff = tpheur->th_ecn_backoff;
            heur_data.th_tfo_in_backoff = tpheur->th_tfo_in_backoff;
            heur_data.th_mptcp_in_backoff = tpheur->th_mptcp_in_backoff;
            heur_data.th_mptcp_heuristic_disabled = tpheur->th_mptcp_heuristic_disabled;

            error = SYSCTL_OUT(req, &heur_data, sizeof(struct tcp_heuristics_data));
            if (error) {
                lck_mtx_unlock(&head->thh_mtx);
                return error;
            }
        }
        lck_mtx_unlock(&head->thh_mtx);
    }

    return error;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, heuristics_list,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    NULL, 0, sysctl_tcp_heuristics_list, "S,tcp_heuristics_data",
    "TCP heuristics entries from all buckets");

static int
sysctl_tcp_cache_list SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = 0;
    size_t total_entries = 0;
    size_t total_size;
    bool entitled = false;

    if (tcp_cache == NULL || tcp_cache_size == 0) {
        return ENOENT;
    }

    if (IOCurrentTaskHasEntitlement(TCP_CACHE_LIST_ENTITLEMENT)) {
        entitled = true;
    }

    /* First pass: count total number of cache entries across all buckets */
    for (size_t i = 0; i < tcp_cache_size; i++) {
        struct tcp_cache_head *head = &tcp_cache[i];
        struct tcp_cache *tpcache;

        lck_mtx_lock(&head->tch_mtx);
        SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
            total_entries++;
        }
        lck_mtx_unlock(&head->tch_mtx);
    }

    total_size = total_entries * sizeof(struct tcp_cache_data);

    if (req->oldptr == USER_ADDR_NULL) {
        /* Just return the size needed */
        return SYSCTL_OUT(req, NULL, total_size);
    }

    if (req->oldlen < total_size) {
        return ENOMEM;
    }

    /* Second pass: copy out all cache entries */
    for (size_t i = 0; i < tcp_cache_size; i++) {
        struct tcp_cache_head *head = &tcp_cache[i];
        struct tcp_cache *tpcache;

        lck_mtx_lock(&head->tch_mtx);
        SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
            struct tcp_cache_data cache_data;

            /* Copy data from tcp_cache to tcp_cache_data (excluding list field) */
            cache_data.tc_last_access = tpcache->tc_last_access;
            if (entitled) {
                cache_data.tc_key = tpcache->tc_key;
            } else {
                cache_data.tc_key.tck_family = tpcache->tc_key.tck_family;
            }
            memcpy(cache_data.tc_tfo_cookie, tpcache->tc_tfo_cookie, TFO_COOKIE_LEN_MAX);
            cache_data.tc_tfo_cookie_len = tpcache->tc_tfo_cookie_len;
            cache_data.tc_mptcp_version_confirmed = tpcache->tc_mptcp_version_confirmed;
            cache_data.tc_mptcp_version = tpcache->tc_mptcp_version;
            cache_data.tc_mptcp_next_version_try = tpcache->tc_mptcp_next_version_try;

            error = SYSCTL_OUT(req, &cache_data, sizeof(struct tcp_cache_data));
            if (error) {
                lck_mtx_unlock(&head->tch_mtx);
                return error;
            }
        }
        lck_mtx_unlock(&head->tch_mtx);
    }

    return error;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, cache_list,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    NULL, 0, sysctl_tcp_cache_list, "S,tcp_cache_data",
    "TCP cache entries from all buckets");

void
tcp_cache_init(void)
{
    uint64_t sane_size_meg = sane_size / 1024 / 1024;
    size_t cache_size;
    /*
     * On machines with <100MB of memory this will result in a (full) cache-size
     * of 32 entries, thus 32 * 5 * 64bytes = 10KB. (about 0.01 %)
     * On machines with > 4GB of memory, we have a cache-size of 1024 entries,
     * thus about 327KB.
     *
     * Side-note: we convert to uint32_t. If sane_size is more than
     * 16000 TB, we lose precision. But, who cares? :)
     */
    cache_size = tcp_cache_roundup2((uint32_t)(sane_size_meg >> 2));
    if (cache_size < 32) {
        cache_size = 32;
    } else if (cache_size > 1024) {
        cache_size = 1024;
    }

    tcp_cache = zalloc_permanent(sizeof(struct tcp_cache_head) * cache_size,
        ZALIGN(struct tcp_cache_head));
    tcp_cache_size = cache_size;
    tcp_heuristics = zalloc_permanent(sizeof(struct tcp_heuristics_head) * cache_size,
        ZALIGN(struct tcp_heuristics_head));
    tcp_heuristics_size = cache_size;

    for (int i = 0; i < tcp_cache_size; i++) {
        lck_mtx_init(&tcp_cache[i].tch_mtx, &tcp_cache_mtx_grp,
            &tcp_cache_mtx_attr);
        SLIST_INIT(&tcp_cache[i].tcp_caches);

        lck_mtx_init(&tcp_heuristics[i].thh_mtx, &tcp_heuristic_mtx_grp,
            &tcp_heuristic_mtx_attr);
        SLIST_INIT(&tcp_heuristics[i].tcp_heuristics);
    }

    tcp_cache_hash_seed = RandomULong();
}
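
/*
 * Sizing example, derived from the code above: on a machine with 8GB of
 * memory, sane_size_meg == 8192 and 8192 >> 2 == 2048, which rounds to
 * 2048 and is then clamped to the 1024-bucket maximum. With
 * TCP_CACHE_BUCKET_SIZE == 5 this bounds each table at 5 * 1024 entries.
 */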