/*
 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* TCP-cache to store and retrieve TCP-related information */

#include <net/flowhash.h>
#include <net/route.h>
#include <net/necp.h>
#include <netinet/in_pcb.h>
#include <netinet/mptcp.h>
#include <netinet/mptcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <dev/random/randomdev.h>

typedef union {
	struct in_addr addr;
	struct in6_addr addr6;
} in_4_6_addr;

struct tcp_heuristic_key {
	union {
		uint8_t thk_net_signature[IFNET_SIGNATURELEN];
		in_4_6_addr thk_ip;
	};
	sa_family_t thk_family;
};

struct tcp_heuristic {
	SLIST_ENTRY(tcp_heuristic) list;

	uint32_t th_last_access;

	struct tcp_heuristic_key th_key;

	char th_val_start[0]; /* Marker for memsetting to 0 */

	uint8_t th_tfo_data_loss; /* The number of times a SYN+data has been lost */
	uint8_t th_tfo_req_loss; /* The number of times a SYN+cookie-req has been lost */
	uint8_t th_tfo_data_rst; /* The number of times a SYN+data has received a RST */
	uint8_t th_tfo_req_rst; /* The number of times a SYN+cookie-req has received a RST */
	uint8_t th_mptcp_loss; /* The number of times a SYN+MP_CAPABLE has been lost */
	uint8_t th_mptcp_success; /* The number of times MPTCP-negotiation has been successful */
	uint8_t th_ecn_loss; /* The number of times a SYN+ecn has been lost */
	uint8_t th_ecn_aggressive; /* The number of times we did an aggressive fallback */
	uint8_t th_ecn_droprst; /* The number of times ECN connections received a RST after first data pkt */
	uint8_t th_ecn_droprxmt; /* The number of times ECN connection is dropped after multiple retransmits */
	uint8_t th_ecn_synrst; /* The number of times RST was received in response to an ECN-enabled SYN */
	uint32_t th_tfo_enabled_time; /* The moment when we reenabled TFO after backing off */
	uint32_t th_tfo_backoff_until; /* Time until when we should not try out TFO */
	uint32_t th_tfo_backoff; /* Current backoff timer */
	uint32_t th_mptcp_backoff; /* Time until when we should not try out MPTCP */
	uint32_t th_ecn_backoff; /* Time until when we should not try out ECN */

	uint8_t th_tfo_in_backoff:1, /* Are we avoiding TFO due to the backoff timer? */
	    th_mptcp_in_backoff:1, /* Are we avoiding MPTCP due to the backoff timer? */
	    th_mptcp_heuristic_disabled:1; /* Are heuristics disabled? */

	char th_val_end[0]; /* Marker for memsetting to 0 */
};

struct tcp_heuristics_head {
	SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t thh_mtx;
};

struct tcp_cache_key {
	sa_family_t tck_family;

	struct tcp_heuristic_key tck_src;
	in_4_6_addr tck_dst;
};

#define MPTCP_VERSION_SUPPORTED 1
#define MPTCP_VERSION_UNSUPPORTED -1
#define MPTCP_VERSION_SUPPORTED_UNKNOWN 0
struct tcp_cache {
	SLIST_ENTRY(tcp_cache) list;

	uint32_t tc_last_access;

	struct tcp_cache_key tc_key;

	uint8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
	uint8_t tc_tfo_cookie_len;

	uint8_t tc_mptcp_version_confirmed:1;
	uint8_t tc_mptcp_version; /* version to use right now */
	uint32_t tc_mptcp_next_version_try; /* Time until we try the preferred version again */
};

struct tcp_cache_head {
	SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t tch_mtx;
};

struct tcp_cache_key_src {
	struct ifnet *ifp;
	in_4_6_addr laddr;
	in_4_6_addr faddr;
	int af;
};

static uint32_t tcp_cache_hash_seed;

size_t tcp_cache_size;

/*
 * The maximum depth of the hash-bucket. This way we limit the tcp_cache to
 * TCP_CACHE_BUCKET_SIZE * tcp_cache_size and have "natural" garbage collection.
 */
#define TCP_CACHE_BUCKET_SIZE 5
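
/*
 * Editorial sketch of the arithmetic (not part of the original source):
 * with the 1024-bucket maximum computed in tcp_cache_init() below, this
 * depth bounds the cache at 5 * 1024 = 5120 entries; once a bucket is
 * full, the least-recently accessed entry in it gets recycled instead
 * of growing the list.
 */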

static struct tcp_cache_head *tcp_cache;

static LCK_ATTR_DECLARE(tcp_cache_mtx_attr, 0, 0);
static LCK_GRP_DECLARE(tcp_cache_mtx_grp, "tcpcache");

static struct tcp_heuristics_head *tcp_heuristics;

static LCK_ATTR_DECLARE(tcp_heuristic_mtx_attr, 0, 0);
static LCK_GRP_DECLARE(tcp_heuristic_mtx_grp, "tcpheuristic");

static uint32_t tcp_backoff_maximum = 65536;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO");

static uint32_t tcp_ecn_timeout = 60;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN");

static int disable_tcp_heuristics = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)");

static uint32_t mptcp_version_timeout = 24 * 60;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mptcp_version_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_version_timeout, 24 * 60, "Initial minutes to wait before re-trying MPTCP's preferred version");


static uint32_t
tcp_min_to_hz(uint32_t minutes)
{
	if (minutes > 65536) {
		return (uint32_t)65536 * 60 * TCP_RETRANSHZ;
	}

	return minutes * 60 * TCP_RETRANSHZ;
}
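
/*
 * Editorial example of the conversion, assuming tcp_now advances
 * TCP_RETRANSHZ times per second: tcp_min_to_hz(1) == 60 * TCP_RETRANSHZ
 * ticks, and inputs above 65536 minutes are clamped so that the
 * multiplication cannot overflow 32 bits.
 */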

/*
 * This number is coupled with tcp_ecn_timeout, because we want to prevent
 * integer overflow. We need an inexpensive way to prevent integer overflow
 * while still allowing a dynamic sysctl.
 */
#define TCP_CACHE_OVERFLOW_PROTECT 9
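
/*
 * Worked example of the coupling (editorial, assuming the default
 * tcp_ecn_timeout of 60 minutes and TCP_RETRANSHZ == 10): the backoff
 * computations below shift tcp_min_to_hz(tcp_ecn_timeout) == 36000 ticks
 * left by at most 9 bits before a counter saturates, i.e. roughly
 * 18.4 million ticks, which still fits comfortably in a uint32_t. A much
 * larger sysctl value for tcp_ecn_timeout would break this invariant.
 */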

/* Number of SYN-losses we accept */
#define TFO_MAX_COOKIE_LOSS 2
#define ECN_MAX_SYN_LOSS 2
#define MPTCP_MAX_SYN_LOSS 2
#define MPTCP_SUCCESS_TRIGGER 10
#define MPTCP_VERSION_MAX_FAIL 2
#define ECN_MAX_DROPRST 1
#define ECN_MAX_DROPRXMT 4
#define ECN_MAX_SYNRST 4

/* Flags for setting/unsetting loss-heuristics, limited to 4 bytes */
#define TCPCACHE_F_TFO_REQ 0x01
#define TCPCACHE_F_TFO_DATA 0x02
#define TCPCACHE_F_ECN 0x04
#define TCPCACHE_F_MPTCP 0x08
#define TCPCACHE_F_ECN_DROPRST 0x10
#define TCPCACHE_F_ECN_DROPRXMT 0x20
#define TCPCACHE_F_TFO_REQ_RST 0x40
#define TCPCACHE_F_TFO_DATA_RST 0x80
#define TCPCACHE_F_ECN_SYNRST 0x100

/* Always retry ECN after backing off to this level for some heuristics */
#define ECN_RETRY_LIMIT 9

#define TCP_CACHE_INC_IFNET_STAT(_ifp_, _af_, _stat_) { \
	if ((_ifp_) != NULL) { \
		if ((_af_) == AF_INET6) { \
			(_ifp_)->if_ipv6_stat->_stat_++;\
		} else { \
			(_ifp_)->if_ipv4_stat->_stat_++;\
		}\
	}\
}
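
/*
 * Usage sketch for the macro above (editorial, mirroring the call sites
 * later in this file):
 *
 *	TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, ecn_fallback_synloss);
 *
 * bumps if_ipv6_stat->ecn_fallback_synloss or
 * if_ipv4_stat->ecn_fallback_synloss depending on the address family,
 * and is a no-op when the ifp is NULL.
 */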

/*
 * Round up to next higher power-of 2. See "Bit Twiddling Hacks".
 *
 * Might be worth moving this to a library so that others
 * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop.
 */
static uint32_t
tcp_cache_roundup2(uint32_t a)
{
	a--;
	a |= a >> 1;
	a |= a >> 2;
	a |= a >> 4;
	a |= a >> 8;
	a |= a >> 16;
	a++;

	return a;
}
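
/*
 * Editorial examples: tcp_cache_roundup2(3) == 4, tcp_cache_roundup2(33) == 64,
 * and powers of two map to themselves (tcp_cache_roundup2(64) == 64) thanks to
 * the initial decrement. An input of 0 also yields 0 with this bit-smearing
 * approach.
 */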

static void
tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
	struct ifnet *ifp = tcks->ifp;
	uint8_t len = sizeof(key->thk_net_signature);
	uint16_t flags;

	if (tcks->af == AF_INET6) {
		int ret;

		key->thk_family = AF_INET6;
		ret = ifnet_get_netsignature(ifp, AF_INET6, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifn is NULL
		 * (we made sure that in the other cases it does not). So,
		 * in this case we should take the connection's address.
		 */
		if (ret == ENOENT || ret == EINVAL) {
			memcpy(&key->thk_ip.addr6, &tcks->laddr.addr6, sizeof(struct in6_addr));
		}
	} else {
		int ret;

		key->thk_family = AF_INET;
		ret = ifnet_get_netsignature(ifp, AF_INET, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifn is NULL
		 * (we made sure that in the other cases it does not). So,
		 * in this case we should take the connection's address.
		 */
		if (ret == ENOENT || ret == EINVAL) {
			memcpy(&key->thk_ip.addr, &tcks->laddr.addr, sizeof(struct in_addr));
		}
	}
}

static uint16_t
tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key)
{
	uint32_t hash;

	bzero(key, sizeof(struct tcp_cache_key));

	tcp_cache_hash_src(tcks, &key->tck_src);

	if (tcks->af == AF_INET6) {
		key->tck_family = AF_INET6;
		memcpy(&key->tck_dst.addr6, &tcks->faddr.addr6,
		    sizeof(struct in6_addr));
	} else {
		key->tck_family = AF_INET;
		memcpy(&key->tck_dst.addr, &tcks->faddr.addr,
		    sizeof(struct in_addr));
	}

	hash = net_flowhash(key, sizeof(struct tcp_cache_key),
	    tcp_cache_hash_seed);

	return (uint16_t)(hash & (tcp_cache_size - 1));
}

static void
tcp_cache_unlock(struct tcp_cache_head *head)
{
	lck_mtx_unlock(&head->tch_mtx);
}

/*
 * Make sure that everything that happens after tcp_getcache_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and copies out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer, so that the caller
 * can give it back to us for tcp_cache_unlock().
 */
static struct tcp_cache *
tcp_getcache_with_lock(struct tcp_cache_key_src *tcks,
    int create, struct tcp_cache_head **headarg)
{
	struct tcp_cache *tpcache = NULL;
	struct tcp_cache_head *head;
	struct tcp_cache_key key;
	uint16_t hash;
	int i = 0;

	hash = tcp_cache_hash(tcks, &key);
	head = &tcp_cache[hash];

	lck_mtx_lock(&head->tch_mtx);

	/*** First step: Look for the tcp_cache in our bucket ***/
	SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
		if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0) {
			break;
		}

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpcache == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_cache *oldest_cache = NULL;
			uint32_t max_age = 0;

			/* Look for the oldest tcp_cache in the bucket */
			SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
				uint32_t age = tcp_now - tpcache->tc_last_access;
				if (age > max_age) {
					max_age = age;
					oldest_cache = tpcache;
				}
			}
			VERIFY(oldest_cache != NULL);

			tpcache = oldest_cache;

			/* We recycle, thus let's indicate that there is no cookie */
			tpcache->tc_tfo_cookie_len = 0;
		} else {
			/* Create a new cache and add it to the list */
			tpcache = kalloc_type(struct tcp_cache, Z_NOWAIT | Z_ZERO);
			if (tpcache == NULL) {
				os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__);
				goto out_null;
			}

			tpcache->tc_mptcp_version = (uint8_t)mptcp_preferred_version;
			tpcache->tc_mptcp_next_version_try = tcp_now;

			SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list);
		}

		memcpy(&tpcache->tc_key, &key, sizeof(key));
	}

	if (tpcache == NULL) {
		goto out_null;
	}

	/* Update timestamp for garbage collection purposes */
	tpcache->tc_last_access = tcp_now;
	*headarg = head;

	return tpcache;

out_null:
	tcp_cache_unlock(head);
	return NULL;
}
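
/*
 * Canonical usage of the lookup above (editorial sketch of the pattern the
 * rest of this file follows): keep the critical section short and hand the
 * bucket head back to tcp_cache_unlock().
 *
 *	struct tcp_cache_head *head;
 *	struct tcp_cache *tpcache;
 *
 *	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
 *	if (tpcache == NULL) {
 *		return;	// bucket lock already dropped on failure
 *	}
 *	// ... brief reads/writes of *tpcache ...
 *	tcp_cache_unlock(head);
 */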

static void
tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks)
{
	struct inpcb *inp = tp->t_inpcb;
	memset(tcks, 0, sizeof(*tcks));

	tcks->ifp = inp->inp_last_outifp;

	if (inp->inp_vflag & INP_IPV6) {
		memcpy(&tcks->laddr.addr6, &inp->in6p_laddr, sizeof(struct in6_addr));
		memcpy(&tcks->faddr.addr6, &inp->in6p_faddr, sizeof(struct in6_addr));
		tcks->af = AF_INET6;
	} else {
		memcpy(&tcks->laddr.addr, &inp->inp_laddr, sizeof(struct in_addr));
		memcpy(&tcks->faddr.addr, &inp->inp_faddr, sizeof(struct in_addr));
		tcks->af = AF_INET;
	}

	return;
}

static void
mptcp_version_cache_key_src_init(struct sockaddr *dst, struct tcp_cache_key_src *tcks)
{
	memset(tcks, 0, sizeof(*tcks));

	if (dst->sa_family == AF_INET) {
		memcpy(&tcks->faddr.addr, &SIN(dst)->sin_addr, sizeof(struct in_addr));
		tcks->af = AF_INET;
	} else {
		memcpy(&tcks->faddr.addr6, &SIN6(dst)->sin6_addr, sizeof(struct in6_addr));
		tcks->af = AF_INET6;
	}

	return;
}

static void
tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, uint8_t len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
	if (tpcache == NULL) {
		return;
	}

	tpcache->tc_tfo_cookie_len = len > TFO_COOKIE_LEN_MAX ?
	    TFO_COOKIE_LEN_MAX : len;
	memcpy(tpcache->tc_tfo_cookie, cookie, tpcache->tc_tfo_cookie_len);

	tcp_cache_unlock(head);
}

void
tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, uint8_t len)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_cache_set_cookie_common(&tcks, cookie, len);
}

static int
tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, uint8_t *len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
	if (tpcache == NULL) {
		return 0;
	}

	if (tpcache->tc_tfo_cookie_len == 0) {
		tcp_cache_unlock(head);
		return 0;
	}

	/*
	 * Not enough space - this should never happen as it has been checked
	 * in tcp_tfo_check. So, fail here!
	 */
	VERIFY(tpcache->tc_tfo_cookie_len <= *len);

	memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len);
	*len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return 1;
}

/*
 * Get the cookie related to 'tp', and copy it into 'cookie', provided that len
 * is big enough (len designates the available memory).
 * Upon return, 'len' is set to the cookie's length.
 *
 * Returns 0 if we should request a cookie.
 * Returns 1 if the cookie has been found and written.
 */
int
tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, uint8_t *len)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	return tcp_cache_get_cookie_common(&tcks, cookie, len);
}

static unsigned int
tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;
	unsigned int cookie_len;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
	if (tpcache == NULL) {
		return 0;
	}

	cookie_len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return cookie_len;
}

unsigned int
tcp_cache_get_cookie_len(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	return tcp_cache_get_cookie_len_common(&tcks);
}

/*
 * @return:
 * 0	MPTCP_VERSION_0
 * 1	MPTCP_VERSION_1
 */
uint8_t
tcp_cache_get_mptcp_version(struct sockaddr *dst)
{
	struct tcp_cache_key_src tcks;
	mptcp_version_cache_key_src_init(dst, &tcks);
	uint8_t version = (uint8_t)mptcp_preferred_version;

	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(&tcks, 1, &head);
	if (tpcache == NULL) {
		return version;
	}

	version = tpcache->tc_mptcp_version;

	/* Let's see if we should try the preferred version again */
	if (!tpcache->tc_mptcp_version_confirmed &&
	    version != mptcp_preferred_version &&
	    TSTMP_GEQ(tcp_now, tpcache->tc_mptcp_next_version_try)) {
		version = (uint8_t)mptcp_preferred_version;
	}

	tcp_cache_unlock(head);
	return version;
}

void
tcp_cache_update_mptcp_version(struct tcpcb *tp, boolean_t succeeded)
{
	uint8_t version = tptomptp(tp)->mpt_version;
	struct inpcb *inp = tp->t_inpcb;
	struct tcp_cache_key_src tcks;
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	if (inp->inp_vflag & INP_IPV6) {
		struct sockaddr_in6 dst = {
			.sin6_len = sizeof(struct sockaddr_in6),
			.sin6_family = AF_INET6,
			.sin6_addr = inp->in6p_faddr,
		};
		mptcp_version_cache_key_src_init((struct sockaddr *)&dst, &tcks);
	} else {
		struct sockaddr_in dst = {
			.sin_len = sizeof(struct sockaddr_in),
			.sin_family = AF_INET,
			.sin_addr = inp->inp_faddr,
		};
		mptcp_version_cache_key_src_init((struct sockaddr *)&dst, &tcks);
	}

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(&tcks, 1, &head);
	if (tpcache == NULL) {
		return;
	}

	/* The version is already confirmed - we are done probing */
	if (tpcache->tc_mptcp_version_confirmed) {
		goto exit;
	}

	if (succeeded) {
		if (version == (uint8_t)mptcp_preferred_version) {
			/* Preferred version succeeded - make it sticky */
			tpcache->tc_mptcp_version_confirmed = true;
			tpcache->tc_mptcp_version = version;
		} else {
			/*
			 * If we are past the next version try, set it so
			 * that we try the preferred version again in 24h.
			 */
			if (TSTMP_GEQ(tcp_now, tpcache->tc_mptcp_next_version_try)) {
				tpcache->tc_mptcp_next_version_try = tcp_now + tcp_min_to_hz(mptcp_version_timeout);
			}
		}
	} else {
		if (version == (uint8_t)mptcp_preferred_version) {
			/* Preferred version failed - try the other version */
			tpcache->tc_mptcp_version = version == MPTCP_VERSION_0 ? MPTCP_VERSION_1 : MPTCP_VERSION_0;
		}
		/*
		 * The connection failed - make sure we give the preferred
		 * version another shot in 24h.
		 */
		if (TSTMP_GEQ(tcp_now, tpcache->tc_mptcp_next_version_try)) {
			tpcache->tc_mptcp_next_version_try = tcp_now + tcp_min_to_hz(mptcp_version_timeout);
		}
	}

exit:
	tcp_cache_unlock(head);
}

static uint16_t
tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
	uint32_t hash;

	bzero(key, sizeof(struct tcp_heuristic_key));

	tcp_cache_hash_src(tcks, key);

	hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
	    tcp_cache_hash_seed);

	return (uint16_t)(hash & (tcp_cache_size - 1));
}

static void
tcp_heuristic_unlock(struct tcp_heuristics_head *head)
{
	lck_mtx_unlock(&head->thh_mtx);
}

/*
 * Make sure that everything that happens after tcp_getheuristic_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and copies out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer, so that the caller
 * can give it back to us for tcp_heuristic_unlock().
 *
 * ToDo - way too much code-duplication. We should create an interface to handle
 * bucketized hashtables with recycling of the oldest element.
 */
static struct tcp_heuristic *
tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks,
    int create, struct tcp_heuristics_head **headarg)
{
	struct tcp_heuristic *tpheur = NULL;
	struct tcp_heuristics_head *head;
	struct tcp_heuristic_key key;
	uint16_t hash;
	int i = 0;

	hash = tcp_heuristics_hash(tcks, &key);
	head = &tcp_heuristics[hash];

	lck_mtx_lock(&head->thh_mtx);

	/*** First step: Look for the tcp_heur in our bucket ***/
	SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
		if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0) {
			break;
		}

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpheur == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_heuristic *oldest_heur = NULL;
			uint32_t max_age = 0;

			/* Look for the oldest tcp_heur in the bucket */
			SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
				uint32_t age = tcp_now - tpheur->th_last_access;
				if (age > max_age) {
					max_age = age;
					oldest_heur = tpheur;
				}
			}
			VERIFY(oldest_heur != NULL);

			tpheur = oldest_heur;

			/* We recycle - set everything to 0 */
			bzero(tpheur->th_val_start,
			    tpheur->th_val_end - tpheur->th_val_start);
		} else {
			/* Create a new heuristic and add it to the list */
			tpheur = kalloc_type(struct tcp_heuristic, Z_NOWAIT | Z_ZERO);
			if (tpheur == NULL) {
				os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__);
				goto out_null;
			}

			SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list);
		}

		/*
		 * Set to tcp_now, to make sure it won't be > than tcp_now in the
		 * near future.
		 */
		tpheur->th_ecn_backoff = tcp_now;
		tpheur->th_tfo_backoff_until = tcp_now;
		tpheur->th_mptcp_backoff = tcp_now;
		tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);

		memcpy(&tpheur->th_key, &key, sizeof(key));
	}

	if (tpheur == NULL) {
		goto out_null;
	}

	/* Update timestamp for garbage collection purposes */
	tpheur->th_last_access = tcp_now;
	*headarg = head;

	return tpheur;

out_null:
	tcp_heuristic_unlock(head);
	return NULL;
}

static void
tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, uint8_t flags)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	/*
	 * Always create heuristics here because MPTCP needs to write success
	 * into it. Thus, we always end up creating them.
	 */
	tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
	if (tpheur == NULL) {
		return;
	}

	if (flags & TCPCACHE_F_TFO_DATA) {
		if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-data loss to 0 from %u on heur %lx\n",
			    __func__, tpheur->th_tfo_data_loss, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_tfo_data_loss = 0;
	}

	if (flags & TCPCACHE_F_TFO_REQ) {
		if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-req loss to 0 from %u on heur %lx\n",
			    __func__, tpheur->th_tfo_req_loss, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_tfo_req_loss = 0;
	}

	if (flags & TCPCACHE_F_TFO_DATA_RST) {
		if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-data RST to 0 from %u on heur %lx\n",
			    __func__, tpheur->th_tfo_data_rst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_tfo_data_rst = 0;
	}

	if (flags & TCPCACHE_F_TFO_REQ_RST) {
		if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-req RST to 0 from %u on heur %lx\n",
			    __func__, tpheur->th_tfo_req_rst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_tfo_req_rst = 0;
	}

	if (flags & TCPCACHE_F_ECN) {
		if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS || tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting ECN-loss to 0 from %u and synrst from %u on heur %lx\n",
			    __func__, tpheur->th_ecn_loss, tpheur->th_ecn_synrst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_ecn_loss = 0;
		tpheur->th_ecn_synrst = 0;
	}

	if (flags & TCPCACHE_F_MPTCP) {
		tpheur->th_mptcp_loss = 0;
		if (tpheur->th_mptcp_success < MPTCP_SUCCESS_TRIGGER) {
			tpheur->th_mptcp_success++;

			if (tpheur->th_mptcp_success == MPTCP_SUCCESS_TRIGGER) {
				os_log(mptcp_log_handle, "%s disabling heuristics for 12 hours", __func__);
				tpheur->th_mptcp_heuristic_disabled = 1;
				/* Disable heuristics for 12 hours */
				tpheur->th_mptcp_backoff = tcp_now + tcp_min_to_hz(tcp_ecn_timeout * 12);
			}
		}
	}

	tcp_heuristic_unlock(head);
}

void
tcp_heuristic_tfo_success(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;
	uint8_t flag = 0;

	tcp_cache_key_src_create(tp, &tcks);

	if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
		flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ |
		    TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
	}
	if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
		flag = (TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
	}

	tcp_heuristic_reset_counters(&tcks, flag);
}

void
tcp_heuristic_mptcp_success(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_MPTCP);
}

void
tcp_heuristic_ecn_success(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
}

static void
__tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur)
{
	if (tpheur->th_tfo_in_backoff) {
		return;
	}

	tpheur->th_tfo_in_backoff = 1;

	if (tpheur->th_tfo_enabled_time) {
		uint32_t old_backoff = tpheur->th_tfo_backoff;

		tpheur->th_tfo_backoff -= (tcp_now - tpheur->th_tfo_enabled_time);
		if (tpheur->th_tfo_backoff > old_backoff) {
			tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
		}
	}

	tpheur->th_tfo_backoff_until = tcp_now + tpheur->th_tfo_backoff;

	/* Then, increase the backoff time */
	tpheur->th_tfo_backoff *= 2;

	if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum)) {
		tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
	}

	os_log(OS_LOG_DEFAULT, "%s disable TFO until %u now %u on %lx\n", __func__,
	    tpheur->th_tfo_backoff_until, tcp_now, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
}

static void
tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
	if (tpheur == NULL) {
		return;
	}

	__tcp_heuristic_tfo_middlebox_common(tpheur);

	tcp_heuristic_unlock(head);
}

static void
tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
    uint32_t flags)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
	if (tpheur == NULL) {
		return;
	}

	/* Limit to prevent integer-overflow during exponential backoff */
	if ((flags & TCPCACHE_F_TFO_DATA) && tpheur->th_tfo_data_loss < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_tfo_data_loss++;

		if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) {
			__tcp_heuristic_tfo_middlebox_common(tpheur);
		}
	}

	if ((flags & TCPCACHE_F_TFO_REQ) && tpheur->th_tfo_req_loss < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_tfo_req_loss++;

		if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) {
			__tcp_heuristic_tfo_middlebox_common(tpheur);
		}
	}

	if ((flags & TCPCACHE_F_TFO_DATA_RST) && tpheur->th_tfo_data_rst < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_tfo_data_rst++;

		if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) {
			__tcp_heuristic_tfo_middlebox_common(tpheur);
		}
	}

	if ((flags & TCPCACHE_F_TFO_REQ_RST) && tpheur->th_tfo_req_rst < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_tfo_req_rst++;

		if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) {
			__tcp_heuristic_tfo_middlebox_common(tpheur);
		}
	}

	if ((flags & TCPCACHE_F_ECN) &&
	    tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT &&
	    TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
		tpheur->th_ecn_loss++;
		if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS) {
			tcpstat.tcps_ecn_fallback_synloss++;
			TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, ecn_fallback_synloss);
			tpheur->th_ecn_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_ecn_loss - ECN_MAX_SYN_LOSS));

			os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for SYN-loss\n",
			    __func__, tpheur->th_ecn_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}

	if ((flags & TCPCACHE_F_MPTCP) &&
	    tpheur->th_mptcp_loss < TCP_CACHE_OVERFLOW_PROTECT &&
	    tpheur->th_mptcp_heuristic_disabled == 0) {
		tpheur->th_mptcp_loss++;
		if (tpheur->th_mptcp_loss >= MPTCP_MAX_SYN_LOSS) {
			/*
			 * Yes, we take tcp_ecn_timeout, to avoid adding yet
			 * another sysctl that is just used for testing.
			 */
			tpheur->th_mptcp_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_mptcp_loss - MPTCP_MAX_SYN_LOSS));
			tpheur->th_mptcp_in_backoff = 1;

			os_log(OS_LOG_DEFAULT, "%s disable MPTCP until %u now %u on %lx\n",
			    __func__, tpheur->th_mptcp_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}

	if ((flags & TCPCACHE_F_ECN_DROPRST) &&
	    tpheur->th_ecn_droprst < TCP_CACHE_OVERFLOW_PROTECT &&
	    TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
		tpheur->th_ecn_droprst++;
		if (tpheur->th_ecn_droprst >= ECN_MAX_DROPRST) {
			tcpstat.tcps_ecn_fallback_droprst++;
			TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
			    ecn_fallback_droprst);
			tpheur->th_ecn_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_ecn_droprst - ECN_MAX_DROPRST));

			os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for drop-RST\n",
			    __func__, tpheur->th_ecn_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}

	if ((flags & TCPCACHE_F_ECN_DROPRXMT) &&
	    tpheur->th_ecn_droprxmt < TCP_CACHE_OVERFLOW_PROTECT &&
	    TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
		tpheur->th_ecn_droprxmt++;
		if (tpheur->th_ecn_droprxmt >= ECN_MAX_DROPRXMT) {
			tcpstat.tcps_ecn_fallback_droprxmt++;
			TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
			    ecn_fallback_droprxmt);
			tpheur->th_ecn_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_ecn_droprxmt - ECN_MAX_DROPRXMT));

			os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for drop-Rxmit\n",
			    __func__, tpheur->th_ecn_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}
	if ((flags & TCPCACHE_F_ECN_SYNRST) &&
	    tpheur->th_ecn_synrst < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_ecn_synrst++;
		if (tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) {
			tcpstat.tcps_ecn_fallback_synrst++;
			TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
			    ecn_fallback_synrst);
			tpheur->th_ecn_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_ecn_synrst - ECN_MAX_SYNRST));

			os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for SYN-RST\n",
			    __func__, tpheur->th_ecn_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}
	tcp_heuristic_unlock(head);
}
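
/*
 * Editorial example of the resulting backoff schedule for ECN SYN-loss
 * (assuming the default tcp_ecn_timeout of 60 minutes): the 2nd loss
 * disables ECN for 60 minutes (shift of 0), the 3rd for 120 minutes,
 * the 4th for 240 minutes, and so on, doubling until the counter
 * saturates at TCP_CACHE_OVERFLOW_PROTECT.
 */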

void
tcp_heuristic_tfo_loss(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;
	uint32_t flag = 0;

	if (symptoms_is_wifi_lossy() &&
	    IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
		return;
	}

	tcp_cache_key_src_create(tp, &tcks);

	if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
		flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ);
	}
	if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
		flag = TCPCACHE_F_TFO_REQ;
	}

	tcp_heuristic_inc_counters(&tcks, flag);
}

void
tcp_heuristic_tfo_rst(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;
	uint32_t flag = 0;

	tcp_cache_key_src_create(tp, &tcks);

	if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
		flag = (TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
	}
	if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
		flag = TCPCACHE_F_TFO_REQ_RST;
	}

	tcp_heuristic_inc_counters(&tcks, flag);
}

void
tcp_heuristic_mptcp_loss(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	if (symptoms_is_wifi_lossy() &&
	    IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
		return;
	}

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_MPTCP);
}

void
tcp_heuristic_ecn_loss(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	if (symptoms_is_wifi_lossy() &&
	    IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
		return;
	}

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN);
}

void
tcp_heuristic_ecn_droprst(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
}

void
tcp_heuristic_ecn_droprxmt(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT);
}

void
tcp_heuristic_ecn_synrst(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
}

void
tcp_heuristic_tfo_middlebox(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tp->t_tfo_flags |= TFO_F_HEURISTIC_DONE;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_heuristic_tfo_middlebox_common(&tcks);
}

static void
tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
	if (tpheur == NULL) {
		return;
	}

	if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now)) {
		/* We are already in aggressive mode */
		tcp_heuristic_unlock(head);
		return;
	}

	/* Must be done before the increment below, otherwise we would start off with exponential backoff */
	tpheur->th_ecn_backoff = tcp_now +
	    (tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_aggressive));

	/*
	 * Ugly way to prevent integer overflow... limit the counter so that
	 * the shift above cannot overflow during exponential backoff.
	 */
	if (tpheur->th_ecn_aggressive < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_ecn_aggressive++;
	}

	tcp_heuristic_unlock(head);

	os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx\n", __func__,
	    tpheur->th_ecn_backoff, tcp_now, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
}

void
tcp_heuristic_ecn_aggressive(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_heuristic_ecn_aggressive_common(&tcks);
}

static boolean_t
tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	if (disable_tcp_heuristics) {
		return TRUE;
	}

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
	if (tpheur == NULL) {
		return TRUE;
	}

	if (tpheur->th_tfo_in_backoff == 0) {
		goto tfo_ok;
	}

	if (TSTMP_GT(tcp_now, tpheur->th_tfo_backoff_until)) {
		tpheur->th_tfo_in_backoff = 0;
		tpheur->th_tfo_enabled_time = tcp_now;

		goto tfo_ok;
	}

	tcp_heuristic_unlock(head);
	return FALSE;

tfo_ok:
	tcp_heuristic_unlock(head);
	return TRUE;
}

boolean_t
tcp_heuristic_do_tfo(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	if (tcp_heuristic_do_tfo_common(&tcks)) {
		return TRUE;
	}

	return FALSE;
}

/*
 * @return:
 * 0	Enable MPTCP (we are still discovering middleboxes)
 * -1	Enable MPTCP (heuristics have been temporarily disabled)
 * 1	Disable MPTCP
 */
int
tcp_heuristic_do_mptcp(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;
	struct tcp_heuristics_head *head = NULL;
	struct tcp_heuristic *tpheur;
	int ret = 0;

	if (disable_tcp_heuristics ||
	    (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE)) {
		return 0;
	}

	tcp_cache_key_src_create(tp, &tcks);

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(&tcks, 0, &head);
	if (tpheur == NULL) {
		return 0;
	}

	if (tpheur->th_mptcp_in_backoff == 0 ||
	    tpheur->th_mptcp_heuristic_disabled == 1) {
		goto mptcp_ok;
	}

	if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now)) {
		goto fallback;
	}

	tpheur->th_mptcp_in_backoff = 0;

mptcp_ok:
	if (tpheur->th_mptcp_heuristic_disabled) {
		ret = -1;

		if (TSTMP_GT(tcp_now, tpheur->th_mptcp_backoff)) {
			tpheur->th_mptcp_heuristic_disabled = 0;
			tpheur->th_mptcp_success = 0;
		}
	}

	tcp_heuristic_unlock(head);
	return ret;

fallback:
	if (head) {
		tcp_heuristic_unlock(head);
	}

	if (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FIRSTPARTY) {
		tcpstat.tcps_mptcp_fp_heuristic_fallback++;
	} else {
		tcpstat.tcps_mptcp_heuristic_fallback++;
	}

	return 1;
}

static boolean_t
tcp_heuristic_do_ecn_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;
	boolean_t ret = TRUE;

	if (disable_tcp_heuristics) {
		return TRUE;
	}

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
	if (tpheur == NULL) {
		return ret;
	}

	if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now)) {
		ret = FALSE;
	} else {
		/* Reset the following counters to start re-evaluating */
		if (tpheur->th_ecn_droprst >= ECN_RETRY_LIMIT) {
			tpheur->th_ecn_droprst = 0;
		}
		if (tpheur->th_ecn_droprxmt >= ECN_RETRY_LIMIT) {
			tpheur->th_ecn_droprxmt = 0;
		}
		if (tpheur->th_ecn_synrst >= ECN_RETRY_LIMIT) {
			tpheur->th_ecn_synrst = 0;
		}

		/* Make sure it follows along */
		tpheur->th_ecn_backoff = tcp_now;
	}

	tcp_heuristic_unlock(head);

	return ret;
}

boolean_t
tcp_heuristic_do_ecn(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	return tcp_heuristic_do_ecn_common(&tcks);
}

boolean_t
tcp_heuristic_do_ecn_with_address(struct ifnet *ifp,
    union sockaddr_in_4_6 *local_address)
{
	struct tcp_cache_key_src tcks;

	memset(&tcks, 0, sizeof(tcks));
	tcks.ifp = ifp;

	calculate_tcp_clock();

	if (local_address->sa.sa_family == AF_INET6) {
		memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
		tcks.af = AF_INET6;
	} else if (local_address->sa.sa_family == AF_INET) {
		memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
		tcks.af = AF_INET;
	}

	return tcp_heuristic_do_ecn_common(&tcks);
}

void
tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer,
    struct ifnet *ifp, union sockaddr_in_4_6 *local_address)
{
	struct tcp_cache_key_src tcks;

	memset(&tcks, 0, sizeof(tcks));
	tcks.ifp = ifp;

	calculate_tcp_clock();

	if (local_address->sa.sa_family == AF_INET6) {
		memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
		tcks.af = AF_INET6;
	} else if (local_address->sa.sa_family == AF_INET) {
		memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
		tcks.af = AF_INET;
	}

	if (necp_buffer->necp_tcp_ecn_heuristics_success) {
		tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_loss) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rst) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rxmt) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_syn_rst) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_aggressive) {
		tcp_heuristic_ecn_aggressive_common(&tcks);
	}

	return;
}

boolean_t
tcp_heuristic_do_tfo_with_address(struct ifnet *ifp,
    union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address,
    uint8_t *cookie, uint8_t *cookie_len)
{
	struct tcp_cache_key_src tcks;

	memset(&tcks, 0, sizeof(tcks));
	tcks.ifp = ifp;

	calculate_tcp_clock();

	if (remote_address->sa.sa_family == AF_INET6) {
		memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr));
		tcks.af = AF_INET6;
	} else if (remote_address->sa.sa_family == AF_INET) {
		memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
		memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr));
		tcks.af = AF_INET;
	}

	if (tcp_heuristic_do_tfo_common(&tcks)) {
		if (!tcp_cache_get_cookie_common(&tcks, cookie, cookie_len)) {
			*cookie_len = 0;
		}
		return TRUE;
	}

	return FALSE;
}

void
tcp_heuristics_tfo_update(struct necp_tcp_tfo_cache *necp_buffer,
    struct ifnet *ifp, union sockaddr_in_4_6 *local_address,
    union sockaddr_in_4_6 *remote_address)
{
	struct tcp_cache_key_src tcks;

	memset(&tcks, 0, sizeof(tcks));
	tcks.ifp = ifp;

	calculate_tcp_clock();

	if (remote_address->sa.sa_family == AF_INET6) {
		memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr));
		tcks.af = AF_INET6;
	} else if (remote_address->sa.sa_family == AF_INET) {
		memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
		memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr));
		tcks.af = AF_INET;
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_success) {
		tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA |
		    TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_success_req) {
		tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_loss) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_loss_req) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_rst_data) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_rst_req) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_middlebox) {
		tcp_heuristic_tfo_middlebox_common(&tcks);
	}

	if (necp_buffer->necp_tcp_tfo_cookie_len != 0) {
		tcp_cache_set_cookie_common(&tcks,
		    necp_buffer->necp_tcp_tfo_cookie, necp_buffer->necp_tcp_tfo_cookie_len);
	}

	return;
}

static void
sysctl_cleartfocache(void)
{
	int i;

	for (i = 0; i < tcp_cache_size; i++) {
		struct tcp_cache_head *head = &tcp_cache[i];
		struct tcp_cache *tpcache, *tmp;
		struct tcp_heuristics_head *hhead = &tcp_heuristics[i];
		struct tcp_heuristic *tpheur, *htmp;

		lck_mtx_lock(&head->tch_mtx);
		SLIST_FOREACH_SAFE(tpcache, &head->tcp_caches, list, tmp) {
			SLIST_REMOVE(&head->tcp_caches, tpcache, tcp_cache, list);
			kfree_type(struct tcp_cache, tpcache);
		}
		lck_mtx_unlock(&head->tch_mtx);

		lck_mtx_lock(&hhead->thh_mtx);
		SLIST_FOREACH_SAFE(tpheur, &hhead->tcp_heuristics, list, htmp) {
			SLIST_REMOVE(&hhead->tcp_heuristics, tpheur, tcp_heuristic, list);
			kfree_type(struct tcp_heuristic, tpheur);
		}
		lck_mtx_unlock(&hhead->thh_mtx);
	}
}

/* This sysctl is useful for testing purposes only */
static int tcpcleartfo = 0;

static int sysctl_cleartfo SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = 0, val, oldval = tcpcleartfo;

	val = oldval;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		if (error) {
			os_log_error(OS_LOG_DEFAULT, "%s could not parse int: %d", __func__, error);
		}
		return error;
	}

	/*
	 * The actual value does not matter. If the value is set, it triggers
	 * the clearing of the TFO cache. If a future implementation does not
	 * use the route entry to hold the TFO cache, replace the route sysctl.
	 */

	if (val != oldval) {
		sysctl_cleartfocache();
	}

	tcpcleartfo = val;

	return error;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I",
    "Toggle to clear the TFO destination based heuristic cache");
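
/*
 * Editorial usage note: writing a value different from the current one,
 * e.g.
 *
 *	sysctl net.inet.tcp.clear_tfocache=1
 *
 * flushes every tcp_cache and tcp_heuristic bucket via
 * sysctl_cleartfocache() above.
 */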

void
tcp_cache_init(void)
{
	uint64_t sane_size_meg = sane_size / 1024 / 1024;

	/*
	 * On machines with <100MB of memory this will result in a (full) cache-size
	 * of 32 entries, thus 32 * 5 * 64bytes = 10KB. (about 0.01 %)
	 * On machines with > 4GB of memory, we have a cache-size of 1024 entries,
	 * thus about 327KB.
	 *
	 * Side-note: we convert to uint32_t. If sane_size is more than
	 * 16000 TB, we lose precision. But, who cares? :)
	 */
	tcp_cache_size = tcp_cache_roundup2((uint32_t)(sane_size_meg >> 2));
	if (tcp_cache_size < 32) {
		tcp_cache_size = 32;
	} else if (tcp_cache_size > 1024) {
		tcp_cache_size = 1024;
	}
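
	/*
	 * Editorial sizing example: with a sane_size of 8 GB, sane_size_meg >> 2
	 * == 2048, tcp_cache_roundup2() keeps it at 2048, and the clamp above
	 * reduces it to the 1024-bucket maximum; a 64 MB machine would compute
	 * 16 and be raised to the 32-bucket minimum.
	 */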

	tcp_cache = zalloc_permanent(sizeof(struct tcp_cache_head) * tcp_cache_size,
	    ZALIGN(struct tcp_cache_head));

	tcp_heuristics = zalloc_permanent(sizeof(struct tcp_heuristics_head) * tcp_cache_size,
	    ZALIGN(struct tcp_heuristics_head));

	for (int i = 0; i < tcp_cache_size; i++) {
		lck_mtx_init(&tcp_cache[i].tch_mtx, &tcp_cache_mtx_grp,
		    &tcp_cache_mtx_attr);
		SLIST_INIT(&tcp_cache[i].tcp_caches);

		lck_mtx_init(&tcp_heuristics[i].thh_mtx, &tcp_heuristic_mtx_grp,
		    &tcp_heuristic_mtx_attr);
		SLIST_INIT(&tcp_heuristics[i].tcp_heuristics);
	}

	tcp_cache_hash_seed = RandomULong();
}