/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
 */

#include "tcp_includes.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mcache.h>
#include <sys/queue.h>
#include <kern/locks.h>
#include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */
#include <mach/boolean.h>

#include <net/route.h>
#include <net/if_var.h>
#include <net/ntstat.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet/tcp_log.h>

#include <sys/kdebug.h>
#include <mach/sdt.h>
#include <netinet/mptcp_var.h>

/* Max number of times a stretch ack can be delayed on a connection */
#define TCP_STRETCHACK_DELAY_THRESHOLD  5

/*
 * If the host processor has been sleeping for too long, this is the threshold
 * used to avoid sending stale retransmissions.
 */
#define TCP_SLEEP_TOO_LONG      (10 * 60 * 1000) /* 10 minutes in ms */

/* tcp timer list */
struct tcptimerlist tcp_timer_list;

/* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
struct tcptailq tcp_tw_tailq;


static int
sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
	int error, temp;
	long s, tt;

	tt = *(int *)arg1;
	s = tt * 1000 / TCP_RETRANSHZ;
	if (tt < 0 || s > INT_MAX) {
		return EINVAL;
	}
	temp = (int)s;

	error = sysctl_handle_int(oidp, &temp, 0, req);
	if (error || !req->newptr) {
		return error;
	}

	tt = (long)temp * TCP_RETRANSHZ / 1000;
	if (tt < 1 || tt > INT_MAX) {
		return EINVAL;
	}

	*(int *)arg1 = (int)tt;
	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
	return 0;
}
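
/*
 * Example of the round trip above, assuming TCP_RETRANSHZ is 1000 (i.e.
 * the TCP timers run at millisecond granularity): a stored value of
 * 75000 ticks reads back as 75000 * 1000 / 1000 = 75000 ms, and writing
 * 30000 ms stores 30000 ticks.  With a coarser clock, say a hypothetical
 * TCP_RETRANSHZ of 100, writing the same 30000 ms would store 3000 ticks.
 */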

#if SYSCTL_SKMEM
int tcp_keepinit = TCPTV_KEEP_INIT;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
    sysctl_msec_to_ticks, "I", "");

int tcp_keepidle = TCPTV_KEEP_IDLE;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
    sysctl_msec_to_ticks, "I", "");

int tcp_keepintvl = TCPTV_KEEPINTVL;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
    sysctl_msec_to_ticks, "I", "");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");

int tcp_msl = TCPTV_MSL;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_msl, offsetof(skmem_sysctl, tcp.msl),
    sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
#else /* SYSCTL_SKMEM */
int tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepcnt;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepcnt, 0, "number of times to repeat keepalive");

int tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
#endif /* SYSCTL_SKMEM */

/*
 * Avoid DoS with connections half-closed in FIN_WAIT_2
 */
int tcp_fin_timeout = TCPTV_FINWAIT2;

static int
sysctl_tcp_fin_timeout SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
	int error;
	int value = tcp_fin_timeout;

	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == USER_ADDR_NULL) {
		return error;
	}

	if (value == -1) {
		/* Reset to default value */
		value = TCPTV_FINWAIT2;
	} else {
		/* Convert from milliseconds */
		long big_value = value * TCP_RETRANSHZ / 1000;

		if (big_value < 0 || big_value > INT_MAX) {
			return EINVAL;
		}
		value = (int)big_value;
	}
	tcp_fin_timeout = value;
	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, value);
	return 0;
}
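
/*
 * Usage note, taken directly from the handler above: writing -1 to
 * net.inet.tcp.fin_timeout restores the TCPTV_FINWAIT2 default; any
 * other value is interpreted as milliseconds and converted to timer
 * ticks before being stored.
 */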

#if SYSCTL_SKMEM
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_fin_timeout, offsetof(skmem_sysctl, tcp.fin_timeout),
    sysctl_tcp_fin_timeout, "I", "");
#else /* SYSCTL_SKMEM */
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_fin_timeout, 0,
    sysctl_tcp_fin_timeout, "I", "");
#endif /* SYSCTL_SKMEM */

/*
 * Avoid DoS via TCP Robustness in Persist Condition
 * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
 * by allowing a system wide maximum persistence timeout value when in
 * Zero Window Probe mode.
 *
 * Expressed in milliseconds to be consistent with other timeout-related
 * values; the TCP socket option is in seconds.
 */
#if SYSCTL_SKMEM
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
    sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
#else /* SYSCTL_SKMEM */
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
    "Maximum persistence timeout for ZWP");
#endif /* SYSCTL_SKMEM */

SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
    "Assume SO_KEEPALIVE on all TCP connections");

/*
 * This parameter determines how long the timer list will stay in fast or
 * quick mode even though all connections are idle. In this state, the
 * timer will run more frequently anticipating new data.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
    TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");

/*
 * See tcp_syn_backoff[] for interval values between SYN retransmits;
 * the value set below defines the number of retransmits before we
 * disable the timestamp and window scaling options during subsequent
 * SYN retransmits. Setting it to 0 disables dropping those two
 * options.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
    10, "Number of retransmitted SYNs before disabling RFC 1323 "
    "options on local connections");

static int tcp_timer_advanced = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
    "Number of times one of the timers was advanced");

static int tcp_resched_timerlist = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
    "Number of times timer list was rescheduled as part of processing a packet");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
    "Path MTU Discovery Black Hole Detection");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
    "Path MTU Discovery Black Hole Detection lowered MSS");

#if (DEBUG || DEVELOPMENT)
int tcp_probe_if_fix_port = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, probe_if_fix_port,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_probe_if_fix_port, 0, "");
#endif /* (DEBUG || DEVELOPMENT) */

static u_int32_t tcp_mss_rec_medium = 1200;
static u_int32_t tcp_mss_rec_low = 512;

#define TCP_REPORT_STATS_INTERVAL       43200 /* 12 hours, in seconds */
int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;

/* performed garbage collection of "used" sockets */
static boolean_t tcp_gc_done = FALSE;

/* max idle probes */
int tcp_maxpersistidle = TCPTV_KEEP_IDLE;

/*
 * The TCP delack timer is set to 100 ms. Since the processing of the timer
 * list in fast mode happens no faster than every 100 ms, the delayed ack
 * timer will fire somewhere between 100 and 200 ms.
 */
int tcp_delack = TCP_RETRANSHZ / 10;

#if MPTCP
/*
 * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
 */
int tcp_jack_rxmt = TCP_RETRANSHZ / 2;
#endif /* MPTCP */

static boolean_t tcp_itimer_done = FALSE;

static void tcp_remove_timer(struct tcpcb *tp);
static void tcp_sched_timerlist(uint32_t offset);
static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
    u_int16_t probe_if_index);
static inline void tcp_set_lotimer_index(struct tcpcb *);
__private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
__private_extern__ void tcp_report_stats(void);

static u_int64_t tcp_last_report_time;

/*
 * Structure to store previously reported stats so that we can send
 * incremental changes in each report interval.
 */
struct tcp_last_report_stats {
	u_int32_t tcps_connattempt;
	u_int32_t tcps_accepts;
	u_int32_t tcps_ecn_client_setup;
	u_int32_t tcps_ecn_server_setup;
	u_int32_t tcps_ecn_client_success;
	u_int32_t tcps_ecn_server_success;
	u_int32_t tcps_ecn_not_supported;
	u_int32_t tcps_ecn_lost_syn;
	u_int32_t tcps_ecn_lost_synack;
	u_int32_t tcps_ecn_recv_ce;
	u_int32_t tcps_ecn_recv_ece;
	u_int32_t tcps_ecn_sent_ece;
	u_int32_t tcps_ecn_conn_recv_ce;
	u_int32_t tcps_ecn_conn_recv_ece;
	u_int32_t tcps_ecn_conn_plnoce;
	u_int32_t tcps_ecn_conn_pl_ce;
	u_int32_t tcps_ecn_conn_nopl_ce;
	u_int32_t tcps_ecn_fallback_synloss;
	u_int32_t tcps_ecn_fallback_reorder;
	u_int32_t tcps_ecn_fallback_ce;

	/* TFO-related statistics */
	u_int32_t tcps_tfo_syn_data_rcv;
	u_int32_t tcps_tfo_cookie_req_rcv;
	u_int32_t tcps_tfo_cookie_sent;
	u_int32_t tcps_tfo_cookie_invalid;
	u_int32_t tcps_tfo_cookie_req;
	u_int32_t tcps_tfo_cookie_rcv;
	u_int32_t tcps_tfo_syn_data_sent;
	u_int32_t tcps_tfo_syn_data_acked;
	u_int32_t tcps_tfo_syn_loss;
	u_int32_t tcps_tfo_blackhole;
	u_int32_t tcps_tfo_cookie_wrong;
	u_int32_t tcps_tfo_no_cookie_rcv;
	u_int32_t tcps_tfo_heuristics_disable;
	u_int32_t tcps_tfo_sndblackhole;

	/* MPTCP-related statistics */
	u_int32_t tcps_mptcp_handover_attempt;
	u_int32_t tcps_mptcp_interactive_attempt;
	u_int32_t tcps_mptcp_aggregate_attempt;
	u_int32_t tcps_mptcp_fp_handover_attempt;
	u_int32_t tcps_mptcp_fp_interactive_attempt;
	u_int32_t tcps_mptcp_fp_aggregate_attempt;
	u_int32_t tcps_mptcp_heuristic_fallback;
	u_int32_t tcps_mptcp_fp_heuristic_fallback;
	u_int32_t tcps_mptcp_handover_success_wifi;
	u_int32_t tcps_mptcp_handover_success_cell;
	u_int32_t tcps_mptcp_interactive_success;
	u_int32_t tcps_mptcp_aggregate_success;
	u_int32_t tcps_mptcp_fp_handover_success_wifi;
	u_int32_t tcps_mptcp_fp_handover_success_cell;
	u_int32_t tcps_mptcp_fp_interactive_success;
	u_int32_t tcps_mptcp_fp_aggregate_success;
	u_int32_t tcps_mptcp_handover_cell_from_wifi;
	u_int32_t tcps_mptcp_handover_wifi_from_cell;
	u_int32_t tcps_mptcp_interactive_cell_from_wifi;
	u_int64_t tcps_mptcp_handover_cell_bytes;
	u_int64_t tcps_mptcp_interactive_cell_bytes;
	u_int64_t tcps_mptcp_aggregate_cell_bytes;
	u_int64_t tcps_mptcp_handover_all_bytes;
	u_int64_t tcps_mptcp_interactive_all_bytes;
	u_int64_t tcps_mptcp_aggregate_all_bytes;
	u_int32_t tcps_mptcp_back_to_wifi;
	u_int32_t tcps_mptcp_wifi_proxy;
	u_int32_t tcps_mptcp_cell_proxy;
	u_int32_t tcps_mptcp_triggered_cell;
};


/* Returns true if the timer is on the timer list */
#define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)

/* Run the TCP timerlist at least once every hour */
#define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)
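/*
 * With TCP_RETRANSHZ at its usual millisecond granularity (1000 ticks
 * per second), this works out to 60 * 60 * 1000 = 3,600,000 ticks, so
 * even a completely idle timer list gets scheduled once an hour.
 */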


static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
static boolean_t tcp_garbage_collect(struct inpcb *, int);

#define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))
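/*
 * This is the classic container_of pattern: te points at the embedded
 * tentry.le.le_next field, so subtracting that field's offset from the
 * entry address recovers the enclosing tcpcb.
 */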

#define VERIFY_NEXT_LINK(elm, field) do {	\
	if (LIST_NEXT((elm), field) != NULL &&	\
	    LIST_NEXT((elm), field)->field.le_prev !=	\
	    &((elm)->field.le_next))	\
		panic("Bad link elm %p next->prev != elm", (elm));	\
} while(0)

#define VERIFY_PREV_LINK(elm, field) do {	\
	if (*(elm)->field.le_prev != (elm))	\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while(0)

#define TCP_SET_TIMER_MODE(mode, i) do {	\
	if (IS_TIMER_HZ_10MS(i))	\
		(mode) |= TCP_TIMERLIST_10MS_MODE;	\
	else if (IS_TIMER_HZ_100MS(i))	\
		(mode) |= TCP_TIMERLIST_100MS_MODE;	\
	else	\
		(mode) |= TCP_TIMERLIST_500MS_MODE;	\
} while(0)
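/*
 * The timer list runs at one of three cadences; a timer index in the
 * 10 ms class (short timers such as retransmit) forces the fastest
 * mode, the 100 ms class (e.g. delayed ack) the middle one, and
 * everything else is serviced in 500 ms mode.
 */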

#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
    "Medium MSS based on recommendation in link status report");
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
    "Low MSS based on recommendation in link status report");

static int32_t tcp_change_mss_recommended = 0;
static int
sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int i, err = 0, changed = 0;
	struct ifnet *ifp;
	struct if_link_status ifsr;
	struct if_cellular_status_v1 *new_cell_sr;
	err = sysctl_io_number(req, tcp_change_mss_recommended,
	    sizeof(int32_t), &i, &changed);
	if (changed) {
		if (i < 0 || i > UINT16_MAX) {
			return EINVAL;
		}
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (IFNET_IS_CELLULAR(ifp)) {
				bzero(&ifsr, sizeof(ifsr));
				new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
				ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
				ifsr.ifsr_len = sizeof(*new_cell_sr);

				/* Set MSS recommended */
				new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
				new_cell_sr->mss_recommended = (uint16_t)i;
				err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(*new_cell_sr));
				if (err == 0) {
					tcp_change_mss_recommended = i;
				} else {
					break;
				}
			}
		}
		ifnet_head_done();
	}
	return err;
}
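
/*
 * Development-only knob: writing a value in [0, UINT16_MAX] publishes it
 * as the recommended uplink MSS in a v1 cellular link-status report on
 * every cellular interface; the TCP MSS-update path then picks the new
 * recommendation up for affected connections.
 */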

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
    0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
    "Report stats interval");
#endif /* (DEVELOPMENT || DEBUG) */

/*
 * Compare two timers. If there is a reset of the sign bit, it is safe
 * to assume that the timer has wrapped around. By doing signed
 * comparison, we take care of wrap around such that the value with the
 * sign bit reset is actually ahead of the other.
 */
inline int32_t
timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
{
	return (int32_t)((t1 + toff1) - (t2 + toff2));
}
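
/*
 * Worked example of the wraparound case: with t1 + toff1 == 0x00000010
 * and t2 + toff2 == 0xfffffff0, the unsigned subtraction yields 0x20,
 * which as int32_t is +32, so t1 is correctly seen as 32 ticks ahead
 * of t2 even though its raw value is numerically smaller.
 */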

/*
 * Add to tcp timewait list, delay is given in milliseconds.
 */
static void
add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
{
	struct inpcbinfo *pcbinfo = &tcbinfo;
	struct inpcb *inp = tp->t_inpcb;
	uint32_t timer;

	/* pcb list should be locked when we get here */
	LCK_RW_ASSERT(&pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);

	/* We may get here multiple times, so check */
	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
		pcbinfo->ipi_twcount++;
		inp->inp_flags2 |= INP2_TIMEWAIT;

		/* Remove from global inp list */
		LIST_REMOVE(inp, inp_list);
	} else {
		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
	}

	/* Compute the time at which this socket can be closed */
	timer = tcp_now + delay;

	/* We will use the TCPT_2MSL timer for tracking this delay */

	if (TIMER_IS_ON_LIST(tp)) {
		tcp_remove_timer(tp);
	}
	tp->t_timer[TCPT_2MSL] = timer;

	TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
}

void
add_to_time_wait(struct tcpcb *tp, uint32_t delay)
{
	struct inpcbinfo *pcbinfo = &tcbinfo;
	if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) {
		socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
	}

	tcp_del_fsw_flow(tp);

	/* 19182803: Notify nstat that connection is closing before waiting. */
	nstat_pcb_detach(tp->t_inpcb);

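	/*
	 * Lock ordering: ipi_lock must be taken ahead of the per-socket
	 * lock, so if the try-lock fails we drop the socket lock and
	 * re-acquire it around the blocking acquisition rather than
	 * blocking with the orders mixed.
	 */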
	if (!lck_rw_try_lock_exclusive(&pcbinfo->ipi_lock)) {
		socket_unlock(tp->t_inpcb->inp_socket, 0);
		lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
		socket_lock(tp->t_inpcb->inp_socket, 0);
	}
	add_to_time_wait_locked(tp, delay);
	lck_rw_done(&pcbinfo->ipi_lock);

	inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
}

/* If this is on time wait queue, remove it. */
void
tcp_remove_from_time_wait(struct inpcb *inp)
{
	struct tcpcb *tp = intotcpcb(inp);
	if (inp->inp_flags2 & INP2_TIMEWAIT) {
		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
	}
}

static boolean_t
tcp_garbage_collect(struct inpcb *inp, int istimewait)
{
	boolean_t active = FALSE;
	struct socket *so, *mp_so = NULL;
	struct tcpcb *tp;

	so = inp->inp_socket;
	tp = intotcpcb(inp);

	if (so->so_flags & SOF_MP_SUBFLOW) {
		mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
		if (!socket_try_lock(mp_so)) {
			mp_so = NULL;
			active = TRUE;
			goto out;
		}
		if (mpsotomppcb(mp_so)->mpp_inside > 0) {
			os_log(mptcp_log_handle, "%s - %lx: Still inside %d usecount %d\n", __func__,
			    (unsigned long)VM_KERNEL_ADDRPERM(mpsotompte(mp_so)),
			    mpsotomppcb(mp_so)->mpp_inside,
			    mp_so->so_usecount);
			socket_unlock(mp_so, 0);
			mp_so = NULL;
			active = TRUE;
			goto out;
		}
		/* We call socket_unlock with refcount further below */
		mp_so->so_usecount++;
		tptomptp(tp)->mpt_mpte->mpte_mppcb->mpp_inside++;
	}

	/*
	 * Skip if still in use or busy; it would have been more efficient
	 * if we were to test so_usecount against 0, but this isn't possible
	 * due to the current implementation of tcp_dropdropablreq() where
	 * overflow sockets that are eligible for garbage collection have
	 * their usecounts set to 1.
	 */
	if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
		active = TRUE;
		goto out;
	}

	/* Check again under the lock */
	if (so->so_usecount > 1) {
		if (inp->inp_wantcnt == WNT_STOPUSING) {
			active = TRUE;
		}
		lck_mtx_unlock(&inp->inpcb_mtx);
		goto out;
	}

	if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
	    tp->t_state != TCPS_CLOSED) {
		/* Become a regular mutex */
		lck_mtx_convert_spin(&inp->inpcb_mtx);
		tcp_close(tp);
	}

	/*
	 * Overflowed socket dropped from the listening queue? Do this
	 * only if we are called to clean up the time wait slots, since
	 * tcp_dropdropablreq() considers a socket to have been fully
	 * dropped after add_to_time_wait() is finished.
	 * Also handle the case of connections getting closed by the peer
	 * while in the queue as seen with rdar://6422317
	 *
	 */
	if (so->so_usecount == 1 &&
	    ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
	    (so->so_head != NULL) &&
	    ((so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) ==
	    (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE))))) {
		if (inp->inp_state != INPCB_STATE_DEAD) {
			/* Become a regular mutex */
			lck_mtx_convert_spin(&inp->inpcb_mtx);
			if (SOCK_CHECK_DOM(so, PF_INET6)) {
				in6_pcbdetach(inp);
			} else {
				in_pcbdetach(inp);
			}
		}
		VERIFY(so->so_usecount > 0);
		so->so_usecount--;
		if (inp->inp_wantcnt == WNT_STOPUSING) {
			active = TRUE;
		}
		lck_mtx_unlock(&inp->inpcb_mtx);
		goto out;
	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
		lck_mtx_unlock(&inp->inpcb_mtx);
		active = FALSE;
		goto out;
	}

	/*
	 * We get here because the PCB is no longer searchable
	 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
	 * (usecount is 0). This covers all cases, including overflow
	 * sockets and those that are considered as "embryonic",
	 * i.e. created by sonewconn() in TCP input path, and have
	 * not yet been committed. For the former, we reduce the usecount
	 * to 0 as done by the code above. For the latter, the usecount
	 * would have reduced to 0 as part of calling soabort() when the
	 * socket is dropped at the end of tcp_input().
	 */
	if (so->so_usecount == 0) {
		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
		/* Become a regular mutex */
		lck_mtx_convert_spin(&inp->inpcb_mtx);

		/*
		 * If this tp still happens to be on the timer list,
		 * take it out
		 */
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}

		if (inp->inp_state != INPCB_STATE_DEAD) {
			if (SOCK_CHECK_DOM(so, PF_INET6)) {
				in6_pcbdetach(inp);
			} else {
				in_pcbdetach(inp);
			}
		}

		if (mp_so) {
			mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);

			/* so is now unlinked from mp_so - let's drop the lock */
			socket_unlock(mp_so, 1);
			mp_so = NULL;
		}

		in_pcbdispose(inp);
		active = FALSE;
		goto out;
	}

	lck_mtx_unlock(&inp->inpcb_mtx);
	active = TRUE;

out:
	if (mp_so) {
		socket_unlock(mp_so, 1);
	}

	return active;
}

/*
 * TCP garbage collector callback (inpcb_timer_func_t).
 *
 * Bumps the gc request counters for pcbs that will need to be gc-ed
 * soon; a non-zero count keeps the timer active.
 */
void
tcp_gc(struct inpcbinfo *ipi)
{
	struct inpcb *inp, *nxt;
	struct tcpcb *tw_tp, *tw_ntp;
#if TCPDEBUG
	int ostate;
#endif
#if KDEBUG
	static int tws_checked = 0;
#endif

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/*
	 * Update tcp_now here as it may get used while
	 * processing the slow timer.
	 */
	calculate_tcp_clock();

	/*
	 * Garbage collect socket/tcpcb: We need to acquire the list lock
	 * exclusively to do this
	 */

	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
		/* don't sweat it this time; cleanup was done last time */
		if (tcp_gc_done == TRUE) {
			tcp_gc_done = FALSE;
			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
			    tws_checked, cur_tw_slot, 0, 0, 0);
			/* Lock upgrade failed, give up this round */
			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
			return;
		}
		/* Upgrade failed, lost lock now take it again exclusive */
		lck_rw_lock_exclusive(&ipi->ipi_lock);
	}
	tcp_gc_done = TRUE;

	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
		if (tcp_garbage_collect(inp, 0)) {
			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
		}
	}

	/* Now cleanup the time wait ones */
	TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
		/*
		 * We check the timestamp here without holding the
		 * socket lock for better performance. If there are
		 * any pcbs in time-wait, the timer will get rescheduled.
		 * Hence some error in this check can be tolerated.
		 *
		 * Sometimes a socket on time-wait queue can be closed if
		 * 2MSL timer expired but the application still has a
		 * usecount on it.
		 */
		if (tw_tp->t_state == TCPS_CLOSED ||
		    TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
			if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) {
				atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
			}
		}
	}

	/* take into account pcbs that are still in time_wait_slots */
	atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount);

	lck_rw_done(&ipi->ipi_lock);

	/* Clean up the socache while we are here */
	if (so_cache_timer()) {
		atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
	}

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
	    cur_tw_slot, 0, 0, 0);

	return;
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
	int i;

	tcp_remove_timer(tp);
	for (i = 0; i < TCPT_NTIMERS; i++) {
		tp->t_timer[i] = 0;
	}
	tp->tentry.timer_start = tcp_now;
	tp->tentry.index = TCPT_NONE;
}

int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
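
/*
 * Note: 1 + 2 + 4 + 8 + 16 + 32 + 7 * 64 == 511, which is where the
 * tcp_totbackoff value below comes from; the persist timer uses it to
 * bound the total idle time a fully backed-off connection may sit in
 * zero-window-probe state before being dropped.
 */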

static int tcp_totbackoff = 511;        /* sum of tcp_backoff[] */

void
tcp_rexmt_save_state(struct tcpcb *tp)
{
	u_int32_t fsize;
	if (TSTMP_SUPPORTED(tp)) {
		/*
		 * Since timestamps are supported on the connection,
		 * we can do recovery as described in RFC 4015.
		 */
		fsize = tp->snd_max - tp->snd_una;
		tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
		tp->snd_recover_prev = tp->snd_recover;
	} else {
		/*
		 * Timestamp option is not supported on this connection.
		 * Record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight. See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp)) {
			tp->t_flags |= TF_WASFRECOVERY;
		} else {
			tp->t_flags &= ~TF_WASFRECOVERY;
		}
	}
	tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
	tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
	tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
}

/*
 * Revert to the older segment size if there is an indication that PMTU
 * blackhole detection was not needed.
 */
void
tcp_pmtud_revert_segment_size(struct tcpcb *tp)
{
	int32_t optlen;

	VERIFY(tp->t_pmtud_saved_maxopd > 0);
	tp->t_flags |= TF_PMTUD;
	tp->t_flags &= ~TF_BLACKHOLE;
	optlen = tp->t_maxopd - tp->t_maxseg;
	tp->t_maxopd = tp->t_pmtud_saved_maxopd;
	tp->t_maxseg = tp->t_maxopd - optlen;

	/*
	 * Reset the slow-start flight size as it
	 * may depend on the new MSS
	 */
	if (CC_ALGO(tp)->cwnd_init != NULL) {
		CC_ALGO(tp)->cwnd_init(tp);
	}

	if (TCP_USE_RLEDBAT(tp, tp->t_inpcb->inp_socket) &&
	    tcp_cc_rledbat.rwnd_init != NULL) {
		tcp_cc_rledbat.rwnd_init(tp);
	}

	tp->t_pmtud_start_ts = 0;
	tcpstat.tcps_pmtudbh_reverted++;

	/* change MSS according to recommendation, if there was one */
	tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
}

static uint32_t
tcp_pmtud_black_holed_next_mss(struct tcpcb *tp)
{
	/* Reduce the MSS to intermediary value */
	if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
		return tcp_pmtud_black_hole_mss;
	} else {
		if (tp->t_inpcb->inp_vflag & INP_IPV4) {
			return tcp_mssdflt;
		} else {
			return tcp_v6mssdflt;
		}
	}
}
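
/*
 * For example, a connection that negotiated a 1460-byte MSS first steps
 * down to the intermediary tcp_pmtud_black_hole_mss (1200 by default);
 * if retransmissions keep failing at that size, the next call falls
 * back to the protocol default minimum (tcp_mssdflt for IPv4,
 * tcp_v6mssdflt for IPv6).
 */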

/*
 * Send a packet designed to force a response
 * if the peer is up and reachable:
 * either an ACK if the connection is still alive,
 * or an RST if the peer has closed the connection
 * due to timeout or reboot.
 * Using sequence number tp->snd_una-1
 * causes the transmitted zero-length segment
 * to lie outside the receive window;
 * by the protocol spec, this requires the
 * correspondent TCP to respond.
 */
static bool
tcp_send_keep_alive(struct tcpcb *tp)
{
	struct tcptemp *t_template;

	tcpstat.tcps_keepprobe++;
	t_template = tcp_maketemplate(tp);
	if (t_template != NULL) {
		struct inpcb *inp = tp->t_inpcb;
		struct tcp_respond_args tra;

		bzero(&tra, sizeof(tra));
		tra.nocell = INP_NO_CELLULAR(inp) ? 1 : 0;
		tra.noexpensive = INP_NO_EXPENSIVE(inp) ? 1 : 0;
		tra.noconstrained = INP_NO_CONSTRAINED(inp) ? 1 : 0;
		tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp) ? 1 : 0;
		tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp) ? 1 : 0;
		tra.management_allowed = INP_MANAGEMENT_ALLOWED(inp) ? 1 : 0;
		tra.keep_alive = 1;
		if (tp->t_inpcb->inp_flags & INP_BOUND_IF) {
			tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
		} else {
			tra.ifscope = IFSCOPE_NONE;
		}
		tcp_respond(tp, t_template->tt_ipgen,
		    &t_template->tt_t, (struct mbuf *)NULL,
		    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
		(void) m_free(dtom(t_template));
		return true;
	} else {
		return false;
	}
}

/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(struct tcpcb *tp, int timer)
{
	int32_t rexmt, optlen = 0, idle_time = 0;
	struct socket *so;
#if TCPDEBUG
	int ostate;
#endif
	u_int64_t accsleep_ms;
	u_int64_t last_sleep_ms = 0;

	so = tp->t_inpcb->inp_socket;
	idle_time = tcp_now - tp->t_rcvtime;

	switch (timer) {
	/*
	 * 2 MSL timeout in shutdown went off. If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
	 * delete connection control block.
	 * Otherwise (this case shouldn't happen), check again in a bit;
	 * we keep the socket in the main list in that case.
	 */
	case TCPT_2MSL:
		tcp_free_sackholes(tp);
		if (tp->t_state != TCPS_TIME_WAIT &&
		    tp->t_state != TCPS_FIN_WAIT_2 &&
		    ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
			    (u_int32_t)TCP_CONN_KEEPINTVL(tp));
		} else {
			if (tp->t_state == TCPS_FIN_WAIT_2) {
				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
				    "FIN wait timeout drop");
				tcpstat.tcps_fin_timeout_drops++;
				tp = tcp_drop(tp, 0);
			} else {
				tp = tcp_close(tp);
			}
			return tp;
		}
		break;

	/*
	 * Retransmission timer went off. Message has not
	 * been acked within retransmit interval. Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	case TCPT_REXMT:
		absolutetime_to_nanoseconds(mach_absolutetime_asleep,
		    &accsleep_ms);
		accsleep_ms = accsleep_ms / 1000000UL;
		if (accsleep_ms > tp->t_accsleep_ms) {
			last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
		}
		/*
		 * Drop a connection in the retransmit timer
		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
		 * times
		 * 2. If the time spent in this retransmission episode is
		 * more than the time limit set with TCP_RXT_CONNDROPTIME
		 * socket option
		 * 3. If TCP_RXT_FINDROP socket option was set and
		 * we have already retransmitted the FIN 3 times without
		 * receiving an ack
		 */
		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
		    (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
		    (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
		    ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
		    (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
		    (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
			if (tp->t_state == TCPS_ESTABLISHED &&
			    tp->t_rxt_minimum_timeout > 0) {
				/*
				 * Avoid dropping a connection if minimum
				 * timeout is set and that time did not
				 * pass. We will retry sending
				 * retransmissions at the maximum interval.
				 */
				if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
				    tp->t_rxt_minimum_timeout))) {
					tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
					goto retransmit_packet;
				}
			}
			if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
				tcpstat.tcps_rxtfindrop++;
			} else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
				tcpstat.tcps_drop_after_sleep++;
			} else {
				tcpstat.tcps_timeoutdrop++;
			}
			if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
				if (TCP_ECN_ENABLED(tp)) {
					INP_INC_IFNET_STAT(tp->t_inpcb,
					    ecn_on.rxmit_drop);
				} else {
					INP_INC_IFNET_STAT(tp->t_inpcb,
					    ecn_off.rxmit_drop);
				}
			}
			tp->t_rxtshift = TCP_MAXRXTSHIFT;
			soevent(so,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));

			if (TCP_ECN_ENABLED(tp) &&
			    tp->t_state == TCPS_ESTABLISHED) {
				tcp_heuristic_ecn_droprxmt(tp);
			}

			TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
			    "retransmission timeout drop");
			tp = tcp_drop(tp, tp->t_softerror ?
			    tp->t_softerror : ETIMEDOUT);

			break;
		}
retransmit_packet:
		tcpstat.tcps_rexmttimeo++;
		tp->t_accsleep_ms = accsleep_ms;

		if (tp->t_rxtshift == 1 &&
		    tp->t_state == TCPS_ESTABLISHED) {
			/* Set the time at which retransmission started. */
			tp->t_rxtstart = tcp_now;

			/*
			 * if this is the first retransmit timeout, save
			 * the state so that we can recover if the timeout
			 * is spurious.
			 */
			tcp_rexmt_save_state(tp);
			tcp_ccdbg_trace(tp, NULL, TCP_CC_FIRST_REXMT);
		}
#if MPTCP
		if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
		    (tp->t_state == TCPS_ESTABLISHED) &&
		    (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
			mptcp_act_on_txfail(so);
		}

		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
		    (so->so_flags & SOF_MP_SUBFLOW)) {
			struct mptses *mpte = tptomptp(tp)->mpt_mpte;

			if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER ||
			    mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
				mptcp_check_subflows_and_add(mpte);
			}
		}
#endif /* MPTCP */

		if (tp->t_adaptive_wtimo > 0 &&
		    tp->t_rxtshift > tp->t_adaptive_wtimo &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			/* Send an event to the application */
			soevent(so,
			    (SO_FILT_HINT_LOCKED |
			    SO_FILT_HINT_ADAPTIVE_WTIMO));
		}

		/*
		 * If this is a retransmit timeout after PTO, the PTO
		 * was not effective
		 */
		if (tp->t_flagsext & TF_SENT_TLPROBE) {
			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
			tcpstat.tcps_rto_after_pto++;
		}

		if (tp->t_flagsext & TF_DELAY_RECOVERY) {
			/*
			 * Retransmit timer fired before entering recovery
			 * on a connection with packet re-ordering. This
			 * suggests that the reordering metrics computed
			 * are not accurate.
			 */
			tp->t_reorderwin = 0;
			tp->t_timer[TCPT_DELAYFR] = 0;
			tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
		}

		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    tp->t_state == TCPS_SYN_RECEIVED) {
			tcp_disable_tfo(tp);
		}

		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
		    !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
		    ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
		    tp->t_rxtshift > 4)) {
			/*
			 * For regular retransmissions, the first one is
			 * already done as a tail-loss probe. Thus, if
			 * rxtshift > 1, this means we have sent the
			 * segment a total of 3 times.
			 *
			 * If we are in SYN-SENT state, there is no
			 * tail-loss probe, so we have to let rxtshift
			 * go up to 3.
			 */
			tcp_heuristic_tfo_middlebox(tp);

			so->so_error = ENODATA;
			soevent(so,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
			sorwakeup(so);
			sowwakeup(so);

			tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
			tcpstat.tcps_tfo_sndblackhole++;
		}

		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
		    tp->t_rxtshift > 3) {
			if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
				tcp_heuristic_tfo_middlebox(tp);

				so->so_error = ENODATA;
				soevent(so,
				    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
				sorwakeup(so);
				sowwakeup(so);
			}
		}

		if (tp->t_state == TCPS_SYN_SENT) {
			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
			tp->t_stat.synrxtshift = tp->t_rxtshift;
			tp->t_stat.rxmitsyns++;

			/* When retransmitting, disable TFO */
			if (tfo_enabled(tp) &&
			    !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) {
				tcp_disable_tfo(tp);
				tp->t_tfo_flags |= TFO_F_SYN_LOSS;
			}
		} else {
			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
		}

		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
		    TCP_ADD_REXMTSLOP(tp));
		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
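
		/*
		 * Example of the resulting schedule: with a 500 ms RTO,
		 * successive timeouts fire roughly 1 s, 2 s, 4 s, ... apart
		 * (tcp_backoff[] doubling up to 64x), with each value
		 * clamped to [t_rttmin, TCPTV_REXMTMAX] plus the
		 * per-connection slop added by TCP_ADD_REXMTSLOP().
		 */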

		TCP_LOG_RTT_INFO(tp);

		if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) {
			goto fc_output;
		}

		tcp_free_sackholes(tp);
		/*
		 * Check for potential Path MTU Discovery Black Hole
		 */
		if (tcp_pmtud_black_hole_detect &&
		    !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
		    (tp->t_state == TCPS_ESTABLISHED)) {
			if ((tp->t_flags & TF_PMTUD) &&
			    tp->t_pmtud_lastseg_size > tcp_pmtud_black_holed_next_mss(tp) &&
			    tp->t_rxtshift == 2) {
				/*
				 * Enter Path MTU Black-hole Detection mechanism:
				 * - Disable Path MTU Discovery (IP "DF" bit).
				 * - Reduce MTU to lower value than what we
				 * negotiated with the peer.
				 */
				/* Disable Path MTU Discovery for now */
				tp->t_flags &= ~TF_PMTUD;
				/* Record that we may have found a black hole */
				tp->t_flags |= TF_BLACKHOLE;
				optlen = tp->t_maxopd - tp->t_maxseg;
				/* Keep track of previous MSS */
				tp->t_pmtud_saved_maxopd = tp->t_maxopd;
				tp->t_pmtud_start_ts = tcp_now;
				if (tp->t_pmtud_start_ts == 0) {
					tp->t_pmtud_start_ts++;
				}
				/* Reduce the MSS to intermediary value */
				tp->t_maxopd = tcp_pmtud_black_holed_next_mss(tp);
				tp->t_maxseg = tp->t_maxopd - optlen;

				/*
				 * Reset the slow-start flight size
				 * as it may depend on the new MSS
				 */
				if (CC_ALGO(tp)->cwnd_init != NULL) {
					CC_ALGO(tp)->cwnd_init(tp);
				}
				tp->snd_cwnd = tp->t_maxseg;

				if (TCP_USE_RLEDBAT(tp, so) &&
				    tcp_cc_rledbat.rwnd_init != NULL) {
					tcp_cc_rledbat.rwnd_init(tp);
				}
			}
			/*
			 * If further retransmissions are still
			 * unsuccessful with a lowered MTU, maybe this
			 * isn't a Black Hole and we restore the previous
			 * MSS and blackhole detection flags.
			 */
			else {
				if ((tp->t_flags & TF_BLACKHOLE) &&
				    (tp->t_rxtshift > 4)) {
					tcp_pmtud_revert_segment_size(tp);
					tp->snd_cwnd = tp->t_maxseg;
				}
			}
		}

		/*
		 * Disable rfc1323 and rfc1644 if we haven't got any
		 * response to our SYN (after we reach the threshold)
		 * to work around some broken terminal servers (most of
		 * which have hopefully been retired) that have bad VJ
		 * header compression code which trashes TCP segments
		 * containing unknown-to-them TCP options.
		 * Do this only on non-local connections.
		 */
		if (tp->t_state == TCPS_SYN_SENT &&
		    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) {
			tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_REQ_CC);
		}

		/*
		 * If losing, let the lower level know and try for
		 * a better route. Also, if we backed off this far,
		 * our srtt estimate is probably bogus. Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
			if (!(tp->t_inpcb->inp_vflag & INP_IPV4)) {
				in6_losing(tp->t_inpcb);
			} else {
				in_losing(tp->t_inpcb);
			}
			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
			tp->t_srtt = 0;
		}
		tp->snd_nxt = tp->snd_una;
		/*
		 * Note: We overload snd_recover to function also as the
		 * snd_last variable described in RFC 2582
		 */
		tp->snd_recover = tp->snd_max;
		/*
		 * Force a segment to be sent.
		 */
		tp->t_flags |= TF_ACKNOW;

		/* If timing a segment in this window, stop the timer */
		tp->t_rtttime = 0;

		if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) {
			tcpstat.tcps_tailloss_rto++;
		}


		/*
		 * RFC 5681 says: when a TCP sender detects segment loss
		 * using retransmit timer and the given segment has already
		 * been retransmitted by way of the retransmission timer at
		 * least once, the value of ssthresh is held constant
		 */
		if (tp->t_rxtshift == 1 &&
		    CC_ALGO(tp)->after_timeout != NULL) {
			CC_ALGO(tp)->after_timeout(tp);
			/*
			 * CWR notifications are to be sent on new data
			 * right after Fast Retransmits and ECE
			 * notification receipts.
			 */
			if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
				tp->ecn_flags |= TE_SENDCWR;
			}
		}

		EXIT_FASTRECOVERY(tp);

		/* Exit cwnd non validated phase */
		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;


fc_output:
		tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);

		(void) tcp_output(tp);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
1416 */
1417 case TCPT_PERSIST:
1418 tcpstat.tcps_persisttimeo++;
1419 /*
1420 * Hack: if the peer is dead/unreachable, we do not
1421 * time out if the window is closed. After a full
1422 * backoff, drop the connection if the idle time
1423 * (no responses to probes) reaches the maximum
1424 * backoff that we would use if retransmitting.
1425 *
1426 * Drop the connection if we reached the maximum allowed time for
1427 * Zero Window Probes without a non-zero update from the peer.
1428 * See rdar://5805356
1429 */
1430 if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
1431 (idle_time >= tcp_maxpersistidle ||
1432 idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
1433 ((tp->t_persist_stop != 0) &&
1434 TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
1435 TCP_LOG_DROP_PCB(NULL, NULL, tp, false, "persist timeout drop");
1436 tcpstat.tcps_persistdrop++;
1437 soevent(so,
1438 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1439 tp = tcp_drop(tp, ETIMEDOUT);
1440 break;
1441 }
1442 tcp_setpersist(tp);
1443 tp->t_flagsext |= TF_FORCE;
1444 (void) tcp_output(tp);
1445 tp->t_flagsext &= ~TF_FORCE;
1446 break;
1447
1448 /*
1449 * Keep-alive timer went off; send something
1450 * or drop connection if idle for too long.
1451 */
1452 case TCPT_KEEP:
1453 #if FLOW_DIVERT
1454 if (tp->t_inpcb->inp_socket->so_flags & SOF_FLOW_DIVERT) {
1455 break;
1456 }
1457 #endif /* FLOW_DIVERT */
1458
1459 tcpstat.tcps_keeptimeo++;
1460 #if MPTCP
		/*
		 * Regular TCP connections do not send keepalives after
		 * closing; MPTCP must not either, after sending Data FINs.
		 */
		struct mptcb *mp_tp = tptomptp(tp);
		if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
		    (tp->t_state > TCPS_ESTABLISHED)) {
			goto dropit;
		} else if (mp_tp != NULL) {
			if ((mptcp_ok_to_keepalive(mp_tp) == 0)) {
				goto dropit;
			}
		}
#endif /* MPTCP */
		if (tp->t_state < TCPS_ESTABLISHED) {
			goto dropit;
		}
		if ((always_keepalive ||
		    (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
		    (tp->t_flagsext & TF_DETECT_READSTALL) ||
		    (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
		    (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
			if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) {
				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
				    "keep alive timeout drop");
				goto dropit;
			}

			if (tcp_send_keep_alive(tp)) {
				if (tp->t_flagsext & TF_DETECT_READSTALL) {
					tp->t_rtimo_probes++;
				}

				TCP_LOG_KEEP_ALIVE(tp, idle_time);
			}

			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			    TCP_CONN_KEEPINTVL(tp));
		} else {
			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			    TCP_CONN_KEEPIDLE(tp));
		}
		if (tp->t_flagsext & TF_DETECT_READSTALL) {
			struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
			bool reenable_probe = false;
			/*
			 * The keep alive packets sent to detect a read
			 * stall did not get a response from the
			 * peer. Generate more keep-alives to confirm this.
			 * If the number of probes sent reaches the limit,
			 * generate an event.
			 */
			if (tp->t_adaptive_rtimo > 0) {
				if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
					/* Generate an event */
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_ADAPTIVE_RTIMO));
					tcp_keepalive_reset(tp);
				} else {
					reenable_probe = true;
				}
			} else if (outifp != NULL &&
			    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
			    tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
				reenable_probe = true;
			} else {
				tp->t_flagsext &= ~TF_DETECT_READSTALL;
			}
			if (reenable_probe) {
				int ind = min(tp->t_rtimo_probes,
				    TCP_MAXRXTSHIFT);
				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
					tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
			}
		}
		if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
			int ind;

			tp->t_tfo_probes++;
			ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);

			/*
			 * We take the minimum of the time set by true
			 * keepalive (see above) and the backed-off RTO.
			 * That way we back off under packet loss but will
			 * never time out more slowly than the regular
			 * keepalive would because of the backoff.
			 */
			tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
				    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
			    tp->t_timer[TCPT_KEEP]);
		} else if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
			/* Still no data! Let's assume a TFO-error and err out... */
			tcp_heuristic_tfo_middlebox(tp);

			so->so_error = ENODATA;
			soevent(so,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
			sorwakeup(so);
			tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
			tcpstat.tcps_tfo_blackhole++;
		}
		break;
	case TCPT_DELACK:
		if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_timer[TCPT_DELACK] = 0;
			tp->t_flags |= TF_ACKNOW;

			/*
			 * If the delayed ack timer fired while stretching
			 * acks, count the number of times the streaming
			 * detection was not correct. If this exceeds a
			 * threshold, disable stretch ack on this
			 * connection.
			 *
			 * Also, go back to acking every other packet.
			 */
			if ((tp->t_flags & TF_STRETCHACK)) {
				if (tp->t_unacksegs > 1 &&
				    tp->t_unacksegs < maxseg_unacked) {
					tp->t_stretchack_delayed++;
				}

				if (tp->t_stretchack_delayed >
				    TCP_STRETCHACK_DELAY_THRESHOLD) {
					tp->t_flagsext |= TF_DISABLE_STRETCHACK;
					/*
					 * Note the time at which stretch
					 * ack was disabled automatically
					 */
					tp->rcv_nostrack_ts = tcp_now;
					tcpstat.tcps_nostretchack++;
					tp->t_stretchack_delayed = 0;
					tp->rcv_nostrack_pkts = 0;
				}
				tcp_reset_stretch_ack(tp);
			}
			tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;

			/*
			 * If we are measuring inter packet arrival jitter
			 * for throttling a connection, this delayed ack
			 * might be the reason for accumulating some
			 * jitter. So let's restart the measurement.
			 */
			CLEAR_IAJ_STATE(tp);

			tcpstat.tcps_delack++;
			tp->t_stat.delayed_acks_sent++;
			(void) tcp_output(tp);
		}
		break;

#if MPTCP
	case TCPT_JACK_RXMT:
		if ((tp->t_state == TCPS_ESTABLISHED) &&
		    (tp->t_mpflags & TMPF_PREESTABLISHED) &&
		    (tp->t_mpflags & TMPF_JOINED_FLOW)) {
			if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
				tcpstat.tcps_timeoutdrop++;
				soevent(so,
				    (SO_FILT_HINT_LOCKED |
				    SO_FILT_HINT_TIMEOUT));
				tp = tcp_drop(tp, tp->t_softerror ?
				    tp->t_softerror : ETIMEDOUT);
				break;
			}
			tcpstat.tcps_join_rxmts++;
			tp->t_mpflags |= TMPF_SND_JACK;
			tp->t_flags |= TF_ACKNOW;

			/*
			 * No backoff is implemented for simplicity for this
			 * corner case.
			 */
			(void) tcp_output(tp);
		}
		break;
	case TCPT_CELLICON:
	{
		struct mptses *mpte = tptomptp(tp)->mpt_mpte;

		tp->t_timer[TCPT_CELLICON] = 0;

		if (mpte->mpte_cellicon_increments == 0) {
			/* Cell-icon not set by this connection */
			break;
		}

		if (TSTMP_LT(mpte->mpte_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE, tcp_now)) {
			mptcp_unset_cellicon(mpte, NULL, 1);
		}

		if (mpte->mpte_cellicon_increments) {
			tp->t_timer[TCPT_CELLICON] = OFFSET_FROM_START(tp, MPTCP_CELLICON_TOGGLE_RATE);
		}

		break;
	}
#endif /* MPTCP */

	case TCPT_PTO:
	{
		int32_t ret = 0;

		if (!(tp->t_flagsext & TF_IF_PROBING)) {
			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
		}
		/*
		 * Check if the connection is in the right state to
		 * send a probe
		 */
		if ((tp->t_state != TCPS_ESTABLISHED ||
		    tp->t_rxtshift > 0 ||
		    tp->snd_max == tp->snd_una ||
		    !SACK_ENABLED(tp) ||
		    (tcp_do_better_lr != 1 && !TAILQ_EMPTY(&tp->snd_holes)) ||
		    IN_FASTRECOVERY(tp)) &&
		    !(tp->t_flagsext & TF_IF_PROBING)) {
			break;
		}

		/*
		 * When the interface state changes, explicitly reset the
		 * retransmission timer state for both SYN and data packets
		 * because we do not want to wait unnecessarily or time out
		 * too quickly if the link characteristics have changed
		 * drastically.
		 */
		if (tp->t_flagsext & TF_IF_PROBING) {
			tp->t_rxtshift = 0;
			if (tp->t_state == TCPS_SYN_SENT) {
				tp->t_stat.synrxtshift = tp->t_rxtshift;
			}
			/*
			 * Reset to the default RTO
			 */
1701 tp->t_srtt = TCPTV_SRTTBASE;
1702 tp->t_rttvar =
1703 ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1704 tp->t_rttmin = tp->t_flags & TF_LOCAL ? tcp_TCPTV_MIN :
1705 TCPTV_REXMTMIN;
1706 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1707 tp->t_rttmin, TCPTV_REXMTMAX, TCP_ADD_REXMTSLOP(tp));
1708 TCP_LOG_RTT_INFO(tp);
1709 }
1710
1711 if (tp->t_state == TCPS_SYN_SENT) {
1712 /*
1713 * The PTO for SYN_SENT reinitializes TCP as if it was a fresh
1714 * connection attempt
1715 */
1716 tp->snd_nxt = tp->snd_una;
1717 /*
1718 * Note: We overload snd_recover to function also as the
1719 * snd_last variable described in RFC 2582
1720 */
1721 tp->snd_recover = tp->snd_max;
1722 /*
1723 * Force a segment to be sent.
1724 */
1725 tp->t_flags |= TF_ACKNOW;
1726
1727 /* If timing a segment in this window, stop the timer */
1728 tp->t_rtttime = 0;
1729 } else {
1730 int32_t snd_len;
1731
1732 /*
1733 * If there is no new data to send or if the
1734 * connection is limited by receive window then
1735 * retransmit the last segment, otherwise send
1736 * new data.
1737 */
1738 snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
1739 - (tp->snd_max - tp->snd_una);
1740 if (snd_len > 0) {
1741 tp->snd_nxt = tp->snd_max;
1742 } else {
1743 snd_len = min((tp->snd_max - tp->snd_una),
1744 tp->t_maxseg);
1745 tp->snd_nxt = tp->snd_max - snd_len;
1746 }
1747 }
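/*
 * Worked example with illustrative numbers: with 10000 bytes in
 * so_snd, snd_wnd = 8000 and 3000 bytes in flight (snd_max -
 * snd_una), snd_len = min(10000, 8000) - 3000 = 5000 > 0, so the
 * probe transmits new data starting at snd_max. If the window were
 * already full (snd_len <= 0), snd_nxt is instead rewound so that
 * the last at-most-t_maxseg bytes ending at snd_max get resent.
 */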
1748
1749 tcpstat.tcps_pto++;
1750 if (tp->t_flagsext & TF_IF_PROBING) {
1751 tcpstat.tcps_probe_if++;
1752 }
1753
1754 /* If timing a segment in this window, stop the timer */
1755 tp->t_rtttime = 0;
1756 /* Note that a tail loss probe is being sent; exclude interface probes */
1757 if (!(tp->t_flagsext & TF_IF_PROBING)) {
1758 tp->t_flagsext |= TF_SENT_TLPROBE;
1759 tp->t_tlpstart = tcp_now;
1760 }
1761
1762 tp->snd_cwnd += tp->t_maxseg;
1763 /*
1764 * When the tail loss probe fires, we reset the RTO timer: a
1765 * probe was just sent, so we can safely push the timer out.
1766 *
1767 * Set it to 0 to ensure that tcp_output() will reschedule it.
1768 */
1769 tp->t_timer[TCPT_REXMT] = 0;
1770 ret = tcp_output(tp);
1771
1772 #if (DEBUG || DEVELOPMENT)
1773 if ((tp->t_flagsext & TF_IF_PROBING) &&
1774 ((IFNET_IS_COMPANION_LINK(tp->t_inpcb->inp_last_outifp)) ||
1775 tp->t_state == TCPS_SYN_SENT)) {
1776 if (ret == 0 && tcp_probe_if_fix_port > 0 &&
1777 tcp_probe_if_fix_port <= IPPORT_HILASTAUTO) {
1778 tp->t_timer[TCPT_REXMT] = 0;
1779 tcp_set_lotimer_index(tp);
1780 }
1781
1782 os_log(OS_LOG_DEFAULT,
1783 "%s: sent %s probe for %u > %u on interface %s"
1784 " (%u) %s(%d)",
1785 __func__,
1786 tp->t_state == TCPS_SYN_SENT ? "SYN" : "data",
1787 ntohs(tp->t_inpcb->inp_lport),
1788 ntohs(tp->t_inpcb->inp_fport),
1789 if_name(tp->t_inpcb->inp_last_outifp),
1790 tp->t_inpcb->inp_last_outifp->if_index,
1791 ret == 0 ? "succeeded" : "failed", ret);
1792 }
1793 #endif /* DEBUG || DEVELOPMENT */
1794
1795 /*
1796 * When there is data (or a SYN) to send, the above call to
1797 * tcp_output() should have armed either the REXMT or the
1798 * PERSIST timer. If it didn't, something is wrong and this
1799 * connection would idle around forever. Let's make sure that
1800 * at least the REXMT timer is set.
1801 */
1802 if (tp->t_timer[TCPT_REXMT] == 0 && tp->t_timer[TCPT_PERSIST] == 0 &&
1803 (tp->t_inpcb->inp_socket->so_snd.sb_cc != 0 || tp->t_state == TCPS_SYN_SENT ||
1804 tp->t_state == TCPS_SYN_RECEIVED)) {
1805 tp->t_timer[TCPT_REXMT] =
1806 OFFSET_FROM_START(tp, tp->t_rxtcur);
1807
1808 os_log(OS_LOG_DEFAULT,
1809 "%s: tcp_output() returned %u with retransmission timer disabled "
1810 "for %u > %u in state %d, reset timer to %d",
1811 __func__, ret,
1812 ntohs(tp->t_inpcb->inp_lport),
1813 ntohs(tp->t_inpcb->inp_fport),
1814 tp->t_state,
1815 tp->t_timer[TCPT_REXMT]);
1816
1817 tcp_check_timer_state(tp);
1818 }
1819 tp->snd_cwnd -= tp->t_maxseg;
1820
1821 if (!(tp->t_flagsext & TF_IF_PROBING)) {
1822 tp->t_tlphighrxt = tp->snd_nxt;
1823 }
1824 break;
1825 }
1826 case TCPT_DELAYFR:
1827 tp->t_flagsext &= ~TF_DELAY_RECOVERY;
1828
1829 /*
1830 * Don't do anything if one of the following is true:
1831 * - the connection is already in recovery
1832 * - everything up to snd_recover has been acknowledged
1833 * - the retransmit timeout has fired
1834 */
1835 if (IN_FASTRECOVERY(tp) ||
1836 SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
1837 tp->t_rxtshift > 0) {
1838 break;
1839 }
1840
1841 VERIFY(SACK_ENABLED(tp));
1842 tcp_rexmt_save_state(tp);
1843 if (CC_ALGO(tp)->pre_fr != NULL) {
1844 CC_ALGO(tp)->pre_fr(tp);
1845 if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
1846 tp->ecn_flags |= TE_SENDCWR;
1847 }
1848 }
1849 ENTER_FASTRECOVERY(tp);
1850
1851 tp->t_timer[TCPT_REXMT] = 0;
1852 tcpstat.tcps_sack_recovery_episode++;
1853 tp->t_sack_recovery_episode++;
1854 tp->sack_newdata = tp->snd_nxt;
1855 tp->snd_cwnd = tp->t_maxseg;
1856 tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
1857 (void) tcp_output(tp);
1858 break;
1859
1860 dropit:
1861 tcpstat.tcps_keepdrops++;
1862 soevent(so,
1863 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1864 tp = tcp_drop(tp, ETIMEDOUT);
1865 break;
1866 }
1867 #if TCPDEBUG
1868 if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
1869 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
1870 PRU_SLOWTIMO);
1871 }
1872 #endif
1873 return tp;
1874 }
1875
1876 /* Remove a timer entry from the timer list */
1877 void
1878 tcp_remove_timer(struct tcpcb *tp)
1879 {
1880 struct tcptimerlist *listp = &tcp_timer_list;
1881
1882 socket_lock_assert_owned(tp->t_inpcb->inp_socket);
1883 if (!(TIMER_IS_ON_LIST(tp))) {
1884 return;
1885 }
1886 lck_mtx_lock(&listp->mtx);
1887
1888 /* Check if pcb is on timer list again after acquiring the lock */
1889 if (!(TIMER_IS_ON_LIST(tp))) {
1890 lck_mtx_unlock(&listp->mtx);
1891 return;
1892 }
1893
1894 if (listp->next_te != NULL && listp->next_te == &tp->tentry) {
1895 listp->next_te = LIST_NEXT(&tp->tentry, le);
1896 }
1897
1898 LIST_REMOVE(&tp->tentry, le);
1899 tp->t_flags &= ~(TF_TIMER_ONLIST);
1900
1901 listp->entries--;
1902
1903 tp->tentry.le.le_next = NULL;
1904 tp->tentry.le.le_prev = NULL;
1905 lck_mtx_unlock(&listp->mtx);
1906 }
1907
1908 /*
1909 * Function to check if the timerlist needs to be rescheduled to run
1910 * the timer entry correctly. Basically, this is to check if we can avoid
1911 * taking the list lock.
1912 */
1913
1914 static boolean_t
1915 need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
1916 {
1917 struct tcptimerlist *listp = &tcp_timer_list;
1918 int32_t diff;
1919
1920 /*
1921 * If the list is being processed then the state of the list is
1922 * in flux. In this case always acquire the lock and set the state
1923 * correctly.
1924 */
1925 if (listp->running) {
1926 return TRUE;
1927 }
1928
1929 if (!listp->scheduled) {
1930 return TRUE;
1931 }
1932
1933 diff = timer_diff(listp->runtime, 0, runtime, 0);
1934 if (diff <= 0) {
1935 /* The list is going to run before this timer */
1936 return FALSE;
1937 } else {
1938 if (mode & TCP_TIMERLIST_10MS_MODE) {
1939 if (diff <= TCP_TIMER_10MS_QUANTUM) {
1940 return FALSE;
1941 }
1942 } else if (mode & TCP_TIMERLIST_100MS_MODE) {
1943 if (diff <= TCP_TIMER_100MS_QUANTUM) {
1944 return FALSE;
1945 }
1946 } else {
1947 if (diff <= TCP_TIMER_500MS_QUANTUM) {
1948 return FALSE;
1949 }
1950 }
1951 }
1952 return TRUE;
1953 }
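/*
 * Illustrative example: diff is how long after the requested runtime
 * the list is currently scheduled to fire. If the list will run at
 * tcp_now + 100 and a 10ms-mode timer asks for tcp_now + 50, then
 * diff = 50 > TCP_TIMER_10MS_QUANTUM and the list must be
 * rescheduled; a 500ms-mode timer with the same diff tolerates the
 * slack and the schedule is left alone.
 */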
1954
1955 void
1956 tcp_sched_timerlist(uint32_t offset)
1957 {
1958 uint64_t deadline = 0;
1959 struct tcptimerlist *listp = &tcp_timer_list;
1960
1961 LCK_MTX_ASSERT(&listp->mtx, LCK_MTX_ASSERT_OWNED);
1962
1963 offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
1964 listp->runtime = tcp_now + offset;
1965 listp->schedtime = tcp_now;
1966 if (listp->runtime == 0) {
1967 listp->runtime++;
1968 offset++;
1969 }
1970
1971 clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
1972
1973 thread_call_enter_delayed(listp->call, deadline);
1974 listp->scheduled = TRUE;
1975 }
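/*
 * A note on the units in tcp_sched_timerlist() above (inferred from
 * the scale factor, so treat as an assumption): offset counts 1ms
 * TCP clock ticks, and passing USEC_PER_SEC (1,000,000) nanoseconds
 * per unit to clock_interval_to_deadline() turns each tick into 1ms
 * of absolute time:
 *
 *	deadline = now + offset * 1,000,000 ns = now + offset ms
 */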
1976
1977 /*
1978 * Function to run the timers for a connection.
1979 *
1980 * Returns the offset of next timer to be run for this connection which
1981 * can be used to reschedule the timerlist.
1982 *
1983 * te_mode is an out parameter that indicates the modes of active
1984 * timers for this connection.
1985 */
1986 u_int32_t
1987 tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
1988 u_int16_t probe_if_index)
1989 {
1990 struct socket *so;
1991 u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
1992 u_int32_t timer_val, offset = 0, lo_timer = 0;
1993 int32_t diff;
1994 boolean_t needtorun[TCPT_NTIMERS];
1995 int count = 0;
1996
1997 VERIFY(tp != NULL);
1998 bzero(needtorun, sizeof(needtorun));
1999 *te_mode = 0;
2000
2001 socket_lock(tp->t_inpcb->inp_socket, 1);
2002
2003 so = tp->t_inpcb->inp_socket;
2004 /* Release the want count on inp */
2005 if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
2006 == WNT_STOPUSING) {
2007 if (TIMER_IS_ON_LIST(tp)) {
2008 tcp_remove_timer(tp);
2009 }
2010
2011 /* Looks like the TCP connection got closed while we
2012 * were waiting for the lock. Done.
2013 */
2014 goto done;
2015 }
2016
2017 /*
2018 * If this connection is over an interface that needs to
2019 * be probed, send probe packets to reinitiate communication.
2020 */
2021 if (TCP_IF_STATE_CHANGED(tp, probe_if_index)) {
2022 tp->t_flagsext |= TF_IF_PROBING;
2023 tcp_timers(tp, TCPT_PTO);
2024 tp->t_timer[TCPT_PTO] = 0;
2025 tp->t_flagsext &= ~TF_IF_PROBING;
2026 }
2027
2028 /*
2029 * Since the timer thread needs to wait for the TCP lock, it may race
2030 * with another thread that can cancel or reschedule the timer
2031 * that is about to run. Check if we need to run anything.
2032 */
2033 if ((index = tp->tentry.index) == TCPT_NONE) {
2034 goto done;
2035 }
2036
2037 timer_val = tp->t_timer[index];
2038
2039 diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
2040 if (diff > 0) {
2041 if (tp->tentry.index != TCPT_NONE) {
2042 offset = diff;
2043 *(te_mode) = tp->tentry.mode;
2044 }
2045 goto done;
2046 }
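/*
 * Illustrative note: a positive diff means another thread moved this
 * connection's runtime into the future after the timer thread had
 * already committed to running it. Nothing has expired, so the
 * remaining diff is handed back as the offset at which the list
 * should revisit this connection.
 */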
2047
2048 tp->t_timer[index] = 0;
2049 if (timer_val > 0) {
2050 tp = tcp_timers(tp, index);
2051 if (tp == NULL) {
2052 goto done;
2053 }
2054 }
2055
2056 /*
2057 * Check if there are any other timers that need to be run.
2058 * While doing it, adjust the timer values with respect to tcp_now.
2059 */
2060 tp->tentry.mode = 0;
2061 for (i = 0; i < TCPT_NTIMERS; ++i) {
2062 if (tp->t_timer[i] != 0) {
2063 diff = timer_diff(tp->tentry.timer_start,
2064 tp->t_timer[i], tcp_now, 0);
2065 if (diff <= 0) {
2066 needtorun[i] = TRUE;
2067 count++;
2068 } else {
2069 tp->t_timer[i] = diff;
2070 needtorun[i] = FALSE;
2071 if (lo_timer == 0 || diff < lo_timer) {
2072 lo_timer = diff;
2073 lo_index = i;
2074 }
2075 TCP_SET_TIMER_MODE(tp->tentry.mode, i);
2076 }
2077 }
2078 }
2079
2080 tp->tentry.timer_start = tcp_now;
2081 tp->tentry.index = lo_index;
2082 VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2083
2084 if (tp->tentry.index != TCPT_NONE) {
2085 tp->tentry.runtime = tp->tentry.timer_start +
2086 tp->t_timer[tp->tentry.index];
2087 if (tp->tentry.runtime == 0) {
2088 tp->tentry.runtime++;
2089 }
2090 }
2091
2092 if (count > 0) {
2093 /* run any other timers outstanding at this time. */
2094 for (i = 0; i < TCPT_NTIMERS; ++i) {
2095 if (needtorun[i]) {
2096 tp->t_timer[i] = 0;
2097 tp = tcp_timers(tp, i);
2098 if (tp == NULL) {
2099 offset = 0;
2100 *(te_mode) = 0;
2101 goto done;
2102 }
2103 }
2104 }
2105 tcp_set_lotimer_index(tp);
2106 }
2107
2108 if (tp->tentry.index < TCPT_NONE) {
2109 offset = tp->t_timer[tp->tentry.index];
2110 *(te_mode) = tp->tentry.mode;
2111 }
2112
2113 done:
2114 if (tp != NULL && tp->tentry.index == TCPT_NONE) {
2115 tcp_remove_timer(tp);
2116 offset = 0;
2117 }
2118
2119 socket_unlock(so, 1);
2120 return offset;
2121 }
2122
2123 void
2124 tcp_run_timerlist(void * arg1, void * arg2)
2125 {
2126 #pragma unused(arg1, arg2)
2127 struct tcptimerentry *te, *next_te;
2128 struct tcptimerlist *listp = &tcp_timer_list;
2129 struct tcpcb *tp;
2130 uint32_t next_timer = 0; /* offset of the next timer on the list */
2131 u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */
2132 u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
2133 uint32_t active_count = 0;
2134
2135 calculate_tcp_clock();
2136
2137 lck_mtx_lock(&listp->mtx);
2138
2139 int32_t drift = tcp_now - listp->runtime;
2140 if (drift <= 1) {
2141 tcpstat.tcps_timer_drift_le_1_ms++;
2142 } else if (drift <= 10) {
2143 tcpstat.tcps_timer_drift_le_10_ms++;
2144 } else if (drift <= 20) {
2145 tcpstat.tcps_timer_drift_le_20_ms++;
2146 } else if (drift <= 50) {
2147 tcpstat.tcps_timer_drift_le_50_ms++;
2148 } else if (drift <= 100) {
2149 tcpstat.tcps_timer_drift_le_100_ms++;
2150 } else if (drift <= 200) {
2151 tcpstat.tcps_timer_drift_le_200_ms++;
2152 } else if (drift <= 500) {
2153 tcpstat.tcps_timer_drift_le_500_ms++;
2154 } else if (drift <= 1000) {
2155 tcpstat.tcps_timer_drift_le_1000_ms++;
2156 } else {
2157 tcpstat.tcps_timer_drift_gt_1000_ms++;
2158 }
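/*
 * Illustrative note: drift is how late, in 1ms ticks, the list ran
 * relative to its scheduled runtime; the chain above buckets it into
 * a histogram, e.g. a run firing 15ms after listp->runtime bumps
 * tcps_timer_drift_le_20_ms.
 */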
2159
2160 listp->running = TRUE;
2161
2162 LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
2163 uint32_t offset = 0;
2164 uint32_t runtime = te->runtime;
2165
2166 tp = TIMERENTRY_TO_TP(te);
2167
2168 /*
2169 * An interface probe may need to happen before the previously scheduled runtime
2170 */
2171 if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now) &&
2172 !TCP_IF_STATE_CHANGED(tp, listp->probe_if_index)) {
2173 offset = timer_diff(runtime, 0, tcp_now, 0);
2174 if (next_timer == 0 || offset < next_timer) {
2175 next_timer = offset;
2176 }
2177 list_mode |= te->mode;
2178 continue;
2179 }
2180
2181 /*
2182 * Acquire an inp wantcnt on the inpcb so that the socket
2183 * won't get detached even if tcp_close is called
2184 */
2185 if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
2186 == WNT_STOPUSING) {
2187 /*
2188 * Somehow this pcb went into a dead state while
2189 * on the timer list; just take it off the list.
2190 * Since the timer list entry pointers are
2191 * protected by the timer list lock, we can
2192 * do it here without the socket lock.
2193 */
2194 if (TIMER_IS_ON_LIST(tp)) {
2195 tp->t_flags &= ~(TF_TIMER_ONLIST);
2196 LIST_REMOVE(&tp->tentry, le);
2197 listp->entries--;
2198
2199 tp->tentry.le.le_next = NULL;
2200 tp->tentry.le.le_prev = NULL;
2201 }
2202 continue;
2203 }
2204 active_count++;
2205
2206 /*
2207 * Store the next timerentry pointer before releasing the
2208 * list lock. If that entry has to be removed when we
2209 * release the lock, this pointer will be updated to the
2210 * element after that.
2211 */
2212 listp->next_te = next_te;
2213
2214 VERIFY_NEXT_LINK(&tp->tentry, le);
2215 VERIFY_PREV_LINK(&tp->tentry, le);
2216
2217 lck_mtx_unlock(&listp->mtx);
2218
2219 offset = tcp_run_conn_timer(tp, &te_mode,
2220 listp->probe_if_index);
2221
2222 lck_mtx_lock(&listp->mtx);
2223
2224 next_te = listp->next_te;
2225 listp->next_te = NULL;
2226
2227 if (offset > 0 && te_mode != 0) {
2228 list_mode |= te_mode;
2229
2230 if (next_timer == 0 || offset < next_timer) {
2231 next_timer = offset;
2232 }
2233 }
2234 }
2235
2236 if (!LIST_EMPTY(&listp->lhead)) {
2237 uint32_t next_mode = 0;
2238 if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
2239 (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) {
2240 next_mode = TCP_TIMERLIST_10MS_MODE;
2241 } else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
2242 (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) {
2243 next_mode = TCP_TIMERLIST_100MS_MODE;
2244 } else {
2245 next_mode = TCP_TIMERLIST_500MS_MODE;
2246 }
2247
2248 if (next_mode != TCP_TIMERLIST_500MS_MODE) {
2249 listp->idleruns = 0;
2250 } else {
2251 /*
2252 * The next required mode is slow mode, but if
2253 * the last one was a faster mode and we did not
2254 * have enough idle runs, repeat the last mode.
2255 *
2256 * We try to keep the timer list in fast mode for
2257 * some idle time in expectation of new data.
2258 */
2259 if (listp->mode != next_mode &&
2260 listp->idleruns < timer_fastmode_idlemax) {
2261 listp->idleruns++;
2262 next_mode = listp->mode;
2263 next_timer = TCP_TIMER_100MS_QUANTUM;
2264 } else {
2265 listp->idleruns = 0;
2266 }
2267 }
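/*
 * Example of the fast-mode hold (illustrative): if the list last ran
 * in 100ms mode and every remaining timer now needs only 500ms mode,
 * the list still re-arms at TCP_TIMER_100MS_QUANTUM for up to
 * timer_fastmode_idlemax consecutive idle runs before finally
 * dropping to TCP_TIMERLIST_500MS_MODE.
 */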
2268 listp->mode = next_mode;
2269 if (listp->pref_offset != 0) {
2270 next_timer = min(listp->pref_offset, next_timer);
2271 }
2272
2273 if (listp->mode == TCP_TIMERLIST_500MS_MODE) {
2274 next_timer = max(next_timer,
2275 TCP_TIMER_500MS_QUANTUM);
2276 }
2277
2278 tcp_sched_timerlist(next_timer);
2279 } else {
2280 /*
2281 * No connection needs the timer list right now, but keep
2282 * it running periodically at a much coarser interval.
2283 */
2284 tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
2285 }
2286
2287 listp->running = FALSE;
2288 listp->pref_mode = 0;
2289 listp->pref_offset = 0;
2290 listp->probe_if_index = 0;
2291
2292 lck_mtx_unlock(&listp->mtx);
2293 }
2294
2295 /*
2296 * Function to check if the timerlist needs to be rescheduled to run this
2297 * connection's timers correctly.
2298 */
2299 void
2300 tcp_sched_timers(struct tcpcb *tp)
2301 {
2302 struct tcptimerentry *te = &tp->tentry;
2303 u_int16_t index = te->index;
2304 u_int16_t mode = te->mode;
2305 struct tcptimerlist *listp = &tcp_timer_list;
2306 int32_t offset = 0;
2307 boolean_t list_locked = FALSE;
2308
2309 if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
2310 /* Just return without adding the dead pcb to the list */
2311 if (TIMER_IS_ON_LIST(tp)) {
2312 tcp_remove_timer(tp);
2313 }
2314 return;
2315 }
2316
2317 if (index == TCPT_NONE) {
2318 /* Nothing to run */
2319 tcp_remove_timer(tp);
2320 return;
2321 }
2322
2323 /*
2324 * Compute the offset at which the next timer for this connection
2325 * has to run.
2326 */
2327 offset = timer_diff(te->runtime, 0, tcp_now, 0);
2328 if (offset <= 0) {
2329 offset = 1;
2330 tcp_timer_advanced++;
2331 }
2332
2333 if (!TIMER_IS_ON_LIST(tp)) {
2334 if (!list_locked) {
2335 lck_mtx_lock(&listp->mtx);
2336 list_locked = TRUE;
2337 }
2338
2339 if (!TIMER_IS_ON_LIST(tp)) {
2340 LIST_INSERT_HEAD(&listp->lhead, te, le);
2341 tp->t_flags |= TF_TIMER_ONLIST;
2342
2343 listp->entries++;
2344 if (listp->entries > listp->maxentries) {
2345 listp->maxentries = listp->entries;
2346 }
2347
2348 /* if the list is not scheduled, just schedule it */
2349 if (!listp->scheduled) {
2350 goto schedule;
2351 }
2352 }
2353 }
2354
2355 /*
2356 * Timer entry is currently on the list, check if the list needs
2357 * to be rescheduled.
2358 */
2359 if (need_to_resched_timerlist(te->runtime, mode)) {
2360 tcp_resched_timerlist++;
2361
2362 if (!list_locked) {
2363 lck_mtx_lock(&listp->mtx);
2364 list_locked = TRUE;
2365 }
2366
2367 VERIFY_NEXT_LINK(te, le);
2368 VERIFY_PREV_LINK(te, le);
2369
2370 if (listp->running) {
2371 listp->pref_mode |= mode;
2372 if (listp->pref_offset == 0 ||
2373 offset < listp->pref_offset) {
2374 listp->pref_offset = offset;
2375 }
2376 } else {
2377 /*
2378 * The list could have been rescheduled while
2379 * this thread was waiting for the lock.
2380 */
2381 if (listp->scheduled) {
2382 int32_t diff;
2383 diff = timer_diff(listp->runtime, 0,
2384 tcp_now, offset);
2385 if (diff <= 0) {
2386 goto done;
2387 } else {
2388 goto schedule;
2389 }
2390 } else {
2391 goto schedule;
2392 }
2393 }
2394 }
2395 goto done;
2396
2397 schedule:
2398 /*
2399 * Since a connection with timers is getting scheduled, the timer
2400 * list moves from the idle to the active state, which is why
2401 * idleruns is reset.
2402 */
2403 if (mode & TCP_TIMERLIST_10MS_MODE) {
2404 listp->mode = TCP_TIMERLIST_10MS_MODE;
2405 listp->idleruns = 0;
2406 offset = min(offset, TCP_TIMER_10MS_QUANTUM);
2407 } else if (mode & TCP_TIMERLIST_100MS_MODE) {
2408 if (listp->mode > TCP_TIMERLIST_100MS_MODE) {
2409 listp->mode = TCP_TIMERLIST_100MS_MODE;
2410 }
2411 listp->idleruns = 0;
2412 offset = min(offset, TCP_TIMER_100MS_QUANTUM);
2413 }
2414 tcp_sched_timerlist(offset);
2415
2416 done:
2417 if (list_locked) {
2418 lck_mtx_unlock(&listp->mtx);
2419 }
2420
2421 return;
2422 }
2423
2424 static inline void
2425 tcp_set_lotimer_index(struct tcpcb *tp)
2426 {
2427 uint16_t i, lo_index = TCPT_NONE, mode = 0;
2428 uint32_t lo_timer = 0;
2429 for (i = 0; i < TCPT_NTIMERS; ++i) {
2430 if (tp->t_timer[i] != 0) {
2431 TCP_SET_TIMER_MODE(mode, i);
2432 if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
2433 lo_timer = tp->t_timer[i];
2434 lo_index = i;
2435 }
2436 }
2437 }
2438 tp->tentry.index = lo_index;
2439 tp->tentry.mode = mode;
2440 VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2441
2442 if (tp->tentry.index != TCPT_NONE) {
2443 tp->tentry.runtime = tp->tentry.timer_start
2444 + tp->t_timer[tp->tentry.index];
2445 if (tp->tentry.runtime == 0) {
2446 tp->tentry.runtime++;
2447 }
2448 }
2449 }
2450
2451 void
2452 tcp_check_timer_state(struct tcpcb *tp)
2453 {
2454 socket_lock_assert_owned(tp->t_inpcb->inp_socket);
2455
2456 if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) {
2457 return;
2458 }
2459
2460 tcp_set_lotimer_index(tp);
2461
2462 tcp_sched_timers(tp);
2463 return;
2464 }
2465
2466 static inline void
2467 tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
2468 {
2469 /* handle wrap around */
2470 int32_t diff = (int32_t) (cur - *prev);
2471 if (diff > 0) {
2472 *dest = diff;
2473 } else {
2474 *dest = 0;
2475 }
2476 *prev = cur;
2477 return;
2478 }
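/*
 * Worked example of the wraparound handling (illustrative): if the
 * running counter wrapped, say *prev == 0xfffffff0 and cur == 5,
 * then (int32_t)(cur - *prev) == 21, so the 21 events straddling the
 * wrap are still reported; a snapshot that would make diff negative
 * reports 0 instead of a garbage delta.
 */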
2479
2480 static inline void
2481 tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
2482 {
2483 /* handle wrap around */
2484 int64_t diff = (int64_t) (cur - *prev);
2485 if (diff > 0) {
2486 *dest = diff;
2487 } else {
2488 *dest = 0;
2489 }
2490 *prev = cur;
2491 return;
2492 }
2493
2494 __private_extern__ void
2495 tcp_report_stats(void)
2496 {
2497 struct nstat_sysinfo_data data;
2498 struct sockaddr_in dst;
2499 struct sockaddr_in6 dst6;
2500 struct rtentry *rt = NULL;
2501 static struct tcp_last_report_stats prev;
2502 u_int64_t var, uptime;
2503
2504 #define stat data.u.tcp_stats
2505 if (((uptime = net_uptime()) - tcp_last_report_time) <
2506 tcp_report_stats_interval) {
2507 return;
2508 }
2509
2510 tcp_last_report_time = uptime;
2511
2512 bzero(&data, sizeof(data));
2513 data.flags = NSTAT_SYSINFO_TCP_STATS;
2514
2515 bzero(&dst, sizeof(dst));
2516 dst.sin_len = sizeof(dst);
2517 dst.sin_family = AF_INET;
2518
2519 /* ipv4 avg rtt */
2520 lck_mtx_lock(rnh_lock);
2521 rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
2522 rt_tables[AF_INET], IFSCOPE_NONE);
2523 lck_mtx_unlock(rnh_lock);
2524 if (rt != NULL) {
2525 RT_LOCK(rt);
2526 if (rt_primary_default(rt, rt_key(rt)) &&
2527 rt->rt_stats != NULL) {
2528 stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
2529 }
2530 RT_UNLOCK(rt);
2531 rtfree(rt);
2532 rt = NULL;
2533 }
2534
2535 /* ipv6 avg rtt */
2536 bzero(&dst6, sizeof(dst6));
2537 dst6.sin6_len = sizeof(dst6);
2538 dst6.sin6_family = AF_INET6;
2539
2540 lck_mtx_lock(rnh_lock);
2541 rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
2542 rt_tables[AF_INET6], IFSCOPE_NONE);
2543 lck_mtx_unlock(rnh_lock);
2544 if (rt != NULL) {
2545 RT_LOCK(rt);
2546 if (rt_primary_default(rt, rt_key(rt)) &&
2547 rt->rt_stats != NULL) {
2548 stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
2549 }
2550 RT_UNLOCK(rt);
2551 rtfree(rt);
2552 rt = NULL;
2553 }
2554
2555 /* send packet loss rate, shift by 10 for precision */
2556 if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
2557 var = tcpstat.tcps_sndrexmitpack << 10;
2558 stat.send_plr = (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2559 }
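/*
 * Worked example of the fixed-point math with illustrative numbers:
 * for tcps_sndpack == 100000 and tcps_sndrexmitpack == 500,
 * var = 500 << 10 = 512000 and send_plr = 512000 * 100 / 100000 =
 * 512, i.e. 512 / 1024 = 0.5%. Consumers divide by 1024 to recover
 * the percentage with 10 fractional bits of precision.
 */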
2560
2561 /* recv packet loss rate, shift by 10 for precision */
2562 if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
2563 var = tcpstat.tcps_recovered_pkts << 10;
2564 stat.recv_plr = (uint32_t)((var * 100) / tcpstat.tcps_rcvpack);
2565 }
2566
2567 /* RTO after tail loss, shift by 10 for precision */
2568 if (tcpstat.tcps_sndrexmitpack > 0
2569 && tcpstat.tcps_tailloss_rto > 0) {
2570 var = tcpstat.tcps_tailloss_rto << 10;
2571 stat.send_tlrto_rate =
2572 (uint32_t)((var * 100) / tcpstat.tcps_sndrexmitpack);
2573 }
2574
2575 /* packet reordering */
2576 if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
2577 var = tcpstat.tcps_reordered_pkts << 10;
2578 stat.send_reorder_rate =
2579 (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2580 }
2581
2582 if (tcp_ecn_outbound == 1) {
2583 stat.ecn_client_enabled = 1;
2584 }
2585 if (tcp_ecn_inbound == 1) {
2586 stat.ecn_server_enabled = 1;
2587 }
2588 tcp_cumulative_stat(tcpstat.tcps_connattempt,
2589 &prev.tcps_connattempt, &stat.connection_attempts);
2590 tcp_cumulative_stat(tcpstat.tcps_accepts,
2591 &prev.tcps_accepts, &stat.connection_accepts);
2592 tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
2593 &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
2594 tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
2595 &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
2596 tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
2597 &prev.tcps_ecn_client_success, &stat.ecn_client_success);
2598 tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
2599 &prev.tcps_ecn_server_success, &stat.ecn_server_success);
2600 tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
2601 &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
2602 tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
2603 &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
2604 tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
2605 &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
2606 tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
2607 &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
2608 tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
2609 &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
2612 tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
2613 &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
2616 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
2617 &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
2618 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
2619 &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
2620 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
2621 &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
2622 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
2623 &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
2624 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
2625 &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
2626 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
2627 &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
2628 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
2629 &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
2630 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
2631 &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
2632 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
2633 &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
2634 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
2635 &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
2636 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
2637 &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
2638 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
2639 &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
2640 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
2641 &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
2642 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
2643 &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
2644 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
2645 &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
2646 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
2647 &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
2648 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
2649 &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
2650 tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
2651 &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
2652 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
2653 &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
2654 tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
2655 &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
2656 tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
2657 &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
2658 tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
2659 &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
2660
2661
2662 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
2663 &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
2664 tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
2665 &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
2666 tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
2667 &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
2668 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
2669 &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
2670 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
2671 &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
2672 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
2673 &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
2674 tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
2675 &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
2676 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
2677 &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
2678 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
2679 &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
2680 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
2681 &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
2682 tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
2683 &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
2684 tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
2685 &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
2686 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
2687 &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
2688 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
2689 &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
2690 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
2691 &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
2692 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
2693 &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
2694 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
2695 &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
2696 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
2697 &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
2698 tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
2699 &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
2700 tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
2701 &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
2702 tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
2703 &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
2704 tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
2705 &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
2706 tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
2707 &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
2708 tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
2709 &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
2710 tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
2711 &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
2712 tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
2713 &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
2714 tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
2715 &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
2716 tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
2717 &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);
2718 tcp_cumulative_stat(tcpstat.tcps_mptcp_triggered_cell,
2719 &prev.tcps_mptcp_triggered_cell, &stat.mptcp_triggered_cell);
2720
2721 nstat_sysinfo_send_data(&data);
2722
2723 #undef stat
2724 }
2725
2726 void
2727 tcp_interface_send_probe(u_int16_t probe_if_index)
2728 {
2729 int32_t offset = 0;
2730 struct tcptimerlist *listp = &tcp_timer_list;
2731
2732 /* Make sure TCP clock is up to date */
2733 calculate_tcp_clock();
2734
2735 lck_mtx_lock(&listp->mtx);
2736 if (listp->probe_if_index > 0 && listp->probe_if_index != probe_if_index) {
2737 tcpstat.tcps_probe_if_conflict++;
2738 os_log(OS_LOG_DEFAULT,
2739 "%s: probe_if_index %u conflicts with %u, tcps_probe_if_conflict %u\n",
2740 __func__, probe_if_index, listp->probe_if_index,
2741 tcpstat.tcps_probe_if_conflict);
2742 goto done;
2743 }
2744
2745 listp->probe_if_index = probe_if_index;
2746 if (listp->running) {
2747 os_log(OS_LOG_DEFAULT, "%s: timer list already running for if_index %u\n",
2748 __func__, probe_if_index);
2749 goto done;
2750 }
2751
2752 /*
2753 * Reschedule the timerlist to run within the next 10ms, which is
2754 * the fastest that we can do.
2755 */
2756 offset = TCP_TIMER_10MS_QUANTUM;
2757 if (listp->scheduled) {
2758 int32_t diff;
2759 diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2760 if (diff <= 0) {
2761 /* The timer will fire sooner than what's needed */
2762 os_log(OS_LOG_DEFAULT,
2763 "%s: timer will fire sooner than needed for if_index %u\n",
2764 __func__, probe_if_index);
2765 goto done;
2766 }
2767 }
2768 listp->mode = TCP_TIMERLIST_10MS_MODE;
2769 listp->idleruns = 0;
2770
2771 tcp_sched_timerlist(offset);
2772
2773 done:
2774 lck_mtx_unlock(&listp->mtx);
2775 return;
2776 }
2777
2778 /*
2779 * Enable read probes on this connection, if:
2780 * - it is in the established state
2781 * - it doesn't have any data outstanding
2782 * - the outgoing ifp matches
2783 * - we have not already sent any read probes
2784 */
2785 static void
2786 tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
2787 {
2788 if (tp->t_state == TCPS_ESTABLISHED &&
2789 tp->snd_max == tp->snd_una &&
2790 tp->t_inpcb->inp_last_outifp == ifp &&
2791 !(tp->t_flagsext & TF_DETECT_READSTALL) &&
2792 tp->t_rtimo_probes == 0) {
2793 tp->t_flagsext |= TF_DETECT_READSTALL;
2794 tp->t_rtimo_probes = 0;
2795 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2796 TCP_TIMER_10MS_QUANTUM);
2797 if (tp->tentry.index == TCPT_NONE) {
2798 tp->tentry.index = TCPT_KEEP;
2799 tp->tentry.runtime = tcp_now +
2800 TCP_TIMER_10MS_QUANTUM;
2801 } else {
2802 int32_t diff = 0;
2803
2804 /* Reset runtime to be in next 10ms */
2805 diff = timer_diff(tp->tentry.runtime, 0,
2806 tcp_now, TCP_TIMER_10MS_QUANTUM);
2807 if (diff > 0) {
2808 tp->tentry.index = TCPT_KEEP;
2809 tp->tentry.runtime = tcp_now +
2810 TCP_TIMER_10MS_QUANTUM;
2811 if (tp->tentry.runtime == 0) {
2812 tp->tentry.runtime++;
2813 }
2814 }
2815 }
2816 }
2817 }
2818
2819 /*
2820 * Disable read probe and reset the keep alive timer
2821 */
2822 static void
2823 tcp_disable_read_probe(struct tcpcb *tp)
2824 {
2825 if (tp->t_adaptive_rtimo == 0 &&
2826 ((tp->t_flagsext & TF_DETECT_READSTALL) ||
2827 tp->t_rtimo_probes > 0)) {
2828 tcp_keepalive_reset(tp);
2829
2830 if (tp->t_mpsub) {
2831 mptcp_reset_keepalive(tp);
2832 }
2833 }
2834 }
2835
2836 /*
2837 * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
2838 * probes on connections going over a particular interface.
2839 */
2840 void
2841 tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
2842 {
2843 int32_t offset;
2844 struct tcptimerlist *listp = &tcp_timer_list;
2845 struct inpcbinfo *pcbinfo = &tcbinfo;
2846 struct inpcb *inp, *nxt;
2847
2848 if (ifp == NULL) {
2849 return;
2850 }
2851
2852 /* update clock */
2853 calculate_tcp_clock();
2854
2855 /*
2856 * Enable keep alive timer on all connections that are
2857 * active/established on this interface.
2858 */
2859 lck_rw_lock_shared(&pcbinfo->ipi_lock);
2860
2861 LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
2862 struct tcpcb *tp = NULL;
2863 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
2864 WNT_STOPUSING) {
2865 continue;
2866 }
2867
2868 /* Acquire lock to look at the state of the connection */
2869 socket_lock(inp->inp_socket, 1);
2870
2871 /* Release the want count */
2872 if (inp->inp_ppcb == NULL ||
2873 (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
2874 socket_unlock(inp->inp_socket, 1);
2875 continue;
2876 }
2877 tp = intotcpcb(inp);
2878 if (enable) {
2879 tcp_enable_read_probe(tp, ifp);
2880 } else {
2881 tcp_disable_read_probe(tp);
2882 }
2883
2884 socket_unlock(inp->inp_socket, 1);
2885 }
2886 lck_rw_done(&pcbinfo->ipi_lock);
2887
2888 lck_mtx_lock(&listp->mtx);
2889 if (listp->running) {
2890 listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
2891 goto done;
2892 }
2893
2894 /* Reschedule within the next 10ms */
2895 offset = TCP_TIMER_10MS_QUANTUM;
2896 if (listp->scheduled) {
2897 int32_t diff;
2898 diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2899 if (diff <= 0) {
2900 /* The timer will fire sooner than what's needed */
2901 goto done;
2902 }
2903 }
2904 listp->mode = TCP_TIMERLIST_10MS_MODE;
2905 listp->idleruns = 0;
2906
2907 tcp_sched_timerlist(offset);
2908 done:
2909 lck_mtx_unlock(&listp->mtx);
2910 return;
2911 }
2912
2913 inline void
2914 tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
2915 {
2916 struct if_cellular_status_v1 *ifsr;
2917 u_int32_t optlen;
2918 ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2919 if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2920 optlen = tp->t_maxopd - tp->t_maxseg;
2921
2922 if (ifsr->mss_recommended ==
2923 IF_CELL_UL_MSS_RECOMMENDED_NONE &&
2924 tp->t_cached_maxopd > 0 &&
2925 tp->t_maxopd < tp->t_cached_maxopd) {
2926 tp->t_maxopd = tp->t_cached_maxopd;
2927 tcpstat.tcps_mss_to_default++;
2928 } else if (ifsr->mss_recommended ==
2929 IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
2930 tp->t_maxopd > tcp_mss_rec_medium) {
2931 tp->t_cached_maxopd = tp->t_maxopd;
2932 tp->t_maxopd = tcp_mss_rec_medium;
2933 tcpstat.tcps_mss_to_medium++;
2934 } else if (ifsr->mss_recommended ==
2935 IF_CELL_UL_MSS_RECOMMENDED_LOW &&
2936 tp->t_maxopd > tcp_mss_rec_low) {
2937 tp->t_cached_maxopd = tp->t_maxopd;
2938 tp->t_maxopd = tcp_mss_rec_low;
2939 tcpstat.tcps_mss_to_low++;
2940 }
2941 tp->t_maxseg = tp->t_maxopd - optlen;
2942
2943 /*
2944 * Clear the cached value if it is the same as the current one.
2945 */
2946 if (tp->t_maxopd == tp->t_cached_maxopd) {
2947 tp->t_cached_maxopd = 0;
2948 }
2949 }
2950 }
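/*
 * Worked example with illustrative numbers: suppose t_maxopd == 1440
 * and t_maxseg == 1428, so optlen == 12 (e.g. timestamps). If the
 * driver recommends the medium MSS and tcp_mss_rec_medium were, say,
 * 1280, the old 1440 is cached in t_cached_maxopd and t_maxopd
 * becomes 1280, giving t_maxseg = 1280 - 12 = 1268. When the
 * recommendation returns to NONE, the cached 1440 is restored the
 * same way.
 */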
2951
2952 void
2953 tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
2954 {
2955 struct inpcb *inp = sotoinpcb(so);
2956 struct tcpcb *tp = intotcpcb(inp);
2957
2958 if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) {
2959 return;
2960 }
2961
2962 if (!IFNET_IS_CELLULAR(ifp)) {
2963 /*
2964 * This optimization is implemented for cellular
2965 * networks only
2966 */
2967 return;
2968 }
2969 if (tp->t_state <= TCPS_CLOSE_WAIT) {
2970 /*
2971 * If the connection is currently doing or has done PMTU
2972 * blackhole detection, do not change the MSS
2973 */
2974 if (tp->t_flags & TF_BLACKHOLE) {
2975 return;
2976 }
2977 if (ifp->if_link_status == NULL) {
2978 return;
2979 }
2980 tcp_update_mss_core(tp, ifp);
2981 }
2982 }
2983
2984 void
2985 tcp_itimer(struct inpcbinfo *ipi)
2986 {
2987 struct inpcb *inp, *nxt;
2988
2989 if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
2990 if (tcp_itimer_done == TRUE) {
2991 tcp_itimer_done = FALSE;
2992 atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
2993 return;
2994 }
2995 /* The try-lock failed; block and take the lock exclusively */
2996 lck_rw_lock_exclusive(&ipi->ipi_lock);
2997 }
2998 tcp_itimer_done = TRUE;
2999
3000 LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
3001 struct socket *so;
3002 struct ifnet *ifp;
3003
3004 if (inp->inp_ppcb == NULL ||
3005 in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
3006 continue;
3007 }
3008 so = inp->inp_socket;
3009 ifp = inp->inp_last_outifp;
3010 socket_lock(so, 1);
3011 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3012 socket_unlock(so, 1);
3013 continue;
3014 }
3015 so_check_extended_bk_idle_time(so);
3016 if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
3017 tcp_update_mss_locked(so, NULL);
3018 }
3019 socket_unlock(so, 1);
3020
3021 /*
3022 * Defunct all system-initiated background sockets if the
3023 * socket is using the cellular interface and the interface
3024 * has its LQM set to abort.
3025 */
3026 if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
3027 IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
3028 ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
3029 (ifp->if_interface_state.valid_bitmask &
3030 IF_INTERFACE_STATE_LQM_STATE_VALID) &&
3031 ifp->if_interface_state.lqm_state ==
3032 IFNET_LQM_THRESH_ABORT) {
3033 socket_defunct(current_proc(), so,
3034 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
3035 }
3036 }
3037
3038 ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
3039 lck_rw_done(&ipi->ipi_lock);
3040 }
3041