1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
61  * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
62  */
63 
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/mbuf.h>
69 #include <sys/sysctl.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
72 #include <sys/protosw.h>
73 #include <sys/domain.h>
74 #include <sys/mcache.h>
75 #include <sys/queue.h>
76 #include <kern/locks.h>
77 #include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */
78 #include <mach/boolean.h>
79 
80 #include <net/route.h>
81 #include <net/if_var.h>
82 #include <net/ntstat.h>
83 
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/in_pcb.h>
87 #include <netinet/in_var.h>
88 #include <netinet6/in6_pcb.h>
89 #include <netinet/ip_var.h>
90 #include <netinet/tcp.h>
91 #include <netinet/tcp_cache.h>
92 #include <netinet/tcp_fsm.h>
93 #include <netinet/tcp_seq.h>
94 #include <netinet/tcp_timer.h>
95 #include <netinet/tcp_var.h>
96 #include <netinet/tcp_cc.h>
97 #include <netinet6/tcp6_var.h>
98 #include <netinet/tcpip.h>
99 #if TCPDEBUG
100 #include <netinet/tcp_debug.h>
101 #endif
102 #include <netinet/tcp_log.h>
103 
104 #include <sys/kdebug.h>
105 #include <mach/sdt.h>
106 #include <netinet/mptcp_var.h>
107 
108 /* Max number of times a stretch ack can be delayed on a connection */
109 #define TCP_STRETCHACK_DELAY_THRESHOLD  5
110 
111 /*
112  * If the host processor has been sleeping for too long, this is the threshold
113  * used to avoid sending stale retransmissions.
114  */
115 #define TCP_SLEEP_TOO_LONG      (10 * 60 * 1000) /* 10 minutes in ms */
116 
117 /* tcp timer list */
118 struct tcptimerlist tcp_timer_list;
119 
120 /* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
121 struct tcptailq tcp_tw_tailq;
122 
123 
124 static int
125 sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
126 {
127 #pragma unused(arg2)
128 	int error, temp;
129 	long s, tt;
130 
131 	tt = *(int *)arg1;
132 	s = tt * 1000 / TCP_RETRANSHZ;
133 	if (tt < 0 || s > INT_MAX) {
134 		return EINVAL;
135 	}
136 	temp = (int)s;
137 
138 	error = sysctl_handle_int(oidp, &temp, 0, req);
139 	if (error || !req->newptr) {
140 		return error;
141 	}
142 
143 	tt = (long)temp * TCP_RETRANSHZ / 1000;
144 	if (tt < 1 || tt > INT_MAX) {
145 		return EINVAL;
146 	}
147 
148 	*(int *)arg1 = (int)tt;
149 	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
150 	return 0;
151 }
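/*
 * Worked round-trip example (illustrative, assuming TCP_RETRANSHZ is
 * 1000 so that one tick is one millisecond, as the tcp_delack comment
 * further down implies): reading one of the PROC oids below exports
 * the stored tick count as milliseconds (tt * 1000 / TCP_RETRANSHZ),
 * and a write of e.g. 30000 converts back to 30000 ticks.  Values
 * whose conversion would fall outside [1, INT_MAX] ticks are rejected
 * with EINVAL before the stored value is touched.
 */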
152 
153 #if SYSCTL_SKMEM
154 int     tcp_keepinit = TCPTV_KEEP_INIT;
155 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
156     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
157     &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
158     sysctl_msec_to_ticks, "I", "");
159 
160 int     tcp_keepidle = TCPTV_KEEP_IDLE;
161 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
162     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
163     &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
164     sysctl_msec_to_ticks, "I", "");
165 
166 int     tcp_keepintvl = TCPTV_KEEPINTVL;
167 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
168     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
169     &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
170     sysctl_msec_to_ticks, "I", "");
171 
172 SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
173     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
174     int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");
175 
176 int     tcp_msl = TCPTV_MSL;
177 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
178     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
179     &tcp_msl, offsetof(skmem_sysctl, tcp.msl),
180     sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
181 #else /* SYSCTL_SKMEM */
182 int     tcp_keepinit;
183 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
184     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
185     &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
186 
187 int     tcp_keepidle;
188 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
189     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
190     &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
191 
192 int     tcp_keepintvl;
193 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
194     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
195     &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
196 
197 int     tcp_keepcnt;
198 SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
199     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
200     &tcp_keepcnt, 0, "number of times to repeat keepalive");
201 
202 int     tcp_msl;
203 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
204     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
205     &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
206 #endif /* SYSCTL_SKMEM */
207 
208 /*
209  * Avoid DoS with connections half-closed in FIN_WAIT_2
210  */
211 int     tcp_fin_timeout = TCPTV_FINWAIT2;
212 
213 static int
214 sysctl_tcp_fin_timeout SYSCTL_HANDLER_ARGS
215 {
216 #pragma unused(arg2)
217 	int error;
218 	int value = tcp_fin_timeout;
219 
220 	error = sysctl_handle_int(oidp, &value, 0, req);
221 	if (error != 0 || req->newptr == USER_ADDR_NULL) {
222 		return error;
223 	}
224 
225 	if (value == -1) {
226 		/* Reset to default value */
227 		value = TCPTV_FINWAIT2;
228 	} else {
229 		/* Convert from milliseconds */
230 		long big_value = value * TCP_RETRANSHZ / 1000;
231 
232 		if (big_value < 0 || big_value > INT_MAX) {
233 			return EINVAL;
234 		}
235 		value = (int)big_value;
236 	}
237 	tcp_fin_timeout = value;
238 	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, value);
239 	return 0;
240 }
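/*
 * Usage note (illustrative): writing -1 through this handler restores
 * the TCPTV_FINWAIT2 default, while any other value is taken as
 * milliseconds and converted to ticks with the same kind of range
 * check as sysctl_msec_to_ticks() above.
 */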
241 
242 #if SYSCTL_SKMEM
243 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
244     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
245     &tcp_fin_timeout, offsetof(skmem_sysctl, tcp.fin_timeout),
246     sysctl_tcp_fin_timeout, "I", "");
247 #else /* SYSCTL_SKMEM */
248 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
249     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
250     &tcp_fin_timeout, 0,
251     sysctl_tcp_fin_timeout, "I", "");
252 #endif /* SYSCTL_SKMEM */
253 
254 /*
255  * Avoid DoS via TCP Robustness in Persist Condition
256  * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
257  * by allowing a system wide maximum persistence timeout value when in
258  * Zero Window Probe mode.
259  *
260  * Expressed in milliseconds to be consistent with other timeout-related
261  * values; the TCP socket option is in seconds.
262  */
263 #if SYSCTL_SKMEM
264 u_int32_t tcp_max_persist_timeout = 0;
265 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
266     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
267     &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
268     sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
269 #else /* SYSCTL_SKMEM */
270 u_int32_t tcp_max_persist_timeout = 0;
271 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
272     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
273     &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
274     "Maximum persistence timeout for ZWP");
275 #endif /* SYSCTL_SKMEM */
276 
277 SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
278     CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
279     "Assume SO_KEEPALIVE on all TCP connections");
280 
281 /*
282  * This parameter determines how long the timer list will stay in fast or
283  * quick mode even though all connections are idle. In this state, the
284  * timer will run more frequently anticipating new data.
285  */
286 SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
287     CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
288     TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");
289 
290 /*
291  * See tcp_syn_backoff[] for interval values between SYN retransmits;
292  * the value set below defines the number of retransmits before we
293  * disable the timestamp and window scaling options during subsequent
294  * SYN retransmits.  Setting it to 0 disables the dropping of those
295  * two options.
296  */
297 SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
298     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
299     10, "Number of retransmitted SYNs before disabling RFC 1323 "
300     "options on local connections");
301 
302 static int tcp_timer_advanced = 0;
303 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
304     CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
305     "Number of times one of the timers was advanced");
306 
307 static int tcp_resched_timerlist = 0;
308 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
309     CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
310     "Number of times timer list was rescheduled as part of processing a packet");
311 
312 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
313     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
314     "Path MTU Discovery Black Hole Detection");
315 
316 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
317     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
318     "Path MTU Discovery Black Hole Detection lowered MSS");
319 
320 #if (DEBUG || DEVELOPMENT)
321 int tcp_probe_if_fix_port = 0;
322 SYSCTL_INT(_net_inet_tcp, OID_AUTO, probe_if_fix_port,
323     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
324     &tcp_probe_if_fix_port, 0, "");
325 #endif /* (DEBUG || DEVELOPMENT) */
326 
327 static u_int32_t tcp_mss_rec_medium = 1200;
328 static u_int32_t tcp_mss_rec_low = 512;
329 
330 #define TCP_REPORT_STATS_INTERVAL       43200 /* 12 hours, in seconds */
331 int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;
332 
333 /* performed garbage collection of "used" sockets */
334 static boolean_t tcp_gc_done = FALSE;
335 
336 /* max idle probes */
337 int     tcp_maxpersistidle = TCPTV_KEEP_IDLE;
338 
339 /*
340  * The TCP delack timer is set to 100 ms. Since the processing of the timer
341  * list in fast mode will happen no faster than every 100 ms, the delayed
342  * ack timer will fire somewhere between 100 and 200 ms.
343  */
344 int     tcp_delack = TCP_RETRANSHZ / 10;
345 
346 #if MPTCP
347 /*
348  * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
349  */
350 int     tcp_jack_rxmt = TCP_RETRANSHZ / 2;
351 #endif /* MPTCP */
352 
353 static boolean_t tcp_itimer_done = FALSE;
354 
355 static void tcp_remove_timer(struct tcpcb *tp);
356 static void tcp_sched_timerlist(uint32_t offset);
357 static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
358     u_int16_t probe_if_index);
359 static inline void tcp_set_lotimer_index(struct tcpcb *);
360 __private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
361 static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
362 __private_extern__ void tcp_report_stats(void);
363 
364 static  u_int64_t tcp_last_report_time;
365 
366 /*
367  * Structure to store previously reported stats so that we can send
368  * incremental changes in each report interval.
369  */
370 struct tcp_last_report_stats {
371 	u_int32_t       tcps_connattempt;
372 	u_int32_t       tcps_accepts;
373 	u_int32_t       tcps_ecn_client_setup;
374 	u_int32_t       tcps_ecn_server_setup;
375 	u_int32_t       tcps_ecn_client_success;
376 	u_int32_t       tcps_ecn_server_success;
377 	u_int32_t       tcps_ecn_not_supported;
378 	u_int32_t       tcps_ecn_lost_syn;
379 	u_int32_t       tcps_ecn_lost_synack;
380 	u_int32_t       tcps_ecn_recv_ce;
381 	u_int32_t       tcps_ecn_recv_ece;
382 	u_int32_t       tcps_ecn_sent_ece;
383 	u_int32_t       tcps_ecn_conn_recv_ce;
384 	u_int32_t       tcps_ecn_conn_recv_ece;
385 	u_int32_t       tcps_ecn_conn_plnoce;
386 	u_int32_t       tcps_ecn_conn_pl_ce;
387 	u_int32_t       tcps_ecn_conn_nopl_ce;
388 	u_int32_t       tcps_ecn_fallback_synloss;
389 	u_int32_t       tcps_ecn_fallback_reorder;
390 	u_int32_t       tcps_ecn_fallback_ce;
391 
392 	/* TFO-related statistics */
393 	u_int32_t       tcps_tfo_syn_data_rcv;
394 	u_int32_t       tcps_tfo_cookie_req_rcv;
395 	u_int32_t       tcps_tfo_cookie_sent;
396 	u_int32_t       tcps_tfo_cookie_invalid;
397 	u_int32_t       tcps_tfo_cookie_req;
398 	u_int32_t       tcps_tfo_cookie_rcv;
399 	u_int32_t       tcps_tfo_syn_data_sent;
400 	u_int32_t       tcps_tfo_syn_data_acked;
401 	u_int32_t       tcps_tfo_syn_loss;
402 	u_int32_t       tcps_tfo_blackhole;
403 	u_int32_t       tcps_tfo_cookie_wrong;
404 	u_int32_t       tcps_tfo_no_cookie_rcv;
405 	u_int32_t       tcps_tfo_heuristics_disable;
406 	u_int32_t       tcps_tfo_sndblackhole;
407 
408 	/* MPTCP-related statistics */
409 	u_int32_t       tcps_mptcp_handover_attempt;
410 	u_int32_t       tcps_mptcp_interactive_attempt;
411 	u_int32_t       tcps_mptcp_aggregate_attempt;
412 	u_int32_t       tcps_mptcp_fp_handover_attempt;
413 	u_int32_t       tcps_mptcp_fp_interactive_attempt;
414 	u_int32_t       tcps_mptcp_fp_aggregate_attempt;
415 	u_int32_t       tcps_mptcp_heuristic_fallback;
416 	u_int32_t       tcps_mptcp_fp_heuristic_fallback;
417 	u_int32_t       tcps_mptcp_handover_success_wifi;
418 	u_int32_t       tcps_mptcp_handover_success_cell;
419 	u_int32_t       tcps_mptcp_interactive_success;
420 	u_int32_t       tcps_mptcp_aggregate_success;
421 	u_int32_t       tcps_mptcp_fp_handover_success_wifi;
422 	u_int32_t       tcps_mptcp_fp_handover_success_cell;
423 	u_int32_t       tcps_mptcp_fp_interactive_success;
424 	u_int32_t       tcps_mptcp_fp_aggregate_success;
425 	u_int32_t       tcps_mptcp_handover_cell_from_wifi;
426 	u_int32_t       tcps_mptcp_handover_wifi_from_cell;
427 	u_int32_t       tcps_mptcp_interactive_cell_from_wifi;
428 	u_int64_t       tcps_mptcp_handover_cell_bytes;
429 	u_int64_t       tcps_mptcp_interactive_cell_bytes;
430 	u_int64_t       tcps_mptcp_aggregate_cell_bytes;
431 	u_int64_t       tcps_mptcp_handover_all_bytes;
432 	u_int64_t       tcps_mptcp_interactive_all_bytes;
433 	u_int64_t       tcps_mptcp_aggregate_all_bytes;
434 	u_int32_t       tcps_mptcp_back_to_wifi;
435 	u_int32_t       tcps_mptcp_wifi_proxy;
436 	u_int32_t       tcps_mptcp_cell_proxy;
437 	u_int32_t       tcps_mptcp_triggered_cell;
438 };
439 
440 
441 /* Returns true if the timer is on the timer list */
442 #define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)
443 
444 /* Run the TCP timerlist at least once every hour */
445 #define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)
446 
447 
448 static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
449 static boolean_t tcp_garbage_collect(struct inpcb *, int);
450 
451 #define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))
452 
453 #define VERIFY_NEXT_LINK(elm, field) do {       \
454 	if (LIST_NEXT((elm),field) != NULL &&   \
455 	    LIST_NEXT((elm),field)->field.le_prev !=    \
456 	        &((elm)->field.le_next))        \
457 	        panic("Bad link elm %p next->prev != elm", (elm));      \
458 } while(0)
459 
460 #define VERIFY_PREV_LINK(elm, field) do {       \
461 	if (*(elm)->field.le_prev != (elm))     \
462 	        panic("Bad link elm %p prev->next != elm", (elm));      \
463 } while(0)
464 
465 #define TCP_SET_TIMER_MODE(mode, i) do { \
466 	if (IS_TIMER_HZ_10MS(i)) \
467 	        (mode) |= TCP_TIMERLIST_10MS_MODE; \
468 	else if (IS_TIMER_HZ_100MS(i)) \
469 	        (mode) |= TCP_TIMERLIST_100MS_MODE; \
470 	else \
471 	        (mode) |= TCP_TIMERLIST_500MS_MODE; \
472 } while(0)
473 
474 #if (DEVELOPMENT || DEBUG)
475 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
476     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
477     "Medium MSS based on recommendation in link status report");
478 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
479     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
480     "Low MSS based on recommendation in link status report");
481 
482 static int32_t tcp_change_mss_recommended = 0;
483 static int
484 sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
485 {
486 #pragma unused(oidp, arg1, arg2)
487 	int i, err = 0, changed = 0;
488 	struct ifnet *ifp;
489 	struct if_link_status ifsr;
490 	struct if_cellular_status_v1 *new_cell_sr;
491 	err = sysctl_io_number(req, tcp_change_mss_recommended,
492 	    sizeof(int32_t), &i, &changed);
493 	if (changed) {
494 		if (i < 0 || i > UINT16_MAX) {
495 			return EINVAL;
496 		}
497 		ifnet_head_lock_shared();
498 		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
499 			if (IFNET_IS_CELLULAR(ifp)) {
500 				bzero(&ifsr, sizeof(ifsr));
501 				new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
502 				ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
503 				ifsr.ifsr_len = sizeof(*new_cell_sr);
504 
505 				/* Set MSS recommended */
506 				new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
507 				new_cell_sr->mss_recommended = (uint16_t)i;
508 				err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(new_cell_sr));
509 				if (err == 0) {
510 					tcp_change_mss_recommended = i;
511 				} else {
512 					break;
513 				}
514 			}
515 		}
516 		ifnet_head_done();
517 	}
518 	return err;
519 }
520 
521 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
522     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
523     0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");
524 
525 SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
526     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
527     "Report stats interval");
528 #endif /* (DEVELOPMENT || DEBUG) */
529 
530 /*
531  * Compare two timers. If there is a reset of the sign bit,
532  * it is safe to assume that the timer has wrapped around. By doing
533  * a signed comparison, we take care of wraparound such that the value
534  * with the sign bit reset is actually ahead of the other.
535  */
536 inline int32_t
537 timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
538 {
539 	return (int32_t)((t1 + toff1) - (t2 + toff2));
540 }
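/*
 * Worked wraparound example (illustrative): with t1 = 2, toff1 = 0,
 * t2 = 0xfffffffa and toff2 = 0, the unsigned subtraction yields 8
 * and the signed cast returns +8, so t1 is treated as 8 ticks ahead
 * of t2 even though it is numerically smaller; the comparison thus
 * stays correct across a tcp_now rollover.
 */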
541 
542 /*
543  * Add to tcp timewait list, delay is given in milliseconds.
544  */
545 static void
546 add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
547 {
548 	struct inpcbinfo *pcbinfo = &tcbinfo;
549 	struct inpcb *inp = tp->t_inpcb;
550 	uint32_t timer;
551 
552 	/* pcb list should be locked when we get here */
553 	LCK_RW_ASSERT(&pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
554 
555 	/* We may get here multiple times, so check */
556 	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
557 		pcbinfo->ipi_twcount++;
558 		inp->inp_flags2 |= INP2_TIMEWAIT;
559 
560 		/* Remove from global inp list */
561 		LIST_REMOVE(inp, inp_list);
562 	} else {
563 		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
564 	}
565 
566 	/* Compute the time at which this socket can be closed */
567 	timer = tcp_now + delay;
568 
569 	/* We will use the TCPT_2MSL timer for tracking this delay */
570 
571 	if (TIMER_IS_ON_LIST(tp)) {
572 		tcp_remove_timer(tp);
573 	}
574 	tp->t_timer[TCPT_2MSL] = timer;
575 
576 	TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
577 }
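/*
 * For example (illustrative): a connection entering TIME_WAIT with a
 * delay of 2 * tcp_msl gets t_timer[TCPT_2MSL] = tcp_now + delay, and
 * tcp_gc() below reaps it once tcp_now advances past that stamp (the
 * TSTMP_GEQ check there is wraparound-safe).
 */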
578 
579 void
580 add_to_time_wait(struct tcpcb *tp, uint32_t delay)
581 {
582 	struct inpcbinfo *pcbinfo = &tcbinfo;
583 	if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) {
584 		socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
585 	}
586 
587 	tcp_del_fsw_flow(tp);
588 
589 	/* 19182803: Notify nstat that connection is closing before waiting. */
590 	nstat_pcb_detach(tp->t_inpcb);
591 
592 	if (!lck_rw_try_lock_exclusive(&pcbinfo->ipi_lock)) {
593 		socket_unlock(tp->t_inpcb->inp_socket, 0);
594 		lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
595 		socket_lock(tp->t_inpcb->inp_socket, 0);
596 	}
597 	add_to_time_wait_locked(tp, delay);
598 	lck_rw_done(&pcbinfo->ipi_lock);
599 
600 	inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
601 }
602 
603 /* If this is on time wait queue, remove it. */
604 void
605 tcp_remove_from_time_wait(struct inpcb *inp)
606 {
607 	struct tcpcb *tp = intotcpcb(inp);
608 	if (inp->inp_flags2 & INP2_TIMEWAIT) {
609 		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
610 	}
611 }
612 
613 static boolean_t
614 tcp_garbage_collect(struct inpcb *inp, int istimewait)
615 {
616 	boolean_t active = FALSE;
617 	struct socket *so, *mp_so = NULL;
618 	struct tcpcb *tp;
619 
620 	so = inp->inp_socket;
621 	tp = intotcpcb(inp);
622 
623 	if (so->so_flags & SOF_MP_SUBFLOW) {
624 		mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
625 		if (!socket_try_lock(mp_so)) {
626 			mp_so = NULL;
627 			active = TRUE;
628 			goto out;
629 		}
630 		if (mpsotomppcb(mp_so)->mpp_inside > 0) {
631 			os_log(mptcp_log_handle, "%s - %lx: Still inside %d usecount %d\n", __func__,
632 			    (unsigned long)VM_KERNEL_ADDRPERM(mpsotompte(mp_so)),
633 			    mpsotomppcb(mp_so)->mpp_inside,
634 			    mp_so->so_usecount);
635 			socket_unlock(mp_so, 0);
636 			mp_so = NULL;
637 			active = TRUE;
638 			goto out;
639 		}
640 		/* We call socket_unlock with refcount further below */
641 		mp_so->so_usecount++;
642 		tptomptp(tp)->mpt_mpte->mpte_mppcb->mpp_inside++;
643 	}
644 
645 	/*
646 	 * Skip if still in use or busy; it would have been more efficient
647 	 * if we were to test so_usecount against 0, but this isn't possible
648 	 * due to the current implementation of tcp_dropdropablreq() where
649 	 * overflow sockets that are eligible for garbage collection have
650 	 * their usecounts set to 1.
651 	 */
652 	if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
653 		active = TRUE;
654 		goto out;
655 	}
656 
657 	/* Check again under the lock */
658 	if (so->so_usecount > 1) {
659 		if (inp->inp_wantcnt == WNT_STOPUSING) {
660 			active = TRUE;
661 		}
662 		lck_mtx_unlock(&inp->inpcb_mtx);
663 		goto out;
664 	}
665 
666 	if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
667 	    tp->t_state != TCPS_CLOSED) {
668 		/* Become a regular mutex */
669 		lck_mtx_convert_spin(&inp->inpcb_mtx);
670 		tcp_close(tp);
671 	}
672 
673 	/*
674 	 * Overflowed socket dropped from the listening queue?  Do this
675 	 * only if we are called to clean up the time wait slots, since
676 	 * tcp_dropdropablreq() considers a socket to have been fully
677 	 * dropped after add_to_time_wait() is finished.
678 	 * Also handle the case of connections getting closed by the peer
679 	 * while in the queue as seen with rdar://6422317
680 	 *
681 	 */
682 	if (so->so_usecount == 1 &&
683 	    ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
684 	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
685 	    (so->so_head != NULL) &&
686 	    ((so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) ==
687 	    (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE))))) {
688 		if (inp->inp_state != INPCB_STATE_DEAD) {
689 			/* Become a regular mutex */
690 			lck_mtx_convert_spin(&inp->inpcb_mtx);
691 			if (SOCK_CHECK_DOM(so, PF_INET6)) {
692 				in6_pcbdetach(inp);
693 			} else {
694 				in_pcbdetach(inp);
695 			}
696 		}
697 		VERIFY(so->so_usecount > 0);
698 		so->so_usecount--;
699 		if (inp->inp_wantcnt == WNT_STOPUSING) {
700 			active = TRUE;
701 		}
702 		lck_mtx_unlock(&inp->inpcb_mtx);
703 		goto out;
704 	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
705 		lck_mtx_unlock(&inp->inpcb_mtx);
706 		active = FALSE;
707 		goto out;
708 	}
709 
710 	/*
711 	 * We get here because the PCB is no longer searchable
712 	 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
713 	 * (usecount is 0).  This covers all cases, including overflow
714 	 * sockets and those that are considered as "embryonic",
715 	 * i.e. created by sonewconn() in the TCP input path, and have
716 	 * not yet been committed.  For the former, we reduce the usecount
717 	 * to 0 as done by the code above.  For the latter, the usecount
718 	 * would have been reduced to 0 as part of calling soabort() when the
719 	 * socket is dropped at the end of tcp_input().
720 	 */
721 	if (so->so_usecount == 0) {
722 		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
723 		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
724 		/* Become a regular mutex */
725 		lck_mtx_convert_spin(&inp->inpcb_mtx);
726 
727 		/*
728 		 * If this tp still happens to be on the timer list,
729 		 * take it out
730 		 */
731 		if (TIMER_IS_ON_LIST(tp)) {
732 			tcp_remove_timer(tp);
733 		}
734 
735 		if (inp->inp_state != INPCB_STATE_DEAD) {
736 			if (SOCK_CHECK_DOM(so, PF_INET6)) {
737 				in6_pcbdetach(inp);
738 			} else {
739 				in_pcbdetach(inp);
740 			}
741 		}
742 
743 		if (mp_so) {
744 			mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);
745 
746 			/* so is now unlinked from mp_so - let's drop the lock */
747 			socket_unlock(mp_so, 1);
748 			mp_so = NULL;
749 		}
750 
751 		in_pcbdispose(inp);
752 		active = FALSE;
753 		goto out;
754 	}
755 
756 	lck_mtx_unlock(&inp->inpcb_mtx);
757 	active = TRUE;
758 
759 out:
760 	if (mp_so) {
761 		socket_unlock(mp_so, 1);
762 	}
763 
764 	return active;
765 }
766 
767 /*
768  * TCP garbage collector callback (inpcb_timer_func_t).
769  *
770  * Reports the number of pcbs that will need to be gc-ed soon via the
771  * ipi_gc_req counters; a nonzero count keeps the timer active.
772  */
773 void
774 tcp_gc(struct inpcbinfo *ipi)
775 {
776 	struct inpcb *inp, *nxt;
777 	struct tcpcb *tw_tp, *tw_ntp;
778 #if TCPDEBUG
779 	int ostate;
780 #endif
781 #if  KDEBUG
782 	static int tws_checked = 0;
783 #endif
784 
785 	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);
786 
787 	/*
788 	 * Update tcp_now here as it may get used while
789 	 * processing the slow timer.
790 	 */
791 	calculate_tcp_clock();
792 
793 	/*
794 	 * Garbage collect socket/tcpcb: We need to acquire the list lock
795 	 * exclusively to do this
796 	 */
797 
798 	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
799 		/* don't sweat it this time; cleanup was done last time */
800 		if (tcp_gc_done == TRUE) {
801 			tcp_gc_done = FALSE;
802 			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
803 			    tws_checked, cur_tw_slot, 0, 0, 0);
804 			/* Lock upgrade failed, give up this round */
805 			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
806 			return;
807 		}
808 		/* Upgrade failed, lost the lock; now take it again exclusive */
809 		lck_rw_lock_exclusive(&ipi->ipi_lock);
810 	}
811 	tcp_gc_done = TRUE;
812 
813 	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
814 		if (tcp_garbage_collect(inp, 0)) {
815 			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
816 		}
817 	}
818 
819 	/* Now cleanup the time wait ones */
820 	TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
821 		/*
822 		 * We check the timestamp here without holding the
823 		 * socket lock for better performance. If there are
824 		 * any pcbs in time-wait, the timer will get rescheduled.
825 		 * Hence some error in this check can be tolerated.
826 		 *
827 		 * Sometimes a socket on time-wait queue can be closed if
828 		 * 2MSL timer expired but the application still has a
829 		 * usecount on it.
830 		 */
831 		if (tw_tp->t_state == TCPS_CLOSED ||
832 		    TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
833 			if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) {
834 				atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
835 			}
836 		}
837 	}
838 
839 	/* take into account pcbs that are still in time_wait_slots */
840 	atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount);
841 
842 	lck_rw_done(&ipi->ipi_lock);
843 
844 	/* Clean up the socache while we are here */
845 	if (so_cache_timer()) {
846 		atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
847 	}
848 
849 	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
850 	    cur_tw_slot, 0, 0, 0);
851 
852 	return;
853 }
854 
855 /*
856  * Cancel all timers for TCP tp.
857  */
858 void
859 tcp_canceltimers(struct tcpcb *tp)
860 {
861 	int i;
862 
863 	tcp_remove_timer(tp);
864 	for (i = 0; i < TCPT_NTIMERS; i++) {
865 		tp->t_timer[i] = 0;
866 	}
867 	tp->tentry.timer_start = tcp_now;
868 	tp->tentry.index = TCPT_NONE;
869 }
870 
871 int     tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
872 { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
873 
874 int     tcp_backoff[TCP_MAXRXTSHIFT + 1] =
875 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
876 
877 static int tcp_totbackoff = 511;        /* sum of tcp_backoff[] */
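/*
 * Sanity check (illustrative): 1 + 2 + 4 + 8 + 16 + 32 + 7 * 64 = 511,
 * i.e. tcp_totbackoff is the sum of the thirteen tcp_backoff[] entries
 * above.  With a 500 ms base RTO, the retransmit timer would fire
 * roughly 0.5, 1, 2, 4, 8, 16 and 32 seconds apart on successive
 * timeouts and then stay at the 64x multiplier, always clamped by
 * TCPTV_REXMTMAX in TCPT_RANGESET().
 */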
878 
879 void
880 tcp_rexmt_save_state(struct tcpcb *tp)
881 {
882 	u_int32_t fsize;
883 	if (TSTMP_SUPPORTED(tp)) {
884 		/*
885 		 * Since timestamps are supported on the connection,
886 		 * we can do recovery as described in rfc 4015.
887 		 */
888 		fsize = tp->snd_max - tp->snd_una;
889 		tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
890 		tp->snd_recover_prev = tp->snd_recover;
891 	} else {
892 		/*
893 		 * Timestamp option is not supported on this connection.
894 		 * Record ssthresh and cwnd so they can
895 		 * be recovered if this turns out to be a "bad" retransmit.
896 		 * A retransmit is considered "bad" if an ACK for this
897 		 * segment is received within RTT/2 interval; the assumption
898 		 * here is that the ACK was already in flight.  See
899 		 * "On Estimating End-to-End Network Path Properties" by
900 		 * Allman and Paxson for more details.
901 		 */
902 		tp->snd_cwnd_prev = tp->snd_cwnd;
903 		tp->snd_ssthresh_prev = tp->snd_ssthresh;
904 		tp->snd_recover_prev = tp->snd_recover;
905 		if (IN_FASTRECOVERY(tp)) {
906 			tp->t_flags |= TF_WASFRECOVERY;
907 		} else {
908 			tp->t_flags &= ~TF_WASFRECOVERY;
909 		}
910 	}
911 	tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
912 	tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
913 	tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
914 }
915 
916 /*
917  * Revert to the older segment size if there is an indication that PMTU
918  * blackhole detection was not needed.
919  */
920 void
921 tcp_pmtud_revert_segment_size(struct tcpcb *tp)
922 {
923 	int32_t optlen;
924 
925 	VERIFY(tp->t_pmtud_saved_maxopd > 0);
926 	tp->t_flags |= TF_PMTUD;
927 	tp->t_flags &= ~TF_BLACKHOLE;
928 	optlen = tp->t_maxopd - tp->t_maxseg;
929 	tp->t_maxopd = tp->t_pmtud_saved_maxopd;
930 	tp->t_maxseg = tp->t_maxopd - optlen;
931 
932 	/*
933 	 * Reset the slow-start flight size as it
934 	 * may depend on the new MSS
935 	 */
936 	if (CC_ALGO(tp)->cwnd_init != NULL) {
937 		CC_ALGO(tp)->cwnd_init(tp);
938 	}
939 
940 	if (TCP_USE_RLEDBAT(tp, tp->t_inpcb->inp_socket) &&
941 	    tcp_cc_rledbat.rwnd_init != NULL) {
942 		tcp_cc_rledbat.rwnd_init(tp);
943 	}
944 
945 	tp->t_pmtud_start_ts = 0;
946 	tcpstat.tcps_pmtudbh_reverted++;
947 
948 	/* change MSS according to recommendation, if there was one */
949 	tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
950 }
951 
952 static uint32_t
953 tcp_pmtud_black_holed_next_mss(struct tcpcb *tp)
954 {
955 	/* Reduce the MSS to intermediary value */
956 	if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
957 		return tcp_pmtud_black_hole_mss;
958 	} else {
959 		if (tp->t_inpcb->inp_vflag & INP_IPV4) {
960 			return tcp_mssdflt;
961 		} else {
962 			return tcp_v6mssdflt;
963 		}
964 	}
965 }
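/*
 * Illustrative MSS ladder: an Ethernet-sized t_maxopd of 1460 first
 * drops to tcp_pmtud_black_hole_mss (1200 by default); if segments
 * still do not get through at that size, a later call steps down to
 * the protocol minimum, tcp_mssdflt for IPv4 or tcp_v6mssdflt for
 * IPv6.
 */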
966 
967 /*
968  * Send a packet designed to force a response
969  * if the peer is up and reachable:
970  * either an ACK if the connection is still alive,
971  * or an RST if the peer has closed the connection
972  * due to timeout or reboot.
973  * Using sequence number tp->snd_una-1
974  * causes the transmitted zero-length segment
975  * to lie outside the receive window;
976  * by the protocol spec, this requires the
977  * correspondent TCP to respond.
978  */
979 static bool
980 tcp_send_keep_alive(struct tcpcb *tp)
981 {
982 	struct tcptemp *t_template;
983 
984 	tcpstat.tcps_keepprobe++;
985 	t_template = tcp_maketemplate(tp);
986 	if (t_template != NULL) {
987 		struct inpcb *inp = tp->t_inpcb;
988 		struct tcp_respond_args tra;
989 
990 		bzero(&tra, sizeof(tra));
991 		tra.nocell = INP_NO_CELLULAR(inp);
992 		tra.noexpensive = INP_NO_EXPENSIVE(inp);
993 		tra.noconstrained = INP_NO_CONSTRAINED(inp);
994 		tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
995 		tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp);
996 		tra.keep_alive = 1;
997 		if (tp->t_inpcb->inp_flags & INP_BOUND_IF) {
998 			tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
999 		} else {
1000 			tra.ifscope = IFSCOPE_NONE;
1001 		}
1002 		tcp_respond(tp, t_template->tt_ipgen,
1003 		    &t_template->tt_t, (struct mbuf *)NULL,
1004 		    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
1005 		(void) m_free(dtom(t_template));
1006 		return true;
1007 	} else {
1008 		return false;
1009 	}
1010 }
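/*
 * For illustration: if snd_una is 1000, the probe above carries
 * sequence number 999 with no payload.  That byte sits just left of
 * the peer's receive window, so a live peer must answer with a pure
 * ACK (or an RST if it has forgotten the connection), which is the
 * liveness signal the keepalive machinery is after.
 */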
1011 
1012 /*
1013  * TCP timer processing.
1014  */
1015 struct tcpcb *
1016 tcp_timers(struct tcpcb *tp, int timer)
1017 {
1018 	int32_t rexmt, optlen = 0, idle_time = 0;
1019 	struct socket *so;
1020 #if TCPDEBUG
1021 	int ostate;
1022 #endif
1023 	u_int64_t accsleep_ms;
1024 	u_int64_t last_sleep_ms = 0;
1025 
1026 	so = tp->t_inpcb->inp_socket;
1027 	idle_time = tcp_now - tp->t_rcvtime;
1028 
1029 	switch (timer) {
1030 	/*
1031 	 * 2 MSL timeout in shutdown went off.  If we're closed but
1032 	 * still waiting for peer to close and connection has been idle
1033 	 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
1034 	 * delete connection control block.
1035 	 * Otherwise (this case shouldn't happen), check again in a bit;
1036 	 * we keep the socket in the main list in that case.
1037 	 */
1038 	case TCPT_2MSL:
1039 		tcp_free_sackholes(tp);
1040 		if (tp->t_state != TCPS_TIME_WAIT &&
1041 		    tp->t_state != TCPS_FIN_WAIT_2 &&
1042 		    ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
1043 			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
1044 			    (u_int32_t)TCP_CONN_KEEPINTVL(tp));
1045 		} else {
1046 			if (tp->t_state == TCPS_FIN_WAIT_2) {
1047 				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1048 				    "FIN wait timeout drop");
1049 				tcpstat.tcps_fin_timeout_drops++;
1050 				tp = tcp_drop(tp, 0);
1051 			} else {
1052 				tp = tcp_close(tp);
1053 			}
1054 			return tp;
1055 		}
1056 		break;
1057 
1058 	/*
1059 	 * Retransmission timer went off.  Message has not
1060 	 * been acked within retransmit interval.  Back off
1061 	 * to a longer retransmit interval and retransmit one segment.
1062 	 */
1063 	case TCPT_REXMT:
1064 		absolutetime_to_nanoseconds(mach_absolutetime_asleep,
1065 		    &accsleep_ms);
1066 		accsleep_ms = accsleep_ms / 1000000UL;
1067 		if (accsleep_ms > tp->t_accsleep_ms) {
1068 			last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
1069 		}
1070 		/*
1071 		 * Drop a connection in the retransmit timer
1072 		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
1073 		 * times
1074 		 * 2. If the time spent in this retransmission episode is
1075 		 * more than the time limit set with TCP_RXT_CONNDROPTIME
1076 		 * socket option
1077 		 * 3. If TCP_RXT_FINDROP socket option was set and
1078 		 * we have already retransmitted the FIN 3 times without
1079 		 * receiving an ack
1080 		 */
1081 		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
1082 		    (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
1083 		    (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
1084 		    ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
1085 		    (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
1086 		    (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
1087 			if (tp->t_state == TCPS_ESTABLISHED &&
1088 			    tp->t_rxt_minimum_timeout > 0) {
1089 				/*
1090 				 * Avoid dropping a connection if minimum
1091 				 * timeout is set and that time did not
1092 				 * pass. We will retry sending
1093 				 * retransmissions at the maximum interval
1094 				 */
1095 				if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
1096 				    tp->t_rxt_minimum_timeout))) {
1097 					tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
1098 					goto retransmit_packet;
1099 				}
1100 			}
1101 			if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
1102 				tcpstat.tcps_rxtfindrop++;
1103 			} else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
1104 				tcpstat.tcps_drop_after_sleep++;
1105 			} else {
1106 				tcpstat.tcps_timeoutdrop++;
1107 			}
1108 			if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
1109 				if (TCP_ECN_ENABLED(tp)) {
1110 					INP_INC_IFNET_STAT(tp->t_inpcb,
1111 					    ecn_on.rxmit_drop);
1112 				} else {
1113 					INP_INC_IFNET_STAT(tp->t_inpcb,
1114 					    ecn_off.rxmit_drop);
1115 				}
1116 			}
1117 			tp->t_rxtshift = TCP_MAXRXTSHIFT;
1118 			soevent(so,
1119 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1120 
1121 			if (TCP_ECN_ENABLED(tp) &&
1122 			    tp->t_state == TCPS_ESTABLISHED) {
1123 				tcp_heuristic_ecn_droprxmt(tp);
1124 			}
1125 
1126 			TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1127 			    "retransmission timeout drop");
1128 			tp = tcp_drop(tp, tp->t_softerror ?
1129 			    tp->t_softerror : ETIMEDOUT);
1130 
1131 			break;
1132 		}
1133 retransmit_packet:
1134 		tcpstat.tcps_rexmttimeo++;
1135 		tp->t_accsleep_ms = accsleep_ms;
1136 
1137 		if (tp->t_rxtshift == 1 &&
1138 		    tp->t_state == TCPS_ESTABLISHED) {
1139 			/* Set the time at which retransmission started. */
1140 			tp->t_rxtstart = tcp_now;
1141 
1142 			/*
1143 			 * if this is the first retransmit timeout, save
1144 			 * the state so that we can recover if the timeout
1145 			 * is spurious.
1146 			 */
1147 			tcp_rexmt_save_state(tp);
1148 			tcp_ccdbg_trace(tp, NULL, TCP_CC_FIRST_REXMT);
1149 		}
1150 #if MPTCP
1151 		if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
1152 		    (tp->t_state == TCPS_ESTABLISHED) &&
1153 		    (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
1154 			mptcp_act_on_txfail(so);
1155 		}
1156 
1157 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
1158 		    (so->so_flags & SOF_MP_SUBFLOW)) {
1159 			struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1160 
1161 			if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER ||
1162 			    mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
1163 				mptcp_check_subflows_and_add(mpte);
1164 			}
1165 		}
1166 #endif /* MPTCP */
1167 
1168 		if (tp->t_adaptive_wtimo > 0 &&
1169 		    tp->t_rxtshift > tp->t_adaptive_wtimo &&
1170 		    TCPS_HAVEESTABLISHED(tp->t_state)) {
1171 			/* Send an event to the application */
1172 			soevent(so,
1173 			    (SO_FILT_HINT_LOCKED |
1174 			    SO_FILT_HINT_ADAPTIVE_WTIMO));
1175 		}
1176 
1177 		/*
1178 		 * If this is a retransmit timeout after PTO, the PTO
1179 		 * was not effective
1180 		 */
1181 		if (tp->t_flagsext & TF_SENT_TLPROBE) {
1182 			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1183 			tcpstat.tcps_rto_after_pto++;
1184 		}
1185 
1186 		if (tp->t_flagsext & TF_DELAY_RECOVERY) {
1187 			/*
1188 			 * Retransmit timer fired before entering recovery
1189 			 * on a connection with packet re-ordering. This
1190 			 * suggests that the reordering metrics computed
1191 			 * are not accurate.
1192 			 */
1193 			tp->t_reorderwin = 0;
1194 			tp->t_timer[TCPT_DELAYFR] = 0;
1195 			tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
1196 		}
1197 
1198 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1199 		    tp->t_state == TCPS_SYN_RECEIVED) {
1200 			tcp_disable_tfo(tp);
1201 		}
1202 
1203 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1204 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1205 		    (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
1206 		    !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
1207 		    ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
1208 		    tp->t_rxtshift > 4)) {
1209 			/*
1210 			 * For regular retransmissions, the first one is
1211 			 * done as a tail-loss probe.
1212 			 * Thus, if rxtshift > 1, this means we have sent the segment
1213 			 * a total of 3 times.
1214 			 *
1215 			 * If we are in SYN-SENT state, then there is no tail-loss
1216 			 * probe thus we have to let rxtshift go up to 3.
1217 			 */
1218 			tcp_heuristic_tfo_middlebox(tp);
1219 
1220 			so->so_error = ENODATA;
1221 			soevent(so,
1222 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1223 			sorwakeup(so);
1224 			sowwakeup(so);
1225 
1226 			tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
1227 			tcpstat.tcps_tfo_sndblackhole++;
1228 		}
1229 
1230 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1231 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1232 		    (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
1233 		    tp->t_rxtshift > 3) {
1234 			if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
1235 				tcp_heuristic_tfo_middlebox(tp);
1236 
1237 				so->so_error = ENODATA;
1238 				soevent(so,
1239 				    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1240 				sorwakeup(so);
1241 				sowwakeup(so);
1242 			}
1243 		}
1244 
1245 		if (tp->t_state == TCPS_SYN_SENT) {
1246 			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
1247 			tp->t_stat.synrxtshift = tp->t_rxtshift;
1248 			tp->t_stat.rxmitsyns++;
1249 
1250 			/* When retransmitting, disable TFO */
1251 			if (tfo_enabled(tp) &&
1252 			    !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) {
1253 				tcp_disable_tfo(tp);
1254 				tp->t_tfo_flags |= TFO_F_SYN_LOSS;
1255 			}
1256 		} else {
1257 			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
1258 		}
1259 
1260 		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
1261 		    TCP_ADD_REXMTSLOP(tp));
1262 		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
1263 
1264 		TCP_LOG_RTT_INFO(tp);
1265 
1266 		if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) {
1267 			goto fc_output;
1268 		}
1269 
1270 		tcp_free_sackholes(tp);
1271 		/*
1272 		 * Check for potential Path MTU Discovery Black Hole
1273 		 */
1274 		if (tcp_pmtud_black_hole_detect &&
1275 		    !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
1276 		    (tp->t_state == TCPS_ESTABLISHED)) {
1277 			if ((tp->t_flags & TF_PMTUD) &&
1278 			    tp->t_pmtud_lastseg_size > tcp_pmtud_black_holed_next_mss(tp) &&
1279 			    tp->t_rxtshift == 2) {
1280 				/*
1281 				 * Enter Path MTU Black-hole Detection mechanism:
1282 				 * - Disable Path MTU Discovery (IP "DF" bit).
1283 				 * - Reduce MTU to lower value than what we
1284 				 * negotiated with the peer.
1285 				 */
1286 				/* Disable Path MTU Discovery for now */
1287 				tp->t_flags &= ~TF_PMTUD;
1288 				/* Record that we may have found a black hole */
1289 				tp->t_flags |= TF_BLACKHOLE;
1290 				optlen = tp->t_maxopd - tp->t_maxseg;
1291 				/* Keep track of previous MSS */
1292 				tp->t_pmtud_saved_maxopd = tp->t_maxopd;
1293 				tp->t_pmtud_start_ts = tcp_now;
1294 				if (tp->t_pmtud_start_ts == 0) {
1295 					tp->t_pmtud_start_ts++;
1296 				}
1297 				/* Reduce the MSS to intermediary value */
1298 				tp->t_maxopd = tcp_pmtud_black_holed_next_mss(tp);
1299 				tp->t_maxseg = tp->t_maxopd - optlen;
1300 
1301 				/*
1302 				 * Reset the slow-start flight size
1303 				 * as it may depend on the new MSS
1304 				 */
1305 				if (CC_ALGO(tp)->cwnd_init != NULL) {
1306 					CC_ALGO(tp)->cwnd_init(tp);
1307 				}
1308 				tp->snd_cwnd = tp->t_maxseg;
1309 
1310 				if (TCP_USE_RLEDBAT(tp, so) &&
1311 				    tcp_cc_rledbat.rwnd_init != NULL) {
1312 					tcp_cc_rledbat.rwnd_init(tp);
1313 				}
1314 			}
1315 			/*
1316 			 * If further retransmissions are still
1317 			 * unsuccessful with a lowered MTU, maybe this
1318 			 * isn't a Black Hole and we restore the previous
1319 			 * MSS and blackhole detection flags.
1320 			 */
1321 			else {
1322 				if ((tp->t_flags & TF_BLACKHOLE) &&
1323 				    (tp->t_rxtshift > 4)) {
1324 					tcp_pmtud_revert_segment_size(tp);
1325 					tp->snd_cwnd = tp->t_maxseg;
1326 				}
1327 			}
1328 		}
1329 
1330 		/*
1331 		 * Disable rfc1323 and rfc1644 if we haven't got any
1332 		 * response to our SYN (after we reach the threshold)
1333 	 * to work around some broken terminal servers (most of
1334 		 * which have hopefully been retired) that have bad VJ
1335 		 * header compression code which trashes TCP segments
1336 		 * containing unknown-to-them TCP options.
1337 		 * Do this only on non-local connections.
1338 		 */
1339 		if (tp->t_state == TCPS_SYN_SENT &&
1340 		    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) {
1341 			tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_REQ_CC);
1342 		}
1343 
1344 		/*
1345 		 * If losing, let the lower level know and try for
1346 		 * a better route.  Also, if we backed off this far,
1347 		 * our srtt estimate is probably bogus.  Clobber it
1348 		 * so we'll take the next rtt measurement as our srtt;
1349 		 * move the current srtt into rttvar to keep the current
1350 		 * retransmit times until then.
1351 		 */
1352 		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
1353 			if (!(tp->t_inpcb->inp_vflag & INP_IPV4)) {
1354 				in6_losing(tp->t_inpcb);
1355 			} else {
1356 				in_losing(tp->t_inpcb);
1357 			}
1358 			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
1359 			tp->t_srtt = 0;
1360 		}
1361 		tp->snd_nxt = tp->snd_una;
1362 		/*
1363 		 * Note:  We overload snd_recover to function also as the
1364 		 * snd_last variable described in RFC 2582
1365 		 */
1366 		tp->snd_recover = tp->snd_max;
1367 		/*
1368 		 * Force a segment to be sent.
1369 		 */
1370 		tp->t_flags |= TF_ACKNOW;
1371 
1372 		/* If timing a segment in this window, stop the timer */
1373 		tp->t_rtttime = 0;
1374 
1375 		if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) {
1376 			tcpstat.tcps_tailloss_rto++;
1377 		}
1378 
1379 
1380 		/*
1381 		 * RFC 5681 says: when a TCP sender detects segment loss
1382 		 * using retransmit timer and the given segment has already
1383 		 * been retransmitted by way of the retransmission timer at
1384 		 * least once, the value of ssthresh is held constant
1385 		 */
1386 		if (tp->t_rxtshift == 1 &&
1387 		    CC_ALGO(tp)->after_timeout != NULL) {
1388 			CC_ALGO(tp)->after_timeout(tp);
1389 			/*
1390 			 * CWR notifications are to be sent on new data
1391 			 * right after Fast Retransmits and ECE
1392 			 * notification receipts.
1393 			 */
1394 			if (TCP_ECN_ENABLED(tp)) {
1395 				tp->ecn_flags |= TE_SENDCWR;
1396 			}
1397 		}
1398 
1399 		EXIT_FASTRECOVERY(tp);
1400 
1401 		/* Exit cwnd non validated phase */
1402 		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
1403 
1404 
1405 fc_output:
1406 		tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);
1407 
1408 		(void) tcp_output(tp);
1409 		break;
1410 
1411 	/*
1412 	 * Persistence timer into zero window.
1413 	 * Force a byte to be output, if possible.
1414 	 */
1415 	case TCPT_PERSIST:
1416 		tcpstat.tcps_persisttimeo++;
1417 		/*
1418 		 * Hack: if the peer is dead/unreachable, we do not
1419 		 * time out if the window is closed.  After a full
1420 		 * backoff, drop the connection if the idle time
1421 		 * (no responses to probes) reaches the maximum
1422 		 * backoff that we would use if retransmitting.
1423 		 *
1424 		 * Drop the connection if we reached the maximum allowed time for
1425 		 * Zero Window Probes without a non-zero update from the peer.
1426 		 * See rdar://5805356
1427 		 */
1428 		if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
1429 		    (idle_time >= tcp_maxpersistidle ||
1430 		    idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
1431 		    ((tp->t_persist_stop != 0) &&
1432 		    TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
1433 			TCP_LOG_DROP_PCB(NULL, NULL, tp, false, "persist timeout drop");
1434 			tcpstat.tcps_persistdrop++;
1435 			soevent(so,
1436 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1437 			tp = tcp_drop(tp, ETIMEDOUT);
1438 			break;
1439 		}
1440 		tcp_setpersist(tp);
1441 		tp->t_flagsext |= TF_FORCE;
1442 		(void) tcp_output(tp);
1443 		tp->t_flagsext &= ~TF_FORCE;
1444 		break;
1445 
1446 	/*
1447 	 * Keep-alive timer went off; send something
1448 	 * or drop connection if idle for too long.
1449 	 */
1450 	case TCPT_KEEP:
1451 #if FLOW_DIVERT
1452 		if (tp->t_inpcb->inp_socket->so_flags & SOF_FLOW_DIVERT) {
1453 			break;
1454 		}
1455 #endif /* FLOW_DIVERT */
1456 
1457 		tcpstat.tcps_keeptimeo++;
1458 #if MPTCP
1459 		/*
1460 		 * Regular TCP connections do not send keepalives after closing.
1461 		 * MPTCP must not either, after sending Data FINs.
1462 		 */
1463 		struct mptcb *mp_tp = tptomptp(tp);
1464 		if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
1465 		    (tp->t_state > TCPS_ESTABLISHED)) {
1466 			goto dropit;
1467 		} else if (mp_tp != NULL) {
1468 			if ((mptcp_ok_to_keepalive(mp_tp) == 0)) {
1469 				goto dropit;
1470 			}
1471 		}
1472 #endif /* MPTCP */
1473 		if (tp->t_state < TCPS_ESTABLISHED) {
1474 			goto dropit;
1475 		}
1476 		if ((always_keepalive ||
1477 		    (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
1478 		    (tp->t_flagsext & TF_DETECT_READSTALL) ||
1479 		    (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
1480 		    (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
1481 			if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) {
1482 				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1483 				    "keep alive timeout drop");
1484 				goto dropit;
1485 			}
1486 
1487 			if (tcp_send_keep_alive(tp)) {
1488 				if (tp->t_flagsext & TF_DETECT_READSTALL) {
1489 					tp->t_rtimo_probes++;
1490 				}
1491 
1492 				TCP_LOG_KEEP_ALIVE(tp, idle_time);
1493 			}
1494 
1495 			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1496 			    TCP_CONN_KEEPINTVL(tp));
1497 		} else {
1498 			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1499 			    TCP_CONN_KEEPIDLE(tp));
1500 		}
1501 		if (tp->t_flagsext & TF_DETECT_READSTALL) {
1502 			struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
1503 			bool reenable_probe = false;
1504 			/*
1505 			 * The keep alive packets sent to detect a read
1506 			 * stall did not get a response from the
1507 			 * peer. Generate more keep-alives to confirm this.
1508 			 * If the number of probes sent reaches the limit,
1509 			 * generate an event.
1510 			 */
1511 			if (tp->t_adaptive_rtimo > 0) {
1512 				if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
1513 					/* Generate an event */
1514 					soevent(so,
1515 					    (SO_FILT_HINT_LOCKED |
1516 					    SO_FILT_HINT_ADAPTIVE_RTIMO));
1517 					tcp_keepalive_reset(tp);
1518 				} else {
1519 					reenable_probe = true;
1520 				}
1521 			} else if (outifp != NULL &&
1522 			    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
1523 			    tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
1524 				reenable_probe = true;
1525 			} else {
1526 				tp->t_flagsext &= ~TF_DETECT_READSTALL;
1527 			}
1528 			if (reenable_probe) {
1529 				int ind = min(tp->t_rtimo_probes,
1530 				    TCP_MAXRXTSHIFT);
1531 				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
1532 					tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
1533 			}
1534 		}
1535 		if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
1536 			int ind;
1537 
1538 			tp->t_tfo_probes++;
1539 			ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);
1540 
1541 			/*
1542 			 * We take the minimum among the time set by true
1543 			 * keepalive (see above) and the backed-off RTO. That
1544 			 * way we back off in case of packet loss but will never
1545 			 * time out slower than regular keepalive due to the
1546 			 * backing off.
1547 			 */
1548 			tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
1549 				    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
1550 			    tp->t_timer[TCPT_KEEP]);
1551 		} else if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1552 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1553 		    tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
1554 			/* Still no data! Let's assume a TFO-error and err out... */
1555 			tcp_heuristic_tfo_middlebox(tp);
1556 
1557 			so->so_error = ENODATA;
1558 			soevent(so,
1559 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1560 			sorwakeup(so);
1561 			tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
1562 			tcpstat.tcps_tfo_blackhole++;
1563 		}
1564 		break;
1565 	case TCPT_DELACK:
1566 		if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
1567 			tp->t_flags &= ~TF_DELACK;
1568 			tp->t_timer[TCPT_DELACK] = 0;
1569 			tp->t_flags |= TF_ACKNOW;
1570 
1571 			/*
1572 			 * If the delayed ack timer fired while stretching
1573 			 * acks, count the number of times the streaming
1574 			 * detection was not correct. If this exceeds a
1575 			 * threshold, disable stretch ack on this
1576 			 * connection.
1577 			 *
1578 			 * Also, go back to acking every other packet.
1579 			 */
1580 			if ((tp->t_flags & TF_STRETCHACK)) {
1581 				if (tp->t_unacksegs > 1 &&
1582 				    tp->t_unacksegs < maxseg_unacked) {
1583 					tp->t_stretchack_delayed++;
1584 				}
1585 
1586 				if (tp->t_stretchack_delayed >
1587 				    TCP_STRETCHACK_DELAY_THRESHOLD) {
1588 					tp->t_flagsext |= TF_DISABLE_STRETCHACK;
1589 					/*
1590 					 * Note the time at which stretch
1591 					 * ack was disabled automatically
1592 					 */
1593 					tp->rcv_nostrack_ts = tcp_now;
1594 					tcpstat.tcps_nostretchack++;
1595 					tp->t_stretchack_delayed = 0;
1596 					tp->rcv_nostrack_pkts = 0;
1597 				}
1598 				tcp_reset_stretch_ack(tp);
1599 			}
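			/*
			 * Net effect (a reading of the logic above): once the
			 * delayed-ack timer has caught stretch-ack mode with
			 * a partial ack run more than
			 * TCP_STRETCHACK_DELAY_THRESHOLD times, streaming
			 * detection is judged mispredicted and stretch ack is
			 * turned off for this connection; rcv_nostrack_ts
			 * records when, presumably so the decision can be
			 * revisited later.
			 */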
1600 			tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1601 
1602 			/*
1603 			 * If we are measuring inter packet arrival jitter
1604 			 * for throttling a connection, this delayed ack
1605 			 * might be the reason for accumulating some
1606 			 * jitter. So let's restart the measurement.
1607 			 */
1608 			CLEAR_IAJ_STATE(tp);
1609 
1610 			tcpstat.tcps_delack++;
1611 			tp->t_stat.delayed_acks_sent++;
1612 			(void) tcp_output(tp);
1613 		}
1614 		break;
1615 
1616 #if MPTCP
1617 	case TCPT_JACK_RXMT:
1618 		if ((tp->t_state == TCPS_ESTABLISHED) &&
1619 		    (tp->t_mpflags & TMPF_PREESTABLISHED) &&
1620 		    (tp->t_mpflags & TMPF_JOINED_FLOW)) {
1621 			if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
1622 				tcpstat.tcps_timeoutdrop++;
1623 				soevent(so,
1624 				    (SO_FILT_HINT_LOCKED |
1625 				    SO_FILT_HINT_TIMEOUT));
1626 				tp = tcp_drop(tp, tp->t_softerror ?
1627 				    tp->t_softerror : ETIMEDOUT);
1628 				break;
1629 			}
1630 			tcpstat.tcps_join_rxmts++;
1631 			tp->t_mpflags |= TMPF_SND_JACK;
1632 			tp->t_flags |= TF_ACKNOW;
1633 
1634 			/*
1635 			 * No backoff is implemented for simplicity for this
1636 			 * corner case.
1637 			 */
1638 			(void) tcp_output(tp);
1639 		}
1640 		break;
1641 	case TCPT_CELLICON:
1642 	{
1643 		struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1644 
1645 		tp->t_timer[TCPT_CELLICON] = 0;
1646 
1647 		if (mpte->mpte_cellicon_increments == 0) {
1648 			/* Cell-icon not set by this connection */
1649 			break;
1650 		}
1651 
1652 		if (TSTMP_LT(mpte->mpte_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE, tcp_now)) {
1653 			mptcp_unset_cellicon(mpte, NULL, 1);
1654 		}
1655 
1656 		if (mpte->mpte_cellicon_increments) {
1657 			tp->t_timer[TCPT_CELLICON] = OFFSET_FROM_START(tp, MPTCP_CELLICON_TOGGLE_RATE);
1658 		}
1659 
1660 		break;
1661 	}
1662 #endif /* MPTCP */
1663 
1664 	case TCPT_PTO:
1665 	{
1666 		int32_t ret = 0;
1667 
1668 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1669 			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1670 		}
1671 		/*
1672 		 * Check if the connection is in the right state to
1673 		 * send a probe
1674 		 */
1675 		if ((tp->t_state != TCPS_ESTABLISHED ||
1676 		    tp->t_rxtshift > 0 ||
1677 		    tp->snd_max == tp->snd_una ||
1678 		    !SACK_ENABLED(tp) ||
1679 		    (tcp_do_better_lr != 1 && !TAILQ_EMPTY(&tp->snd_holes)) ||
1680 		    IN_FASTRECOVERY(tp)) &&
1681 		    !(tp->t_flagsext & TF_IF_PROBING)) {
1682 			break;
1683 		}
1684 
1685 		/*
1686 		 * When the interface state changes, explicitly reset the retransmission
1687 		 * timer state for both SYN and data packets, because we do not want to
1688 		 * wait unnecessarily or time out too quickly if the link characteristics
1689 		 * have changed drastically.
1690 		 */
1691 		if (tp->t_flagsext & TF_IF_PROBING) {
1692 			tp->t_rxtshift = 0;
1693 			if (tp->t_state == TCPS_SYN_SENT) {
1694 				tp->t_stat.synrxtshift = tp->t_rxtshift;
1695 			}
1696 			/*
1697 			 * Reset to the default RTO
1698 			 */
1699 			tp->t_srtt = TCPTV_SRTTBASE;
1700 			tp->t_rttvar =
1701 			    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1702 			tp->t_rttmin = tp->t_flags & TF_LOCAL ? tcp_TCPTV_MIN :
1703 			    TCPTV_REXMTMIN;
1704 			TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1705 			    tp->t_rttmin, TCPTV_REXMTMAX, TCP_ADD_REXMTSLOP(tp));
1706 			TCP_LOG_RTT_INFO(tp);
1707 		}
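		/*
		 * With the stock constants (TCPTV_SRTTBASE is 0 in the BSD
		 * lineage), the srtt/rttvar seeding above makes
		 * TCP_REXMTVAL() collapse back to the base RTO, so the
		 * probing schedule restarts from the default timeout rather
		 * than a stale, backed-off one.
		 */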
1708 
1709 		if (tp->t_state == TCPS_SYN_SENT) {
1710 			/*
1711 			 * The PTO for SYN_SENT reinitializes TCP as if it were a fresh
1712 			 * connection attempt.
1713 			 */
1714 			tp->snd_nxt = tp->snd_una;
1715 			/*
1716 			 * Note:  We overload snd_recover to function also as the
1717 			 * snd_last variable described in RFC 2582
1718 			 */
1719 			tp->snd_recover = tp->snd_max;
1720 			/*
1721 			 * Force a segment to be sent.
1722 			 */
1723 			tp->t_flags |= TF_ACKNOW;
1724 
1725 			/* If timing a segment in this window, stop the timer */
1726 			tp->t_rtttime = 0;
1727 		} else {
1728 			int32_t snd_len;
1729 
1730 			/*
1731 			 * If there is no new data to send or if the
1732 			 * connection is limited by receive window then
1733 			 * retransmit the last segment, otherwise send
1734 			 * new data.
1735 			 */
1736 			snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
1737 			    - (tp->snd_max - tp->snd_una);
1738 			if (snd_len > 0) {
1739 				tp->snd_nxt = tp->snd_max;
1740 			} else {
1741 				snd_len = min((tp->snd_max - tp->snd_una),
1742 				    tp->t_maxseg);
1743 				tp->snd_nxt = tp->snd_max - snd_len;
1744 			}
1745 		}
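		/*
		 * Illustrative arithmetic (hypothetical numbers): with 10 KB
		 * queued, an 8 KB snd_wnd and 8 KB already in flight,
		 * snd_len above comes out to 0, so the probe retransmits the
		 * last min(in-flight, t_maxseg) bytes instead of sending new
		 * data.
		 */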
1746 
1747 		tcpstat.tcps_pto++;
1748 		if (tp->t_flagsext & TF_IF_PROBING) {
1749 			tcpstat.tcps_probe_if++;
1750 		}
1751 
1752 		/* If timing a segment in this window, stop the timer */
1753 		tp->t_rtttime = 0;
1754 		/* Note that a tail loss probe is being sent; exclude interface probes */
1755 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1756 			tp->t_flagsext |= TF_SENT_TLPROBE;
1757 			tp->t_tlpstart = tcp_now;
1758 		}
1759 
1760 		tp->snd_cwnd += tp->t_maxseg;
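		/*
		 * Inflate cwnd by one segment so the probe can go out even
		 * when the congestion window is fully used; the inflation is
		 * undone further below, after tcp_output() has run.
		 */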
1761 		/*
1762 		 * When tail-loss-probe fires, we reset the RTO timer, because
1763 		 * a probe just got sent, so we are good to push out the timer.
1764 		 *
1765 		 * Set to 0 to ensure that tcp_output() will reschedule it
1766 		 */
1767 		tp->t_timer[TCPT_REXMT] = 0;
1768 		ret = tcp_output(tp);
1769 
1770 #if (DEBUG || DEVELOPMENT)
1771 		if ((tp->t_flagsext & TF_IF_PROBING) &&
1772 		    ((IFNET_IS_COMPANION_LINK(tp->t_inpcb->inp_last_outifp)) ||
1773 		    tp->t_state == TCPS_SYN_SENT)) {
1774 			if (ret == 0 && tcp_probe_if_fix_port > 0 &&
1775 			    tcp_probe_if_fix_port <= IPPORT_HILASTAUTO) {
1776 				tp->t_timer[TCPT_REXMT] = 0;
1777 				tcp_set_lotimer_index(tp);
1778 			}
1779 
1780 			os_log(OS_LOG_DEFAULT,
1781 			    "%s: sent %s probe for %u > %u on interface %s"
1782 			    " (%u) %s(%d)",
1783 			    __func__,
1784 			    tp->t_state == TCPS_SYN_SENT ? "SYN" : "data",
1785 			    ntohs(tp->t_inpcb->inp_lport),
1786 			    ntohs(tp->t_inpcb->inp_fport),
1787 			    if_name(tp->t_inpcb->inp_last_outifp),
1788 			    tp->t_inpcb->inp_last_outifp->if_index,
1789 			    ret == 0 ? "succeeded" : "failed", ret);
1790 		}
1791 #endif /* DEBUG || DEVELOPMENT */
1792 
1793 		/*
1794 		 * When the connection is not idle, make sure the retransmission timer
1795 		 * is armed because it was set to zero above
1796 		 */
1797 		if ((tp->t_timer[TCPT_REXMT] == 0 || tp->t_timer[TCPT_PERSIST] == 0) &&
1798 		    (tp->t_inpcb->inp_socket->so_snd.sb_cc != 0 || tp->t_state == TCPS_SYN_SENT ||
1799 		    tp->t_state == TCPS_SYN_RECEIVED)) {
1800 			tp->t_timer[TCPT_REXMT] =
1801 			    OFFSET_FROM_START(tp, tp->t_rxtcur);
1802 
1803 			os_log(OS_LOG_DEFAULT,
1804 			    "%s: tcp_output() returned %u with retransmission timer disabled "
1805 			    "for %u > %u in state %d, reset timer to %d",
1806 			    __func__, ret,
1807 			    ntohs(tp->t_inpcb->inp_lport),
1808 			    ntohs(tp->t_inpcb->inp_fport),
1809 			    tp->t_state,
1810 			    tp->t_timer[TCPT_REXMT]);
1811 
1812 			tcp_check_timer_state(tp);
1813 		}
1814 		tp->snd_cwnd -= tp->t_maxseg;
1815 
1816 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1817 			tp->t_tlphighrxt = tp->snd_nxt;
1818 		}
1819 		break;
1820 	}
1821 	case TCPT_DELAYFR:
1822 		tp->t_flagsext &= ~TF_DELAY_RECOVERY;
1823 
1824 		/*
1825 		 * Don't do anything if any of the following is true:
1826 		 * - the connection is already in recovery
1827 		 * - everything up to snd_recover has been acknowledged
1828 		 * - the retransmit timeout has fired
1829 		 */
1830 		if (IN_FASTRECOVERY(tp) ||
1831 		    SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
1832 		    tp->t_rxtshift > 0) {
1833 			break;
1834 		}
1835 
1836 		VERIFY(SACK_ENABLED(tp));
1837 		tcp_rexmt_save_state(tp);
1838 		if (CC_ALGO(tp)->pre_fr != NULL) {
1839 			CC_ALGO(tp)->pre_fr(tp);
1840 			if (TCP_ECN_ENABLED(tp)) {
1841 				tp->ecn_flags |= TE_SENDCWR;
1842 			}
1843 		}
1844 		ENTER_FASTRECOVERY(tp);
1845 
1846 		tp->t_timer[TCPT_REXMT] = 0;
1847 		tcpstat.tcps_sack_recovery_episode++;
1848 		tp->t_sack_recovery_episode++;
1849 		tp->sack_newdata = tp->snd_nxt;
1850 		tp->snd_cwnd = tp->t_maxseg;
1851 		tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
1852 		(void) tcp_output(tp);
1853 		break;
1854 
1855 dropit:
1856 		tcpstat.tcps_keepdrops++;
1857 		soevent(so,
1858 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1859 		tp = tcp_drop(tp, ETIMEDOUT);
1860 		break;
1861 	}
1862 #if TCPDEBUG
1863 	if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
1864 		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
1865 		    PRU_SLOWTIMO);
1866 	}
1867 #endif
1868 	return tp;
1869 }
1870 
1871 /* Remove a timer entry from timer list */
1872 void
1873 tcp_remove_timer(struct tcpcb *tp)
1874 {
1875 	struct tcptimerlist *listp = &tcp_timer_list;
1876 
1877 	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
1878 	if (!(TIMER_IS_ON_LIST(tp))) {
1879 		return;
1880 	}
1881 	lck_mtx_lock(&listp->mtx);
1882 
1883 	/* Check if pcb is on timer list again after acquiring the lock */
1884 	if (!(TIMER_IS_ON_LIST(tp))) {
1885 		lck_mtx_unlock(&listp->mtx);
1886 		return;
1887 	}
1888 
1889 	if (listp->next_te != NULL && listp->next_te == &tp->tentry) {
1890 		listp->next_te = LIST_NEXT(&tp->tentry, le);
1891 	}
1892 
1893 	LIST_REMOVE(&tp->tentry, le);
1894 	tp->t_flags &= ~(TF_TIMER_ONLIST);
1895 
1896 	listp->entries--;
1897 
1898 	tp->tentry.le.le_next = NULL;
1899 	tp->tentry.le.le_prev = NULL;
1900 	lck_mtx_unlock(&listp->mtx);
1901 }
1902 
1903 /*
1904  * Function to check if the timerlist needs to be rescheduled to run
1905  * the timer entry on time. The point is to avoid taking the list
1906  * lock when the current schedule is already close enough.
1907  */
1908 
1909 static boolean_t
1910 need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
1911 {
1912 	struct tcptimerlist *listp = &tcp_timer_list;
1913 	int32_t diff;
1914 
1915 	/*
1916 	 * If the list is being processed then the state of the list is
1917 	 * in flux. In this case always acquire the lock and set the state
1918 	 * correctly.
1919 	 */
1920 	if (listp->running) {
1921 		return TRUE;
1922 	}
1923 
1924 	if (!listp->scheduled) {
1925 		return TRUE;
1926 	}
1927 
1928 	diff = timer_diff(listp->runtime, 0, runtime, 0);
1929 	if (diff <= 0) {
1930 		/* The list is going to run before this timer */
1931 		return FALSE;
1932 	} else {
1933 		if (mode & TCP_TIMERLIST_10MS_MODE) {
1934 			if (diff <= TCP_TIMER_10MS_QUANTUM) {
1935 				return FALSE;
1936 			}
1937 		} else if (mode & TCP_TIMERLIST_100MS_MODE) {
1938 			if (diff <= TCP_TIMER_100MS_QUANTUM) {
1939 				return FALSE;
1940 			}
1941 		} else {
1942 			if (diff <= TCP_TIMER_500MS_QUANTUM) {
1943 				return FALSE;
1944 			}
1945 		}
1946 	}
1947 	return TRUE;
1948 }
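/*
 * Example of the quantum tolerance above (illustrative numbers): a timer
 * in 100ms mode due in 90 ms, checked against a list already scheduled
 * for 100 ms out, gives diff = 10 <= TCP_TIMER_100MS_QUANTUM, so the
 * existing schedule is close enough and no reschedule is needed.
 */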
1949 
1950 void
1951 tcp_sched_timerlist(uint32_t offset)
1952 {
1953 	uint64_t deadline = 0;
1954 	struct tcptimerlist *listp = &tcp_timer_list;
1955 
1956 	LCK_MTX_ASSERT(&listp->mtx, LCK_MTX_ASSERT_OWNED);
1957 
1958 	offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
1959 	listp->runtime = tcp_now + offset;
1960 	listp->schedtime = tcp_now;
1961 	if (listp->runtime == 0) {
1962 		listp->runtime++;
1963 		offset++;
1964 	}
1965 
1966 	clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
1967 
1968 	thread_call_enter_delayed(listp->call, deadline);
1969 	listp->scheduled = TRUE;
1970 }
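/*
 * Note on units: offset is in milliseconds; passing USEC_PER_SEC as the
 * scale factor to clock_interval_to_deadline() makes each unit
 * 1,000,000 ns, i.e. one millisecond (per the kernel clock API, not
 * restated here).
 */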
1971 
1972 /*
1973  * Function to run the timers for a connection.
1974  *
1975  * Returns the offset of next timer to be run for this connection which
1976  * can be used to reschedule the timerlist.
1977  *
1978  * te_mode is an out parameter that indicates the modes of active
1979  * timers for this connection.
1980  */
1981 u_int32_t
1982 tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
1983     u_int16_t probe_if_index)
1984 {
1985 	struct socket *so;
1986 	u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
1987 	u_int32_t timer_val, offset = 0, lo_timer = 0;
1988 	int32_t diff;
1989 	boolean_t needtorun[TCPT_NTIMERS];
1990 	int count = 0;
1991 
1992 	VERIFY(tp != NULL);
1993 	bzero(needtorun, sizeof(needtorun));
1994 	*te_mode = 0;
1995 
1996 	socket_lock(tp->t_inpcb->inp_socket, 1);
1997 
1998 	so = tp->t_inpcb->inp_socket;
1999 	/* Release the want count on inp */
2000 	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
2001 	    == WNT_STOPUSING) {
2002 		if (TIMER_IS_ON_LIST(tp)) {
2003 			tcp_remove_timer(tp);
2004 		}
2005 
2006 		/* Looks like the TCP connection got closed while we
2007 		 * were waiting for the lock. Done.
2008 		 */
2009 		goto done;
2010 	}
2011 
2012 	/*
2013 	 * If this connection is over an interface that needs to
2014 	 * be probed, send probe packets to reinitiate communication.
2015 	 */
2016 	if (TCP_IF_STATE_CHANGED(tp, probe_if_index)) {
2017 		tp->t_flagsext |= TF_IF_PROBING;
2018 		tcp_timers(tp, TCPT_PTO);
2019 		tp->t_timer[TCPT_PTO] = 0;
2020 		tp->t_flagsext &= ~TF_IF_PROBING;
2021 	}
2022 
2023 	/*
2024 	 * Since the timer thread needs to wait for the TCP lock, it may race
2025 	 * with another thread that can cancel or reschedule the timer
2026 	 * that is about to run. Check if we need to run anything.
2027 	 */
2028 	if ((index = tp->tentry.index) == TCPT_NONE) {
2029 		goto done;
2030 	}
2031 
2032 	timer_val = tp->t_timer[index];
2033 
2034 	diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
2035 	if (diff > 0) {
2036 		if (tp->tentry.index != TCPT_NONE) {
2037 			offset = diff;
2038 			*(te_mode) = tp->tentry.mode;
2039 		}
2040 		goto done;
2041 	}
2042 
2043 	tp->t_timer[index] = 0;
2044 	if (timer_val > 0) {
2045 		tp = tcp_timers(tp, index);
2046 		if (tp == NULL) {
2047 			goto done;
2048 		}
2049 	}
2050 
2051 	/*
2052 	 * Check if there are any other timers that need to be run.
2053 	 * While doing so, adjust the timer values relative to tcp_now.
2054 	 */
2055 	tp->tentry.mode = 0;
2056 	for (i = 0; i < TCPT_NTIMERS; ++i) {
2057 		if (tp->t_timer[i] != 0) {
2058 			diff = timer_diff(tp->tentry.timer_start,
2059 			    tp->t_timer[i], tcp_now, 0);
2060 			if (diff <= 0) {
2061 				needtorun[i] = TRUE;
2062 				count++;
2063 			} else {
2064 				tp->t_timer[i] = diff;
2065 				needtorun[i] = FALSE;
2066 				if (lo_timer == 0 || diff < lo_timer) {
2067 					lo_timer = diff;
2068 					lo_index = i;
2069 				}
2070 				TCP_SET_TIMER_MODE(tp->tentry.mode, i);
2071 			}
2072 		}
2073 	}
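	/*
	 * At this point every remaining t_timer[] slot holds an offset
	 * relative to the new timer_start (tcp_now) rather than the old
	 * base, and lo_timer/lo_index track the soonest of them.
	 */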
2074 
2075 	tp->tentry.timer_start = tcp_now;
2076 	tp->tentry.index = lo_index;
2077 	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2078 
2079 	if (tp->tentry.index != TCPT_NONE) {
2080 		tp->tentry.runtime = tp->tentry.timer_start +
2081 		    tp->t_timer[tp->tentry.index];
2082 		if (tp->tentry.runtime == 0) {
2083 			tp->tentry.runtime++;
2084 		}
2085 	}
2086 
2087 	if (count > 0) {
2088 		/* run any other timers outstanding at this time. */
2089 		for (i = 0; i < TCPT_NTIMERS; ++i) {
2090 			if (needtorun[i]) {
2091 				tp->t_timer[i] = 0;
2092 				tp = tcp_timers(tp, i);
2093 				if (tp == NULL) {
2094 					offset = 0;
2095 					*(te_mode) = 0;
2096 					goto done;
2097 				}
2098 			}
2099 		}
2100 		tcp_set_lotimer_index(tp);
2101 	}
2102 
2103 	if (tp->tentry.index < TCPT_NONE) {
2104 		offset = tp->t_timer[tp->tentry.index];
2105 		*(te_mode) = tp->tentry.mode;
2106 	}
2107 
2108 done:
2109 	if (tp != NULL && tp->tentry.index == TCPT_NONE) {
2110 		tcp_remove_timer(tp);
2111 		offset = 0;
2112 	}
2113 
2114 	socket_unlock(so, 1);
2115 	return offset;
2116 }
2117 
2118 void
2119 tcp_run_timerlist(void * arg1, void * arg2)
2120 {
2121 #pragma unused(arg1, arg2)
2122 	struct tcptimerentry *te, *next_te;
2123 	struct tcptimerlist *listp = &tcp_timer_list;
2124 	struct tcpcb *tp;
2125 	uint32_t next_timer = 0; /* offset of the next timer on the list */
2126 	u_int16_t te_mode = 0;  /* modes of all active timers in a tcpcb */
2127 	u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
2128 	uint32_t active_count = 0;
2129 
2130 	calculate_tcp_clock();
2131 
2132 	lck_mtx_lock(&listp->mtx);
2133 
2134 	int32_t drift = tcp_now - listp->runtime;
2135 	if (drift <= 1) {
2136 		tcpstat.tcps_timer_drift_le_1_ms++;
2137 	} else if (drift <= 10) {
2138 		tcpstat.tcps_timer_drift_le_10_ms++;
2139 	} else if (drift <= 20) {
2140 		tcpstat.tcps_timer_drift_le_20_ms++;
2141 	} else if (drift <= 50) {
2142 		tcpstat.tcps_timer_drift_le_50_ms++;
2143 	} else if (drift <= 100) {
2144 		tcpstat.tcps_timer_drift_le_100_ms++;
2145 	} else if (drift <= 200) {
2146 		tcpstat.tcps_timer_drift_le_200_ms++;
2147 	} else if (drift <= 500) {
2148 		tcpstat.tcps_timer_drift_le_500_ms++;
2149 	} else if (drift <= 1000) {
2150 		tcpstat.tcps_timer_drift_le_1000_ms++;
2151 	} else {
2152 		tcpstat.tcps_timer_drift_gt_1000_ms++;
2153 	}
2154 
2155 	listp->running = TRUE;
2156 
2157 	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
2158 		uint32_t offset = 0;
2159 		uint32_t runtime = te->runtime;
2160 
2161 		tp = TIMERENTRY_TO_TP(te);
2162 
2163 		/*
2164 		 * An interface probe may need to happen before the previously scheduled runtime.
2165 		 */
2166 		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now) &&
2167 		    !TCP_IF_STATE_CHANGED(tp, listp->probe_if_index)) {
2168 			offset = timer_diff(runtime, 0, tcp_now, 0);
2169 			if (next_timer == 0 || offset < next_timer) {
2170 				next_timer = offset;
2171 			}
2172 			list_mode |= te->mode;
2173 			continue;
2174 		}
2175 
2176 		/*
2177 		 * Acquire an inp wantcnt on the inpcb so that the socket
2178 		 * won't get detached even if tcp_close is called
2179 		 */
2180 		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
2181 		    == WNT_STOPUSING) {
2182 			/*
2183 			 * Somehow this pcb went into the dead state while
2184 			 * on the timer list; just take it off the list.
2185 			 * Since the timer list entry pointers are
2186 			 * protected by the timer list lock, we can
2187 			 * do it here without the socket lock.
2188 			 */
2189 			if (TIMER_IS_ON_LIST(tp)) {
2190 				tp->t_flags &= ~(TF_TIMER_ONLIST);
2191 				LIST_REMOVE(&tp->tentry, le);
2192 				listp->entries--;
2193 
2194 				tp->tentry.le.le_next = NULL;
2195 				tp->tentry.le.le_prev = NULL;
2196 			}
2197 			continue;
2198 		}
2199 		active_count++;
2200 
2201 		/*
2202 		 * Store the next timerentry pointer before releasing the
2203 		 * list lock. If that entry has to be removed when we
2204 		 * release the lock, this pointer will be updated to the
2205 		 * element after that.
2206 		 */
2207 		listp->next_te = next_te;
2208 
2209 		VERIFY_NEXT_LINK(&tp->tentry, le);
2210 		VERIFY_PREV_LINK(&tp->tentry, le);
2211 
2212 		lck_mtx_unlock(&listp->mtx);
2213 
2214 		offset = tcp_run_conn_timer(tp, &te_mode,
2215 		    listp->probe_if_index);
2216 
2217 		lck_mtx_lock(&listp->mtx);
2218 
2219 		next_te = listp->next_te;
2220 		listp->next_te = NULL;
2221 
2222 		if (offset > 0 && te_mode != 0) {
2223 			list_mode |= te_mode;
2224 
2225 			if (next_timer == 0 || offset < next_timer) {
2226 				next_timer = offset;
2227 			}
2228 		}
2229 	}
2230 
2231 	if (!LIST_EMPTY(&listp->lhead)) {
2232 		uint32_t next_mode = 0;
2233 		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
2234 		    (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) {
2235 			next_mode = TCP_TIMERLIST_10MS_MODE;
2236 		} else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
2237 		    (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) {
2238 			next_mode = TCP_TIMERLIST_100MS_MODE;
2239 		} else {
2240 			next_mode = TCP_TIMERLIST_500MS_MODE;
2241 		}
2242 
2243 		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
2244 			listp->idleruns = 0;
2245 		} else {
2246 			/*
2247 			 * The next required mode is slow mode, but if
2248 			 * the last one was a faster mode and we did not
2249 			 * have enough idle runs, repeat the last mode.
2250 			 *
2251 			 * We try to keep the timer list in fast mode for
2252 			 * some idle time in expectation of new data.
2253 			 */
2254 			if (listp->mode != next_mode &&
2255 			    listp->idleruns < timer_fastmode_idlemax) {
2256 				listp->idleruns++;
2257 				next_mode = listp->mode;
2258 				next_timer = TCP_TIMER_100MS_QUANTUM;
2259 			} else {
2260 				listp->idleruns = 0;
2261 			}
2262 		}
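		/*
		 * Concretely (a reading of the logic above): after a burst
		 * of fast-mode activity, the list keeps rescheduling itself
		 * at TCP_TIMER_100MS_QUANTUM for up to
		 * timer_fastmode_idlemax idle runs before finally dropping
		 * to 500ms mode.
		 */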
2263 		listp->mode = next_mode;
2264 		if (listp->pref_offset != 0) {
2265 			next_timer = min(listp->pref_offset, next_timer);
2266 		}
2267 
2268 		if (listp->mode == TCP_TIMERLIST_500MS_MODE) {
2269 			next_timer = max(next_timer,
2270 			    TCP_TIMER_500MS_QUANTUM);
2271 		}
2272 
2273 		tcp_sched_timerlist(next_timer);
2274 	} else {
2275 		/*
2276 		 * Nothing needs to run soon, but always keep the list
2277 		 * running periodically, at a much coarser interval.
2278 		 */
2279 		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
2280 	}
2281 
2282 	listp->running = FALSE;
2283 	listp->pref_mode = 0;
2284 	listp->pref_offset = 0;
2285 	listp->probe_if_index = 0;
2286 
2287 	lck_mtx_unlock(&listp->mtx);
2288 }
2289 
2290 /*
2291  * Function to check if the timerlist needs to be rescheduled to run this
2292  * connection's timers correctly.
2293  */
2294 void
2295 tcp_sched_timers(struct tcpcb *tp)
2296 {
2297 	struct tcptimerentry *te = &tp->tentry;
2298 	u_int16_t index = te->index;
2299 	u_int16_t mode = te->mode;
2300 	struct tcptimerlist *listp = &tcp_timer_list;
2301 	int32_t offset = 0;
2302 	boolean_t list_locked = FALSE;
2303 
2304 	if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
2305 		/* Just return without adding the dead pcb to the list */
2306 		if (TIMER_IS_ON_LIST(tp)) {
2307 			tcp_remove_timer(tp);
2308 		}
2309 		return;
2310 	}
2311 
2312 	if (index == TCPT_NONE) {
2313 		/* Nothing to run */
2314 		tcp_remove_timer(tp);
2315 		return;
2316 	}
2317 
2318 	/*
2319 	 * compute the offset at which the next timer for this connection
2320 	 * has to run.
2321 	 */
2322 	offset = timer_diff(te->runtime, 0, tcp_now, 0);
2323 	if (offset <= 0) {
2324 		offset = 1;
2325 		tcp_timer_advanced++;
2326 	}
2327 
2328 	if (!TIMER_IS_ON_LIST(tp)) {
2329 		if (!list_locked) {
2330 			lck_mtx_lock(&listp->mtx);
2331 			list_locked = TRUE;
2332 		}
2333 
2334 		if (!TIMER_IS_ON_LIST(tp)) {
2335 			LIST_INSERT_HEAD(&listp->lhead, te, le);
2336 			tp->t_flags |= TF_TIMER_ONLIST;
2337 
2338 			listp->entries++;
2339 			if (listp->entries > listp->maxentries) {
2340 				listp->maxentries = listp->entries;
2341 			}
2342 
2343 			/* if the list is not scheduled, just schedule it */
2344 			if (!listp->scheduled) {
2345 				goto schedule;
2346 			}
2347 		}
2348 	}
2349 
2350 	/*
2351 	 * Timer entry is currently on the list, check if the list needs
2352 	 * to be rescheduled.
2353 	 */
2354 	if (need_to_resched_timerlist(te->runtime, mode)) {
2355 		tcp_resched_timerlist++;
2356 
2357 		if (!list_locked) {
2358 			lck_mtx_lock(&listp->mtx);
2359 			list_locked = TRUE;
2360 		}
2361 
2362 		VERIFY_NEXT_LINK(te, le);
2363 		VERIFY_PREV_LINK(te, le);
2364 
2365 		if (listp->running) {
2366 			listp->pref_mode |= mode;
2367 			if (listp->pref_offset == 0 ||
2368 			    offset < listp->pref_offset) {
2369 				listp->pref_offset = offset;
2370 			}
2371 		} else {
2372 			/*
2373 			 * The list could have been rescheduled while
2374 			 * this thread was waiting for the lock.
2375 			 */
2376 			if (listp->scheduled) {
2377 				int32_t diff;
2378 				diff = timer_diff(listp->runtime, 0,
2379 				    tcp_now, offset);
2380 				if (diff <= 0) {
2381 					goto done;
2382 				} else {
2383 					goto schedule;
2384 				}
2385 			} else {
2386 				goto schedule;
2387 			}
2388 		}
2389 	}
2390 	goto done;
2391 
2392 schedule:
2393 	/*
2394 	 * Since a connection with timers is getting scheduled, the timer
2395 	 * list moves from the idle to the active state, which is why
2396 	 * idleruns is reset.
2397 	 */
2398 	if (mode & TCP_TIMERLIST_10MS_MODE) {
2399 		listp->mode = TCP_TIMERLIST_10MS_MODE;
2400 		listp->idleruns = 0;
2401 		offset = min(offset, TCP_TIMER_10MS_QUANTUM);
2402 	} else if (mode & TCP_TIMERLIST_100MS_MODE) {
2403 		if (listp->mode > TCP_TIMERLIST_100MS_MODE) {
2404 			listp->mode = TCP_TIMERLIST_100MS_MODE;
2405 		}
2406 		listp->idleruns = 0;
2407 		offset = min(offset, TCP_TIMER_100MS_QUANTUM);
2408 	}
2409 	tcp_sched_timerlist(offset);
2410 
2411 done:
2412 	if (list_locked) {
2413 		lck_mtx_unlock(&listp->mtx);
2414 	}
2415 
2416 	return;
2417 }
2418 
2419 static inline void
2420 tcp_set_lotimer_index(struct tcpcb *tp)
2421 {
2422 	uint16_t i, lo_index = TCPT_NONE, mode = 0;
2423 	uint32_t lo_timer = 0;
2424 	for (i = 0; i < TCPT_NTIMERS; ++i) {
2425 		if (tp->t_timer[i] != 0) {
2426 			TCP_SET_TIMER_MODE(mode, i);
2427 			if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
2428 				lo_timer = tp->t_timer[i];
2429 				lo_index = i;
2430 			}
2431 		}
2432 	}
2433 	tp->tentry.index = lo_index;
2434 	tp->tentry.mode = mode;
2435 	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2436 
2437 	if (tp->tentry.index != TCPT_NONE) {
2438 		tp->tentry.runtime = tp->tentry.timer_start
2439 		    + tp->t_timer[tp->tentry.index];
2440 		if (tp->tentry.runtime == 0) {
2441 			tp->tentry.runtime++;
2442 		}
2443 	}
2444 }
2445 
2446 void
2447 tcp_check_timer_state(struct tcpcb *tp)
2448 {
2449 	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
2450 
2451 	if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) {
2452 		return;
2453 	}
2454 
2455 	tcp_set_lotimer_index(tp);
2456 
2457 	tcp_sched_timers(tp);
2458 	return;
2459 }
2460 
2461 static inline void
2462 tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
2463 {
2464 	/* handle wrap around */
2465 	int32_t diff = (int32_t) (cur - *prev);
2466 	if (diff > 0) {
2467 		*dest = diff;
2468 	} else {
2469 		*dest = 0;
2470 	}
2471 	*prev = cur;
2472 	return;
2473 }
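/*
 * Wraparound example (illustrative): prev = 0xFFFFFFF8 and cur = 0x8.
 * cur - *prev is 0x10 modulo 2^32, and (int32_t)0x10 == 16 > 0, so the
 * 16 events that straddled the 32-bit wrap are still reported.
 */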
2474 
2475 static inline void
2476 tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
2477 {
2478 	/* handle wrap around */
2479 	int64_t diff = (int64_t) (cur - *prev);
2480 	if (diff > 0) {
2481 		*dest = diff;
2482 	} else {
2483 		*dest = 0;
2484 	}
2485 	*prev = cur;
2486 	return;
2487 }
2488 
2489 __private_extern__ void
2490 tcp_report_stats(void)
2491 {
2492 	struct nstat_sysinfo_data data;
2493 	struct sockaddr_in dst;
2494 	struct sockaddr_in6 dst6;
2495 	struct rtentry *rt = NULL;
2496 	static struct tcp_last_report_stats prev;
2497 	u_int64_t var, uptime;
2498 
2499 #define stat    data.u.tcp_stats
2500 	if (((uptime = net_uptime()) - tcp_last_report_time) <
2501 	    tcp_report_stats_interval) {
2502 		return;
2503 	}
2504 
2505 	tcp_last_report_time = uptime;
2506 
2507 	bzero(&data, sizeof(data));
2508 	data.flags = NSTAT_SYSINFO_TCP_STATS;
2509 
2510 	bzero(&dst, sizeof(dst));
2511 	dst.sin_len = sizeof(dst);
2512 	dst.sin_family = AF_INET;
2513 
2514 	/* ipv4 avg rtt */
2515 	lck_mtx_lock(rnh_lock);
2516 	rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
2517 	    rt_tables[AF_INET], IFSCOPE_NONE);
2518 	lck_mtx_unlock(rnh_lock);
2519 	if (rt != NULL) {
2520 		RT_LOCK(rt);
2521 		if (rt_primary_default(rt, rt_key(rt)) &&
2522 		    rt->rt_stats != NULL) {
2523 			stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
2524 		}
2525 		RT_UNLOCK(rt);
2526 		rtfree(rt);
2527 		rt = NULL;
2528 	}
2529 
2530 	/* ipv6 avg rtt */
2531 	bzero(&dst6, sizeof(dst6));
2532 	dst6.sin6_len = sizeof(dst6);
2533 	dst6.sin6_family = AF_INET6;
2534 
2535 	lck_mtx_lock(rnh_lock);
2536 	rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
2537 	    rt_tables[AF_INET6], IFSCOPE_NONE);
2538 	lck_mtx_unlock(rnh_lock);
2539 	if (rt != NULL) {
2540 		RT_LOCK(rt);
2541 		if (rt_primary_default(rt, rt_key(rt)) &&
2542 		    rt->rt_stats != NULL) {
2543 			stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
2544 		}
2545 		RT_UNLOCK(rt);
2546 		rtfree(rt);
2547 		rt = NULL;
2548 	}
2549 
2550 	/* send packet loss rate, shift by 10 for precision */
2551 	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
2552 		var = tcpstat.tcps_sndrexmitpack << 10;
2553 		stat.send_plr = (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2554 	}
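	/*
	 * Fixed-point example (illustrative): 5 retransmits out of 1000
	 * sends gives (5 << 10) * 100 / 1000 = 512, i.e. 0.5% expressed in
	 * 1/1024ths of a percent.
	 */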
2555 
2556 	/* recv packet loss rate, shift by 10 for precision */
2557 	if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
2558 		var = tcpstat.tcps_recovered_pkts << 10;
2559 		stat.recv_plr = (uint32_t)((var * 100) / tcpstat.tcps_rcvpack);
2560 	}
2561 
2562 	/* RTO after tail loss, shift by 10 for precision */
2563 	if (tcpstat.tcps_sndrexmitpack > 0
2564 	    && tcpstat.tcps_tailloss_rto > 0) {
2565 		var = tcpstat.tcps_tailloss_rto << 10;
2566 		stat.send_tlrto_rate =
2567 		    (uint32_t)((var * 100) / tcpstat.tcps_sndrexmitpack);
2568 	}
2569 
2570 	/* packet reordering */
2571 	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
2572 		var = tcpstat.tcps_reordered_pkts << 10;
2573 		stat.send_reorder_rate =
2574 		    (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2575 	}
2576 
2577 	if (tcp_ecn_outbound == 1) {
2578 		stat.ecn_client_enabled = 1;
2579 	}
2580 	if (tcp_ecn_inbound == 1) {
2581 		stat.ecn_server_enabled = 1;
2582 	}
2583 	tcp_cumulative_stat(tcpstat.tcps_connattempt,
2584 	    &prev.tcps_connattempt, &stat.connection_attempts);
2585 	tcp_cumulative_stat(tcpstat.tcps_accepts,
2586 	    &prev.tcps_accepts, &stat.connection_accepts);
2587 	tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
2588 	    &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
2589 	tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
2590 	    &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
2591 	tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
2592 	    &prev.tcps_ecn_client_success, &stat.ecn_client_success);
2593 	tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
2594 	    &prev.tcps_ecn_server_success, &stat.ecn_server_success);
2595 	tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
2596 	    &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
2597 	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
2598 	    &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
2599 	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
2600 	    &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
2601 	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
2602 	    &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
2603 	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
2604 	    &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
2607 	tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
2608 	    &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
2611 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
2612 	    &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
2613 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
2614 	    &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
2615 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
2616 	    &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
2617 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
2618 	    &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
2619 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
2620 	    &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
2621 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
2622 	    &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
2623 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
2624 	    &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
2625 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
2626 	    &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
2627 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
2628 	    &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
2629 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
2630 	    &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
2631 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
2632 	    &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
2633 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
2634 	    &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
2635 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
2636 	    &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
2637 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
2638 	    &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
2639 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
2640 	    &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
2641 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
2642 	    &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
2643 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
2644 	    &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
2645 	tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
2646 	    &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
2647 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
2648 	    &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
2649 	tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
2650 	    &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
2651 	tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
2652 	    &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
2653 	tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
2654 	    &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
2655 
2656 
2657 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
2658 	    &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
2659 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
2660 	    &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
2661 	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
2662 	    &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
2663 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
2664 	    &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
2665 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
2666 	    &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
2667 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
2668 	    &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
2669 	tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
2670 	    &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
2671 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
2672 	    &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
2673 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
2674 	    &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
2675 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
2676 	    &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
2677 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
2678 	    &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
2679 	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
2680 	    &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
2681 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
2682 	    &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
2683 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
2684 	    &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
2685 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
2686 	    &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
2687 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
2688 	    &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
2689 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
2690 	    &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
2691 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
2692 	    &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
2693 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
2694 	    &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
2695 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
2696 	    &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
2697 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
2698 	    &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
2699 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
2700 	    &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
2701 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
2702 	    &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
2703 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
2704 	    &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
2705 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
2706 	    &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
2707 	tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
2708 	    &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
2709 	tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
2710 	    &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
2711 	tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
2712 	    &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);
2713 	tcp_cumulative_stat(tcpstat.tcps_mptcp_triggered_cell,
2714 	    &prev.tcps_mptcp_triggered_cell, &stat.mptcp_triggered_cell);
2715 
2716 	nstat_sysinfo_send_data(&data);
2717 
2718 #undef  stat
2719 }
2720 
2721 void
2722 tcp_interface_send_probe(u_int16_t probe_if_index)
2723 {
2724 	int32_t offset = 0;
2725 	struct tcptimerlist *listp = &tcp_timer_list;
2726 
2727 	/* Make sure TCP clock is up to date */
2728 	calculate_tcp_clock();
2729 
2730 	lck_mtx_lock(&listp->mtx);
2731 	if (listp->probe_if_index > 0 && listp->probe_if_index != probe_if_index) {
2732 		tcpstat.tcps_probe_if_conflict++;
2733 		os_log(OS_LOG_DEFAULT,
2734 		    "%s: probe_if_index %u conflicts with %u, tcps_probe_if_conflict %u\n",
2735 		    __func__, probe_if_index, listp->probe_if_index,
2736 		    tcpstat.tcps_probe_if_conflict);
2737 		goto done;
2738 	}
2739 
2740 	listp->probe_if_index = probe_if_index;
2741 	if (listp->running) {
2742 		os_log(OS_LOG_DEFAULT, "%s: timer list already running for if_index %u\n",
2743 		    __func__, probe_if_index);
2744 		goto done;
2745 	}
2746 
2747 	/*
2748 	 * Reschedule the timerlist to run within the next 10ms, which is
2749 	 * the fastest that we can do.
2750 	 */
2751 	offset = TCP_TIMER_10MS_QUANTUM;
2752 	if (listp->scheduled) {
2753 		int32_t diff;
2754 		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2755 		if (diff <= 0) {
2756 			/* The timer will fire sooner than what's needed */
2757 			os_log(OS_LOG_DEFAULT,
2758 			    "%s: timer will fire sooner than needed for if_index %u\n",
2759 			    __func__, probe_if_index);
2760 			goto done;
2761 		}
2762 	}
2763 	listp->mode = TCP_TIMERLIST_10MS_MODE;
2764 	listp->idleruns = 0;
2765 
2766 	tcp_sched_timerlist(offset);
2767 
2768 done:
2769 	lck_mtx_unlock(&listp->mtx);
2770 	return;
2771 }
2772 
2773 /*
2774  * Enable read probes on this connection, if:
2775  * - it is in the established state
2776  * - it doesn't have any data outstanding
2777  * - the outgoing ifp matches
2778  * - we have not already sent any read probes
2779  */
2780 static void
2781 tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
2782 {
2783 	if (tp->t_state == TCPS_ESTABLISHED &&
2784 	    tp->snd_max == tp->snd_una &&
2785 	    tp->t_inpcb->inp_last_outifp == ifp &&
2786 	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
2787 	    tp->t_rtimo_probes == 0) {
2788 		tp->t_flagsext |= TF_DETECT_READSTALL;
2789 		tp->t_rtimo_probes = 0;
2790 		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2791 		    TCP_TIMER_10MS_QUANTUM);
2792 		if (tp->tentry.index == TCPT_NONE) {
2793 			tp->tentry.index = TCPT_KEEP;
2794 			tp->tentry.runtime = tcp_now +
2795 			    TCP_TIMER_10MS_QUANTUM;
2796 		} else {
2797 			int32_t diff = 0;
2798 
2799 			/* Reset runtime to be within the next 10ms */
2800 			diff = timer_diff(tp->tentry.runtime, 0,
2801 			    tcp_now, TCP_TIMER_10MS_QUANTUM);
2802 			if (diff > 0) {
2803 				tp->tentry.index = TCPT_KEEP;
2804 				tp->tentry.runtime = tcp_now +
2805 				    TCP_TIMER_10MS_QUANTUM;
2806 				if (tp->tentry.runtime == 0) {
2807 					tp->tentry.runtime++;
2808 				}
2809 			}
2810 		}
2811 	}
2812 }
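/*
 * The first read probe is deliberately scheduled only
 * TCP_TIMER_10MS_QUANTUM out; the spacing of any follow-up probes comes
 * from the backoff logic in the TCPT_KEEP handling in tcp_timers() above.
 */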
2813 
2814 /*
2815  * Disable read probe and reset the keep alive timer
2816  */
2817 static void
2818 tcp_disable_read_probe(struct tcpcb *tp)
2819 {
2820 	if (tp->t_adaptive_rtimo == 0 &&
2821 	    ((tp->t_flagsext & TF_DETECT_READSTALL) ||
2822 	    tp->t_rtimo_probes > 0)) {
2823 		tcp_keepalive_reset(tp);
2824 
2825 		if (tp->t_mpsub) {
2826 			mptcp_reset_keepalive(tp);
2827 		}
2828 	}
2829 }
2830 
2831 /*
2832  * Reschedule the tcp timerlist in the next 10ms to enable or disable
2833  * read probes on connections going over a particular interface.
2834  */
2835 void
2836 tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
2837 {
2838 	int32_t offset;
2839 	struct tcptimerlist *listp = &tcp_timer_list;
2840 	struct inpcbinfo *pcbinfo = &tcbinfo;
2841 	struct inpcb *inp, *nxt;
2842 
2843 	if (ifp == NULL) {
2844 		return;
2845 	}
2846 
2847 	/* update clock */
2848 	calculate_tcp_clock();
2849 
2850 	/*
2851 	 * Enable keep alive timer on all connections that are
2852 	 * active/established on this interface.
2853 	 */
2854 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
2855 
2856 	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
2857 		struct tcpcb *tp = NULL;
2858 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
2859 		    WNT_STOPUSING) {
2860 			continue;
2861 		}
2862 
2863 		/* Acquire lock to look at the state of the connection */
2864 		socket_lock(inp->inp_socket, 1);
2865 
2866 		/* Release the want count */
2867 		if (inp->inp_ppcb == NULL ||
2868 		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
2869 			socket_unlock(inp->inp_socket, 1);
2870 			continue;
2871 		}
2872 		tp = intotcpcb(inp);
2873 		if (enable) {
2874 			tcp_enable_read_probe(tp, ifp);
2875 		} else {
2876 			tcp_disable_read_probe(tp);
2877 		}
2878 
2879 		socket_unlock(inp->inp_socket, 1);
2880 	}
2881 	lck_rw_done(&pcbinfo->ipi_lock);
2882 
2883 	lck_mtx_lock(&listp->mtx);
2884 	if (listp->running) {
2885 		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
2886 		goto done;
2887 	}
2888 
2889 	/* Reschedule within the next 10ms */
2890 	offset = TCP_TIMER_10MS_QUANTUM;
2891 	if (listp->scheduled) {
2892 		int32_t diff;
2893 		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2894 		if (diff <= 0) {
2895 			/* The timer will fire sooner than what's needed */
2896 			goto done;
2897 		}
2898 	}
2899 	listp->mode = TCP_TIMERLIST_10MS_MODE;
2900 	listp->idleruns = 0;
2901 
2902 	tcp_sched_timerlist(offset);
2903 done:
2904 	lck_mtx_unlock(&listp->mtx);
2905 	return;
2906 }
2907 
2908 inline void
2909 tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
2910 {
2911 	struct if_cellular_status_v1 *ifsr;
2912 	u_int32_t optlen;
2913 	ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2914 	if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2915 		optlen = tp->t_maxopd - tp->t_maxseg;
2916 
2917 		if (ifsr->mss_recommended ==
2918 		    IF_CELL_UL_MSS_RECOMMENDED_NONE &&
2919 		    tp->t_cached_maxopd > 0 &&
2920 		    tp->t_maxopd < tp->t_cached_maxopd) {
2921 			tp->t_maxopd = tp->t_cached_maxopd;
2922 			tcpstat.tcps_mss_to_default++;
2923 		} else if (ifsr->mss_recommended ==
2924 		    IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
2925 		    tp->t_maxopd > tcp_mss_rec_medium) {
2926 			tp->t_cached_maxopd = tp->t_maxopd;
2927 			tp->t_maxopd = tcp_mss_rec_medium;
2928 			tcpstat.tcps_mss_to_medium++;
2929 		} else if (ifsr->mss_recommended ==
2930 		    IF_CELL_UL_MSS_RECOMMENDED_LOW &&
2931 		    tp->t_maxopd > tcp_mss_rec_low) {
2932 			tp->t_cached_maxopd = tp->t_maxopd;
2933 			tp->t_maxopd = tcp_mss_rec_low;
2934 			tcpstat.tcps_mss_to_low++;
2935 		}
2936 		tp->t_maxseg = tp->t_maxopd - optlen;
2937 
2938 		/*
2939 		 * Clear the cached value if it is the same as the current one.
2940 		 */
2941 		if (tp->t_maxopd == tp->t_cached_maxopd) {
2942 			tp->t_cached_maxopd = 0;
2943 		}
2944 	}
2945 }
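/*
 * Illustrative arithmetic for the clamping above (hypothetical values):
 * t_maxopd = 1460 with 12 bytes of options gives t_maxseg = 1448;
 * clamping t_maxopd to a recommended 1280 then yields t_maxseg = 1268,
 * preserving room for the same options.
 */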
2946 
2947 void
2948 tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
2949 {
2950 	struct inpcb *inp = sotoinpcb(so);
2951 	struct tcpcb *tp = intotcpcb(inp);
2952 
2953 	if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) {
2954 		return;
2955 	}
2956 
2957 	if (!IFNET_IS_CELLULAR(ifp)) {
2958 		/*
2959 		 * This optimization is implemented for cellular
2960 		 * networks only
2961 		 */
2962 		return;
2963 	}
2964 	if (tp->t_state <= TCPS_CLOSE_WAIT) {
2965 		/*
2966 		 * If the connection is currently doing or has done PMTU
2967 		 * blackhole detection, do not change the MSS
2968 		 */
2969 		if (tp->t_flags & TF_BLACKHOLE) {
2970 			return;
2971 		}
2972 		if (ifp->if_link_status == NULL) {
2973 			return;
2974 		}
2975 		tcp_update_mss_core(tp, ifp);
2976 	}
2977 }
2978 
2979 void
2980 tcp_itimer(struct inpcbinfo *ipi)
2981 {
2982 	struct inpcb *inp, *nxt;
2983 
2984 	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
2985 		if (tcp_itimer_done == TRUE) {
2986 			tcp_itimer_done = FALSE;
2987 			atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
2988 			return;
2989 		}
2990 		/* Try-lock failed; block until the lock can be taken exclusively */
2991 		lck_rw_lock_exclusive(&ipi->ipi_lock);
2992 	}
2993 	tcp_itimer_done = TRUE;
2994 
2995 	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
2996 		struct socket *so;
2997 		struct ifnet *ifp;
2998 
2999 		if (inp->inp_ppcb == NULL ||
3000 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
3001 			continue;
3002 		}
3003 		so = inp->inp_socket;
3004 		ifp = inp->inp_last_outifp;
3005 		socket_lock(so, 1);
3006 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3007 			socket_unlock(so, 1);
3008 			continue;
3009 		}
3010 		so_check_extended_bk_idle_time(so);
3011 		if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
3012 			tcp_update_mss_locked(so, NULL);
3013 		}
3014 		socket_unlock(so, 1);
3015 
3016 		/*
3017 		 * Defunct all system-initiated background sockets if the
3018 		 * socket is using the cellular interface and the interface
3019 		 * has its LQM set to abort.
3020 		 */
3021 		if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
3022 		    IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
3023 		    ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
3024 		    (ifp->if_interface_state.valid_bitmask &
3025 		    IF_INTERFACE_STATE_LQM_STATE_VALID) &&
3026 		    ifp->if_interface_state.lqm_state ==
3027 		    IFNET_LQM_THRESH_ABORT) {
3028 			socket_defunct(current_proc(), so,
3029 			    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
3030 		}
3031 	}
3032 
3033 	ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
3034 	lck_rw_done(&ipi->ipi_lock);
3035 }
3036