xref: /xnu-11215.41.3/bsd/netinet/tcp_timer.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
/*
 * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
 */

#include "tcp_includes.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mcache.h>
#include <sys/queue.h>
#include <kern/locks.h>
#include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */
#include <mach/boolean.h>

#include <net/route.h>
#include <net/if_var.h>
#include <net/ntstat.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_log.h>

#include <sys/kdebug.h>
#include <mach/sdt.h>
#include <netinet/mptcp_var.h>
#include <net/content_filter.h>
#include <net/sockaddr_utils.h>

/* Max number of times a stretch ack can be delayed on a connection */
#define TCP_STRETCHACK_DELAY_THRESHOLD  5

/*
 * If the host processor has been sleeping for too long, this is the threshold
 * used to avoid sending stale retransmissions.
 */
#define TCP_SLEEP_TOO_LONG      (10 * 60 * 1000) /* 10 minutes in ms */

/* tcp timer list */
struct tcptimerlist tcp_timer_list;

/* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
struct tcptailq tcp_tw_tailq;


static int
sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
	int error, temp;
	long s, tt;

	tt = *(int *)arg1;
	s = tt * 1000 / TCP_RETRANSHZ;
	if (tt < 0 || s > INT_MAX) {
		return EINVAL;
	}
	temp = (int)s;

	error = sysctl_handle_int(oidp, &temp, 0, req);
	if (error || !req->newptr) {
		return error;
	}

	tt = (long)temp * TCP_RETRANSHZ / 1000;
	if (tt < 1 || tt > INT_MAX) {
		return EINVAL;
	}

	*(int *)arg1 = (int)tt;
	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
	return 0;
}
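
/*
 * Worked example of the conversion above (illustrative only; assumes
 * TCP_RETRANSHZ == 1000, i.e. one timer tick per millisecond):
 *
 *	read:  tt = 7200000 ticks -> s  = 7200000 * 1000 / 1000 = 7200000 ms
 *	write: temp = 60000 ms    -> tt = 60000 * 1000 / 1000   = 60000 ticks
 *
 * With a hypothetical TCP_RETRANSHZ of 100, the same write would store
 * 60000 * 100 / 1000 = 6000 ticks. The range checks reject any value that
 * does not round-trip through an int.
 */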

#if SYSCTL_SKMEM
int     tcp_keepinit = TCPTV_KEEP_INIT;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
    sysctl_msec_to_ticks, "I", "");

int     tcp_keepidle = TCPTV_KEEP_IDLE;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
    sysctl_msec_to_ticks, "I", "");

int     tcp_keepintvl = TCPTV_KEEPINTVL;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
    sysctl_msec_to_ticks, "I", "");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");

int     tcp_msl = TCPTV_MSL;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_msl, offsetof(skmem_sysctl, tcp.msl),
    sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
#else /* SYSCTL_SKMEM */
int     tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");

int     tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");

int     tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");

int     tcp_keepcnt;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepcnt, 0, "number of times to repeat keepalive");

int     tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
#endif /* SYSCTL_SKMEM */

/*
 * Avoid DoS with connections half-closed in FIN_WAIT_2
 */
int     tcp_fin_timeout = TCPTV_FINWAIT2;

static int
sysctl_tcp_fin_timeout SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
	int error;
	int value = tcp_fin_timeout;

	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == USER_ADDR_NULL) {
		return error;
	}

	if (value == -1) {
		/* Reset to default value */
		value = TCPTV_FINWAIT2;
	} else {
		/* Convert from milliseconds */
		long big_value = (long)value * TCP_RETRANSHZ / 1000;

		if (big_value < 0 || big_value > INT_MAX) {
			return EINVAL;
		}
		value = (int)big_value;
	}
	tcp_fin_timeout = value;
	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, value);
	return 0;
}

#if SYSCTL_SKMEM
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_fin_timeout, offsetof(skmem_sysctl, tcp.fin_timeout),
    sysctl_tcp_fin_timeout, "I", "");
#else /* SYSCTL_SKMEM */
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_fin_timeout, 0,
    sysctl_tcp_fin_timeout, "I", "");
#endif /* SYSCTL_SKMEM */

253 
254 /*
255  * Avoid DoS via TCP Robustness in Persist Condition
256  * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
257  * by allowing a system wide maximum persistence timeout value when in
258  * Zero Window Probe mode.
259  *
260  * Expressed in milliseconds to be consistent without timeout related
261  * values, the TCP socket option is in seconds.
262  */
#if SYSCTL_SKMEM
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
    sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
#else /* SYSCTL_SKMEM */
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
    "Maximum persistence timeout for ZWP");
#endif /* SYSCTL_SKMEM */

SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
    "Assume SO_KEEPALIVE on all TCP connections");

/*
 * This parameter determines how long the timer list will stay in fast or
 * quick mode even though all connections are idle. In this state, the
 * timer will run more frequently anticipating new data.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
    TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");

/*
 * See tcp_syn_backoff[] for interval values between SYN retransmits;
 * the value set below defines the number of retransmits before we
 * disable the timestamp and window scaling options during subsequent
 * SYN retransmits.  Setting it to 0 disables the dropping off of those
 * two options.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
    10, "Number of retransmitted SYNs before disabling RFC 1323 "
    "options on local connections");

static int tcp_timer_advanced = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
    "Number of times one of the timers was advanced");

static int tcp_resched_timerlist = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
    "Number of times timer list was rescheduled as part of processing a packet");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
    "Path MTU Discovery Black Hole Detection");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
    "Path MTU Discovery Black Hole Detection lowered MSS");

#if (DEBUG || DEVELOPMENT)
int tcp_probe_if_fix_port = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, probe_if_fix_port,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_probe_if_fix_port, 0, "");
#endif /* (DEBUG || DEVELOPMENT) */

static u_int32_t tcp_mss_rec_medium = 1200;
static u_int32_t tcp_mss_rec_low = 512;

#define TCP_REPORT_STATS_INTERVAL       43200 /* 12 hours, in seconds */
int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;

/* performed garbage collection of "used" sockets */
static boolean_t tcp_gc_done = FALSE;

/* max idle probes */
int     tcp_maxpersistidle = TCPTV_KEEP_IDLE;

/*
 * The TCP delayed-ack timer is set to 100 ms. Since the timer list in
 * fast mode is processed no more often than every 100 ms, the delayed
 * ack timer will fire somewhere between 100 and 200 ms.
 */
int     tcp_delack = TCP_RETRANSHZ / 10;

#if MPTCP
/*
 * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
 */
int     tcp_jack_rxmt = TCP_RETRANSHZ / 2;
#endif /* MPTCP */

static boolean_t tcp_itimer_done = FALSE;

static void tcp_remove_timer(struct tcpcb *tp);
static void tcp_sched_timerlist(uint32_t offset);
static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
    u_int16_t probe_if_index);
static inline void tcp_set_lotimer_index(struct tcpcb *);
__private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
__private_extern__ void tcp_report_stats(void);

static  u_int64_t tcp_last_report_time;

/*
 * Structure to store previously reported stats so that we can send
 * incremental changes in each report interval.
 */
struct tcp_last_report_stats {
	u_int32_t       tcps_connattempt;
	u_int32_t       tcps_accepts;
	u_int32_t       tcps_ecn_client_setup;
	u_int32_t       tcps_ecn_server_setup;
	u_int32_t       tcps_ecn_client_success;
	u_int32_t       tcps_ecn_server_success;
	u_int32_t       tcps_ecn_not_supported;
	u_int32_t       tcps_ecn_lost_syn;
	u_int32_t       tcps_ecn_lost_synack;
	u_int32_t       tcps_ecn_recv_ce;
	u_int32_t       tcps_ecn_recv_ece;
	u_int32_t       tcps_ecn_sent_ece;
	u_int32_t       tcps_ecn_conn_recv_ce;
	u_int32_t       tcps_ecn_conn_recv_ece;
	u_int32_t       tcps_ecn_conn_plnoce;
	u_int32_t       tcps_ecn_conn_pl_ce;
	u_int32_t       tcps_ecn_conn_nopl_ce;
	u_int32_t       tcps_ecn_fallback_synloss;
	u_int32_t       tcps_ecn_fallback_reorder;
	u_int32_t       tcps_ecn_fallback_ce;

	/* TFO-related statistics */
	u_int32_t       tcps_tfo_syn_data_rcv;
	u_int32_t       tcps_tfo_cookie_req_rcv;
	u_int32_t       tcps_tfo_cookie_sent;
	u_int32_t       tcps_tfo_cookie_invalid;
	u_int32_t       tcps_tfo_cookie_req;
	u_int32_t       tcps_tfo_cookie_rcv;
	u_int32_t       tcps_tfo_syn_data_sent;
	u_int32_t       tcps_tfo_syn_data_acked;
	u_int32_t       tcps_tfo_syn_loss;
	u_int32_t       tcps_tfo_blackhole;
	u_int32_t       tcps_tfo_cookie_wrong;
	u_int32_t       tcps_tfo_no_cookie_rcv;
	u_int32_t       tcps_tfo_heuristics_disable;
	u_int32_t       tcps_tfo_sndblackhole;

	/* MPTCP-related statistics */
	u_int32_t       tcps_mptcp_handover_attempt;
	u_int32_t       tcps_mptcp_interactive_attempt;
	u_int32_t       tcps_mptcp_aggregate_attempt;
	u_int32_t       tcps_mptcp_fp_handover_attempt;
	u_int32_t       tcps_mptcp_fp_interactive_attempt;
	u_int32_t       tcps_mptcp_fp_aggregate_attempt;
	u_int32_t       tcps_mptcp_heuristic_fallback;
	u_int32_t       tcps_mptcp_fp_heuristic_fallback;
	u_int32_t       tcps_mptcp_handover_success_wifi;
	u_int32_t       tcps_mptcp_handover_success_cell;
	u_int32_t       tcps_mptcp_interactive_success;
	u_int32_t       tcps_mptcp_aggregate_success;
	u_int32_t       tcps_mptcp_fp_handover_success_wifi;
	u_int32_t       tcps_mptcp_fp_handover_success_cell;
	u_int32_t       tcps_mptcp_fp_interactive_success;
	u_int32_t       tcps_mptcp_fp_aggregate_success;
	u_int32_t       tcps_mptcp_handover_cell_from_wifi;
	u_int32_t       tcps_mptcp_handover_wifi_from_cell;
	u_int32_t       tcps_mptcp_interactive_cell_from_wifi;
	u_int64_t       tcps_mptcp_handover_cell_bytes;
	u_int64_t       tcps_mptcp_interactive_cell_bytes;
	u_int64_t       tcps_mptcp_aggregate_cell_bytes;
	u_int64_t       tcps_mptcp_handover_all_bytes;
	u_int64_t       tcps_mptcp_interactive_all_bytes;
	u_int64_t       tcps_mptcp_aggregate_all_bytes;
	u_int32_t       tcps_mptcp_back_to_wifi;
	u_int32_t       tcps_mptcp_wifi_proxy;
	u_int32_t       tcps_mptcp_cell_proxy;
	u_int32_t       tcps_mptcp_triggered_cell;
};


/* Returns true if the timer is on the timer list */
#define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)

/* Run the TCP timerlist at least once every hour */
#define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)


static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
static boolean_t tcp_garbage_collect(struct inpcb *, int);

#define TIMERENTRY_TO_TP(te) (__unsafe_forge_single(struct tcpcb *, ((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next))))
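
/*
 * Illustrative note (added commentary): TIMERENTRY_TO_TP() is the classic
 * container_of pattern -- given a pointer to the embedded
 * tentry.le.le_next field, subtracting that field's offset within struct
 * tcpcb recovers the enclosing tcpcb. A minimal sketch of the invariant:
 *
 *	struct tcpcb *tp = ...;
 *	void *te = &tp->tentry.le.le_next;
 *	// TIMERENTRY_TO_TP(te) == tp
 */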

#define VERIFY_NEXT_LINK(elm, field) do {       \
	if (LIST_NEXT((elm),field) != NULL &&   \
	    LIST_NEXT((elm),field)->field.le_prev !=    \
	        &((elm)->field.le_next))        \
	        panic("Bad link elm %p next->prev != elm", (elm));      \
} while(0)

#define VERIFY_PREV_LINK(elm, field) do {       \
	if (*(elm)->field.le_prev != (elm))     \
	        panic("Bad link elm %p prev->next != elm", (elm));      \
} while(0)

#define TCP_SET_TIMER_MODE(mode, i) do { \
	if (IS_TIMER_HZ_10MS(i)) \
	        (mode) |= TCP_TIMERLIST_10MS_MODE; \
	else if (IS_TIMER_HZ_100MS(i)) \
	        (mode) |= TCP_TIMERLIST_100MS_MODE; \
	else \
	        (mode) |= TCP_TIMERLIST_500MS_MODE; \
} while(0)
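
/*
 * Example of how the mode accumulates (illustrative; which timer index
 * falls in which granularity class is decided by IS_TIMER_HZ_10MS() /
 * IS_TIMER_HZ_100MS() in tcp_timer.h and is assumed here, not verified):
 *
 *	uint16_t mode = 0;
 *	TCP_SET_TIMER_MODE(mode, index_in_100ms_class);
 *	TCP_SET_TIMER_MODE(mode, index_in_500ms_class);
 *	// mode now carries both TCP_TIMERLIST_100MS_MODE and
 *	// TCP_TIMERLIST_500MS_MODE; the list runs at the finest
 *	// granularity any pending timer requires.
 */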

#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
    "Medium MSS based on recommendation in link status report");
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
    "Low MSS based on recommendation in link status report");

static int32_t tcp_change_mss_recommended = 0;
static int
sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int i, err = 0, changed = 0;
	struct ifnet *ifp;
	struct if_link_status ifsr;
	struct if_cellular_status_v1 *new_cell_sr;
	err = sysctl_io_number(req, tcp_change_mss_recommended,
	    sizeof(int32_t), &i, &changed);
	if (changed) {
		if (i < 0 || i > UINT16_MAX) {
			return EINVAL;
		}
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (IFNET_IS_CELLULAR(ifp)) {
				bzero(&ifsr, sizeof(ifsr));
				new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
				ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
				ifsr.ifsr_len = sizeof(*new_cell_sr);

				/* Set MSS recommended */
				new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
				new_cell_sr->mss_recommended = (uint16_t)i;
				err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(*new_cell_sr));
				if (err == 0) {
					tcp_change_mss_recommended = i;
				} else {
					break;
				}
			}
		}
		ifnet_head_done();
	}
	return err;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
    0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
    "Report stats interval");
#endif /* (DEVELOPMENT || DEBUG) */

/*
 * Compare two timer values. If there is a reset of the sign bit,
 * it is safe to assume that the timer has wrapped around. By doing
 * signed comparison, we take care of wrap around such that the value
 * with the sign bit reset is actually ahead of the other.
 */
inline int32_t
timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
{
	return (int32_t)((t1 + toff1) - (t2 + toff2));
}
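
/*
 * Worked wrap-around example (illustrative): let t1 = 0xFFFFFFF0 and
 * t2 = 0x00000010 with both offsets 0. Unsigned subtraction yields
 * 0xFFFFFFE0, which reinterpreted as int32_t is -32: t1 is treated as
 * 32 ticks *behind* t2, exactly what we want after t2 has wrapped past
 * the 32-bit boundary. A plain unsigned compare would get this wrong.
 */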

/*
 * Add to tcp timewait list, delay is given in milliseconds.
 */
static void
add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
{
	struct inpcbinfo *pcbinfo = &tcbinfo;
	struct inpcb *inp = tp->t_inpcb;
	uint32_t timer;

	/* pcb list should be locked when we get here */
	LCK_RW_ASSERT(&pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);

	/* We may get here multiple times, so check */
	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
		pcbinfo->ipi_twcount++;
		inp->inp_flags2 |= INP2_TIMEWAIT;

		/* Remove from global inp list */
		LIST_REMOVE(inp, inp_list);
	} else {
		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
	}

	/* Compute the time at which this socket can be closed */
	timer = tcp_now + delay;

	/* We will use the TCPT_2MSL timer for tracking this delay */

	if (TIMER_IS_ON_LIST(tp)) {
		tcp_remove_timer(tp);
	}
	tp->t_timer[TCPT_2MSL] = timer;

	TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
}
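
/*
 * Typical usage sketch (illustrative; the actual call sites live in
 * tcp_input.c and friends): a connection entering TIME_WAIT is queued
 * for roughly two maximum segment lifetimes, e.g.
 *
 *	add_to_time_wait(tp, 2 * tcp_msl);
 *
 * after which tcp_gc() reaps it once tcp_now passes t_timer[TCPT_2MSL].
 */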

void
add_to_time_wait(struct tcpcb *tp, uint32_t delay)
{
	if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) {
		socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
	}

	tcp_del_fsw_flow(tp);

	/* 19182803: Notify nstat that connection is closing before waiting. */
	nstat_pcb_detach(tp->t_inpcb);

#if CONTENT_FILTER
	if ((tp->t_inpcb->inp_socket->so_flags & SOF_CONTENT_FILTER) != 0) {
		/* If filter present, allow filter to finish processing all queued up data before adding to time wait queue */
		(void) cfil_sock_tcp_add_time_wait(tp->t_inpcb->inp_socket);
	} else
#endif /* CONTENT_FILTER */
	{
		add_to_time_wait_now(tp, delay);
	}
}

void
add_to_time_wait_now(struct tcpcb *tp, uint32_t delay)
{
	struct inpcbinfo *pcbinfo = &tcbinfo;

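	/*
	 * Lock-ordering note (added commentary): ipi_lock is taken ahead
	 * of the per-socket lock. Try the exclusive lock first; if that
	 * fails, drop the socket lock before blocking on ipi_lock and
	 * re-take it afterwards, so we never sleep on ipi_lock while
	 * holding the socket lock.
	 */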
	if (!lck_rw_try_lock_exclusive(&pcbinfo->ipi_lock)) {
		socket_unlock(tp->t_inpcb->inp_socket, 0);
		lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
		socket_lock(tp->t_inpcb->inp_socket, 0);
	}
	add_to_time_wait_locked(tp, delay);
	lck_rw_done(&pcbinfo->ipi_lock);

	inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
}

/* If this is on time wait queue, remove it. */
void
tcp_remove_from_time_wait(struct inpcb *inp)
{
	struct tcpcb *tp = intotcpcb(inp);
	if (inp->inp_flags2 & INP2_TIMEWAIT) {
		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
	}
}

static boolean_t
tcp_garbage_collect(struct inpcb *inp, int istimewait)
{
	boolean_t active = FALSE;
	struct socket *so, *mp_so = NULL;
	struct tcpcb *tp;

	so = inp->inp_socket;
	tp = intotcpcb(inp);

	if (so->so_flags & SOF_MP_SUBFLOW) {
		mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
		if (!socket_try_lock(mp_so)) {
			mp_so = NULL;
			active = TRUE;
			goto out;
		}
		if (mpsotomppcb(mp_so)->mpp_inside > 0) {
			os_log(mptcp_log_handle, "%s - %lx: Still inside %d usecount %d\n", __func__,
			    (unsigned long)VM_KERNEL_ADDRPERM(mpsotompte(mp_so)),
			    mpsotomppcb(mp_so)->mpp_inside,
			    mp_so->so_usecount);
			socket_unlock(mp_so, 0);
			mp_so = NULL;
			active = TRUE;
			goto out;
		}
		/* We call socket_unlock with refcount further below */
		mp_so->so_usecount++;
		tptomptp(tp)->mpt_mpte->mpte_mppcb->mpp_inside++;
	}

	/*
	 * Skip if still in use or busy; it would have been more efficient
	 * if we were to test so_usecount against 0, but this isn't possible
	 * due to the current implementation of tcp_dropdropablreq() where
	 * overflow sockets that are eligible for garbage collection have
	 * their usecounts set to 1.
	 */
	if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
		active = TRUE;
		goto out;
	}

	/* Check again under the lock */
	if (so->so_usecount > 1) {
		if (inp->inp_wantcnt == WNT_STOPUSING) {
			active = TRUE;
		}
		lck_mtx_unlock(&inp->inpcb_mtx);
		goto out;
	}

	if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
	    tp->t_state != TCPS_CLOSED) {
		/* Become a regular mutex */
		lck_mtx_convert_spin(&inp->inpcb_mtx);
		tcp_close(tp);
	}

	/*
	 * Overflowed socket dropped from the listening queue?  Do this
	 * only if we are called to clean up the time wait slots, since
	 * tcp_dropdropablreq() considers a socket to have been fully
	 * dropped after add_to_time_wait() is finished.
	 * Also handle the case of connections getting closed by the peer
	 * while in the queue, as seen with rdar://6422317
	 */
	if (so->so_usecount == 1 &&
	    ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
	    (so->so_head != NULL) &&
	    ((so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) ==
	    (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE))))) {
		if (inp->inp_state != INPCB_STATE_DEAD) {
			/* Become a regular mutex */
			lck_mtx_convert_spin(&inp->inpcb_mtx);
			if (SOCK_CHECK_DOM(so, PF_INET6)) {
				in6_pcbdetach(inp);
			} else {
				in_pcbdetach(inp);
			}
		}
		VERIFY(so->so_usecount > 0);
		so->so_usecount--;
		if (inp->inp_wantcnt == WNT_STOPUSING) {
			active = TRUE;
		}
		lck_mtx_unlock(&inp->inpcb_mtx);
		goto out;
	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
		lck_mtx_unlock(&inp->inpcb_mtx);
		active = FALSE;
		goto out;
	}
	/*
	 * We get here because the PCB is no longer searchable
	 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
	 * (usecount is 0).  This covers all cases, including overflow
	 * sockets and those that are considered as "embryonic",
	 * i.e. created by sonewconn() in TCP input path, and have
	 * not yet been committed.  For the former, we reduce the usecount
	 * to 0 as done by the code above.  For the latter, the usecount
	 * would have been reduced to 0 as part of calling soabort() when
	 * the socket is dropped at the end of tcp_input().
	 */
	if (so->so_usecount == 0) {
		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
		/* Become a regular mutex */
		lck_mtx_convert_spin(&inp->inpcb_mtx);

		/*
		 * If this tp still happens to be on the timer list,
		 * take it out
		 */
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}

		if (inp->inp_state != INPCB_STATE_DEAD) {
			if (SOCK_CHECK_DOM(so, PF_INET6)) {
				in6_pcbdetach(inp);
			} else {
				in_pcbdetach(inp);
			}
		}

		if (mp_so) {
			mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);

			/* so is now unlinked from mp_so - let's drop the lock */
			socket_unlock(mp_so, 1);
			mp_so = NULL;
		}

		in_pcbdispose(inp);
		active = FALSE;
		goto out;
	}

	lck_mtx_unlock(&inp->inpcb_mtx);
	active = TRUE;

out:
	if (mp_so) {
		socket_unlock(mp_so, 1);
	}

	return active;
}

/*
 * TCP garbage collector callback (inpcb_timer_func_t).
 *
 * Bumps the ipi_gc_req counters by the number of pcbs that will need
 * to be gc-ed soon; a nonzero count keeps the gc timer active.
 */
void
tcp_gc(struct inpcbinfo *ipi)
{
	struct inpcb *inp, *nxt;
	struct tcpcb *tw_tp, *tw_ntp;
#if  KDEBUG
	static int tws_checked = 0;
#endif

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/*
	 * Update tcp_now here as it may get used while
	 * processing the slow timer.
	 */
	calculate_tcp_clock();

	/*
	 * Garbage collect socket/tcpcb: We need to acquire the list lock
	 * exclusively to do this
	 */

	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
		/* don't sweat it this time; cleanup was done last time */
		if (tcp_gc_done == TRUE) {
			tcp_gc_done = FALSE;
			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
			    tws_checked, cur_tw_slot, 0, 0, 0);
			/* Lock upgrade failed, give up this round */
			os_atomic_inc(&ipi->ipi_gc_req.intimer_fast, relaxed);
			return;
		}
		/* Upgrade failed, lost lock now take it again exclusive */
		lck_rw_lock_exclusive(&ipi->ipi_lock);
	}
	tcp_gc_done = TRUE;

	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
		if (tcp_garbage_collect(inp, 0)) {
			os_atomic_inc(&ipi->ipi_gc_req.intimer_fast, relaxed);
		}
	}

	/* Now cleanup the time wait ones */
	TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
		/*
		 * We check the timestamp here without holding the
		 * socket lock for better performance. If there are
		 * any pcbs in time-wait, the timer will get rescheduled.
		 * Hence some error in this check can be tolerated.
		 *
		 * Sometimes a socket on time-wait queue can be closed if
		 * 2MSL timer expired but the application still has a
		 * usecount on it.
		 */
		if (tw_tp->t_state == TCPS_CLOSED ||
		    TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
			if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) {
				os_atomic_inc(&ipi->ipi_gc_req.intimer_lazy, relaxed);
			}
		}
	}

	/* take into account pcbs that are still in time_wait_slots */
	os_atomic_add(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount, relaxed);

	lck_rw_done(&ipi->ipi_lock);

	/* Clean up the socache while we are here */
	if (so_cache_timer()) {
		os_atomic_inc(&ipi->ipi_gc_req.intimer_lazy, relaxed);
	}

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
	    cur_tw_slot, 0, 0, 0);

	return;
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
	int i;

	tcp_remove_timer(tp);
	for (i = 0; i < TCPT_NTIMERS; i++) {
		tp->t_timer[i] = 0;
	}
	tp->tentry.timer_start = tcp_now;
	tp->tentry.index = TCPT_NONE;
}

int     tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
{ 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int     tcp_backoff[TCP_MAXRXTSHIFT + 1] =
{ 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;        /* sum of tcp_backoff[] */
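
/*
 * Sanity check on the constant above (illustrative arithmetic): with
 * TCP_MAXRXTSHIFT == 12 the table has 13 entries, and
 *
 *	1 + 2 + 4 + 8 + 16 + 32 + 64 * 7 = 63 + 448 = 511
 *
 * matches tcp_totbackoff. The persist-timer drop test below multiplies
 * TCP_REXMTVAL(tp) by this sum to bound the total probing time.
 */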

void
tcp_rexmt_save_state(struct tcpcb *tp)
{
	u_int32_t fsize;
	if (TSTMP_SUPPORTED(tp)) {
		/*
		 * Since timestamps are supported on the connection,
		 * we can do recovery as described in rfc 4015.
		 */
		fsize = tp->snd_max - tp->snd_una;
		tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
		tp->snd_recover_prev = tp->snd_recover;
	} else {
		/*
		 * Timestamp option is not supported on this connection.
		 * Record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp)) {
			tp->t_flags |= TF_WASFRECOVERY;
		} else {
			tp->t_flags &= ~TF_WASFRECOVERY;
		}
	}
	tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
	tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
	tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
}
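
/*
 * Note on the srtt snapshot above (added commentary; assumes the
 * TCP_RTT_SHIFT == 5 scaling from tcp_var.h): t_srtt is kept
 * left-shifted for precision, so e.g. t_srtt == 3200 represents
 * 3200 >> 5 = 100 ticks of smoothed RTT, and t_srtt_prev stores the
 * unscaled 100 + 2 = 102, the extra 2 ticks acting as slop for the
 * later spurious-retransmit comparison.
 */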

/*
 * Revert to the older segment size if there is an indication that PMTU
 * blackhole detection was not needed.
 */
void
tcp_pmtud_revert_segment_size(struct tcpcb *tp)
{
	int32_t optlen;

	VERIFY(tp->t_pmtud_saved_maxopd > 0);
	tp->t_flags |= TF_PMTUD;
	tp->t_flags &= ~TF_BLACKHOLE;
	optlen = tp->t_maxopd - tp->t_maxseg;
	tp->t_maxopd = tp->t_pmtud_saved_maxopd;
	tp->t_maxseg = tp->t_maxopd - optlen;

	/*
	 * Reset the slow-start flight size as it
	 * may depend on the new MSS
	 */
	if (CC_ALGO(tp)->cwnd_init != NULL) {
		CC_ALGO(tp)->cwnd_init(tp);
	}

	if (TCP_USE_RLEDBAT(tp, tp->t_inpcb->inp_socket) &&
	    tcp_cc_rledbat.rwnd_init != NULL) {
		tcp_cc_rledbat.rwnd_init(tp);
	}

	tp->t_pmtud_start_ts = 0;
	tcpstat.tcps_pmtudbh_reverted++;

	/* change MSS according to recommendation, if there was one */
	tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
}

static uint32_t
tcp_pmtud_black_holed_next_mss(struct tcpcb *tp)
{
	/* Reduce the MSS to intermediary value */
	if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
		return tcp_pmtud_black_hole_mss;
	} else {
		if (tp->t_inpcb->inp_vflag & INP_IPV4) {
			return tcp_mssdflt;
		} else {
			return tcp_v6mssdflt;
		}
	}
}
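
/*
 * Worked step-down example (illustrative values): suppose a connection
 * negotiated t_maxopd = 1440 with 12 bytes of options (t_maxseg = 1428).
 * On the black-hole probe, t_maxopd > tcp_pmtud_black_hole_mss (default
 * 1200), so the probe MSS becomes 1200 and t_maxseg drops to
 * 1200 - 12 = 1188; a connection whose t_maxopd is already at or below
 * 1200 would instead get tcp_mssdflt / tcp_v6mssdflt. If lowering the
 * MSS does not help either (t_rxtshift > 4), the detection is deemed a
 * false alarm and tcp_pmtud_revert_segment_size() restores 1440.
 */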

/*
 * Send a packet designed to force a response
 * if the peer is up and reachable:
 * either an ACK if the connection is still alive,
 * or an RST if the peer has closed the connection
 * due to timeout or reboot.
 * Using sequence number tp->snd_una-1
 * causes the transmitted zero-length segment
 * to lie outside the receive window;
 * by the protocol spec, this requires the
 * correspondent TCP to respond.
 */
static bool
tcp_send_keep_alive(struct tcpcb *tp)
{
	struct tcptemp *__single t_template;
	struct mbuf *__single m;

	tcpstat.tcps_keepprobe++;
	t_template = tcp_maketemplate(tp, &m);
	if (t_template != NULL) {
		struct inpcb *inp = tp->t_inpcb;
		struct tcp_respond_args tra;

		bzero(&tra, sizeof(tra));
		tra.nocell = INP_NO_CELLULAR(inp) ? 1 : 0;
		tra.noexpensive = INP_NO_EXPENSIVE(inp) ? 1 : 0;
		tra.noconstrained = INP_NO_CONSTRAINED(inp) ? 1 : 0;
		tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp) ? 1 : 0;
		tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp) ? 1 : 0;
		tra.management_allowed = INP_MANAGEMENT_ALLOWED(inp) ? 1 : 0;
		tra.keep_alive = 1;
		if (tp->t_inpcb->inp_flags & INP_BOUND_IF) {
			tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
		} else {
			tra.ifscope = IFSCOPE_NONE;
		}
		tcp_respond(tp, t_template->tt_ipgen,
		    &t_template->tt_t, (struct mbuf *)NULL,
		    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
		(void) m_free(m);
		return true;
	} else {
		return false;
	}
}

/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(struct tcpcb *tp, int timer)
{
	int32_t rexmt, optlen = 0, idle_time = 0;
	struct socket *so;
	u_int64_t accsleep_ms;
	u_int64_t last_sleep_ms = 0;
	struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;

	so = tp->t_inpcb->inp_socket;
	idle_time = tcp_now - tp->t_rcvtime;

	switch (timer) {
	/*
	 * 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
	 * delete connection control block.
	 * Otherwise (this case shouldn't happen), check again in a bit;
	 * we keep the socket in the main list in that case.
	 */
	case TCPT_2MSL:
		tcp_free_sackholes(tp);
		if (tp->t_state != TCPS_TIME_WAIT &&
		    tp->t_state != TCPS_FIN_WAIT_2 &&
		    ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
			    (u_int32_t)TCP_CONN_KEEPINTVL(tp));
		} else {
			if (tp->t_state == TCPS_FIN_WAIT_2) {
				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
				    "FIN wait timeout drop");
				tcpstat.tcps_fin_timeout_drops++;
				tp = tcp_drop(tp, 0);
			} else {
				tp = tcp_close(tp);
			}
			return tp;
		}
		break;

	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	case TCPT_REXMT:
		absolutetime_to_nanoseconds(mach_absolutetime_asleep,
		    &accsleep_ms);
		accsleep_ms = accsleep_ms / 1000000UL;
		if (accsleep_ms > tp->t_accsleep_ms) {
			last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
		}
		/*
		 * Drop a connection in the retransmit timer
		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
		 * times
		 * 2. If the time spent in this retransmission episode is
		 * more than the time limit set with TCP_RXT_CONNDROPTIME
		 * socket option
		 * 3. If TCP_RXT_FINDROP socket option was set and
		 * we have already retransmitted the FIN 3 times without
		 * receiving an ack
		 */
		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
		    (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
		    (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
		    ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
		    (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
		    (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
			if (tp->t_state == TCPS_ESTABLISHED &&
			    tp->t_rxt_minimum_timeout > 0) {
				/*
				 * Avoid dropping a connection if minimum
				 * timeout is set and that time did not
				 * pass. We will retry sending
				 * retransmissions at the maximum interval
				 */
				if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
				    tp->t_rxt_minimum_timeout))) {
					tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
					goto retransmit_packet;
				}
			}
			if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
				tcpstat.tcps_rxtfindrop++;
			} else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
				tcpstat.tcps_drop_after_sleep++;
			} else {
				tcpstat.tcps_timeoutdrop++;
			}
			if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
				if (TCP_ECN_ENABLED(tp)) {
					INP_INC_IFNET_STAT(tp->t_inpcb,
					    ecn_on.rxmit_drop);
				} else {
					INP_INC_IFNET_STAT(tp->t_inpcb,
					    ecn_off.rxmit_drop);
				}
			}
			tp->t_rxtshift = TCP_MAXRXTSHIFT;
			soevent(so,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));

			if (TCP_ECN_ENABLED(tp) &&
			    tp->t_state == TCPS_ESTABLISHED) {
				tcp_heuristic_ecn_droprxmt(tp);
			}

			TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
			    "retransmission timeout drop");
			tp = tcp_drop(tp, tp->t_softerror ?
			    tp->t_softerror : ETIMEDOUT);

			break;
		}
retransmit_packet:
		tcpstat.tcps_rexmttimeo++;
		tp->t_accsleep_ms = accsleep_ms;

		if (tp->t_rxtshift == 1 &&
		    tp->t_state == TCPS_ESTABLISHED) {
			/* Set the time at which retransmission started. */
			tp->t_rxtstart = tcp_now;

			/*
			 * if this is the first retransmit timeout, save
			 * the state so that we can recover if the timeout
			 * is spurious.
			 */
			tcp_rexmt_save_state(tp);
			tcp_ccdbg_trace(tp, NULL, TCP_CC_FIRST_REXMT);
		}
#if MPTCP
		if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
		    (tp->t_state == TCPS_ESTABLISHED) &&
		    (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
			mptcp_act_on_txfail(so);
		}

		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
		    (so->so_flags & SOF_MP_SUBFLOW)) {
			struct mptses *mpte = tptomptp(tp)->mpt_mpte;

			if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER ||
			    mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
				mptcp_check_subflows_and_add(mpte);
			}
		}
#endif /* MPTCP */

		if (tp->t_adaptive_wtimo > 0 &&
		    tp->t_rxtshift > tp->t_adaptive_wtimo &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			/* Send an event to the application */
			soevent(so,
			    (SO_FILT_HINT_LOCKED |
			    SO_FILT_HINT_ADAPTIVE_WTIMO));
		}

		/*
		 * If this is a retransmit timeout after PTO, the PTO
		 * was not effective
		 */
		if (tp->t_flagsext & TF_SENT_TLPROBE) {
			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
			tcpstat.tcps_rto_after_pto++;
		}

		if (tp->t_flagsext & TF_DELAY_RECOVERY) {
			/*
			 * Retransmit timer fired before entering recovery
			 * on a connection with packet re-ordering. This
			 * suggests that the reordering metrics computed
			 * are not accurate.
			 */
			tp->t_reorderwin = 0;
			tp->t_timer[TCPT_DELAYFR] = 0;
			tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
		}

		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    tp->t_state == TCPS_SYN_RECEIVED) {
			tcp_disable_tfo(tp);
		}

		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
		    !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
		    ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
		    tp->t_rxtshift > 4)) {
			/*
			 * For regular retransmissions, a first one is being
			 * done for tail-loss probe.
			 * Thus, if rxtshift > 1, this means we have sent the segment
			 * a total of 3 times.
			 *
			 * If we are in SYN-SENT state, then there is no tail-loss
			 * probe thus we have to let rxtshift go up to 3.
			 */
			tcp_heuristic_tfo_middlebox(tp);

			so->so_error = ENODATA;
			soevent(so,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
			sorwakeup(so);
			sowwakeup(so);

			tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
			tcpstat.tcps_tfo_sndblackhole++;
		}

		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
		    tp->t_rxtshift > 3) {
			if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
				tcp_heuristic_tfo_middlebox(tp);

				so->so_error = ENODATA;
				soevent(so,
				    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
				sorwakeup(so);
				sowwakeup(so);
			}
		}

		if (tp->t_state == TCPS_SYN_SENT) {
			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
			tp->t_stat.synrxtshift = tp->t_rxtshift;
			tp->t_stat.rxmitsyns++;

			/* When retransmitting, disable TFO */
			if (TFO_ENABLED(tp) &&
			    !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) {
				tcp_disable_tfo(tp);
				tp->t_tfo_flags |= TFO_F_SYN_LOSS;
			}
		} else {
			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
		}

		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
		    TCP_ADD_REXMTSLOP(tp));
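		/*
		 * Backoff example (illustrative numbers): with
		 * TCP_REXMTVAL(tp) at 500 ms and t_rxtshift == 3,
		 * rexmt = 500 * tcp_backoff[3] = 4000 ms, which
		 * TCPT_RANGESET() then clamps to
		 * [t_rttmin, TCPTV_REXMTMAX] plus the
		 * TCP_ADD_REXMTSLOP() slop before arming TCPT_REXMT.
		 */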
		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);

		TCP_LOG_RTT_INFO(tp);

		if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) {
			goto fc_output;
		}

		tcp_free_sackholes(tp);
		if (TCP_RACK_ENABLED(tp)) {
			tcp_segs_clear_sacked(tp);
			tcp_rack_loss_on_rto(tp, true);
		}
		/*
		 * Check for potential Path MTU Discovery Black Hole
		 */
		if (tcp_pmtud_black_hole_detect &&
		    !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
		    (tp->t_state == TCPS_ESTABLISHED)) {
			if ((tp->t_flags & TF_PMTUD) &&
			    tp->t_pmtud_lastseg_size > tcp_pmtud_black_holed_next_mss(tp) &&
			    tp->t_rxtshift == 2) {
				/*
				 * Enter Path MTU Black-hole Detection mechanism:
				 * - Disable Path MTU Discovery (IP "DF" bit).
				 * - Reduce MTU to lower value than what we
				 * negotiated with the peer.
				 */
				/* Disable Path MTU Discovery for now */
				tp->t_flags &= ~TF_PMTUD;
				/* Record that we may have found a black hole */
				tp->t_flags |= TF_BLACKHOLE;
				optlen = tp->t_maxopd - tp->t_maxseg;
				/* Keep track of previous MSS */
				tp->t_pmtud_saved_maxopd = tp->t_maxopd;
				tp->t_pmtud_start_ts = tcp_now;
				if (tp->t_pmtud_start_ts == 0) {
					tp->t_pmtud_start_ts++;
				}
				/* Reduce the MSS to intermediary value */
				tp->t_maxopd = tcp_pmtud_black_holed_next_mss(tp);
				tp->t_maxseg = tp->t_maxopd - optlen;

				/*
				 * Reset the slow-start flight size
				 * as it may depend on the new MSS
				 */
				if (CC_ALGO(tp)->cwnd_init != NULL) {
					CC_ALGO(tp)->cwnd_init(tp);
				}
				tp->snd_cwnd = tp->t_maxseg;

				if (TCP_USE_RLEDBAT(tp, so) &&
				    tcp_cc_rledbat.rwnd_init != NULL) {
					tcp_cc_rledbat.rwnd_init(tp);
				}
			}
			/*
			 * If further retransmissions are still
			 * unsuccessful with a lowered MTU, maybe this
			 * isn't a Black Hole and we restore the previous
			 * MSS and blackhole detection flags.
			 */
			else {
				if ((tp->t_flags & TF_BLACKHOLE) &&
				    (tp->t_rxtshift > 4)) {
					tcp_pmtud_revert_segment_size(tp);
					tp->snd_cwnd = tp->t_maxseg;
				}
			}
		}

		/*
		 * Disable rfc1323 and rfc1644 if we haven't got any
		 * response to our SYN (after we reach the threshold)
		 * to work-around some broken terminal servers (most of
		 * which have hopefully been retired) that have bad VJ
		 * header compression code which trashes TCP segments
		 * containing unknown-to-them TCP options.
		 * Do this only on non-local connections.
		 */
		if (tp->t_state == TCPS_SYN_SENT &&
		    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) {
			tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP);
		}

		/*
		 * If losing, let the lower level know and try for
		 * a better route.  Also, if we backed off this far,
		 * our srtt estimate is probably bogus.  Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
			if (!(tp->t_inpcb->inp_vflag & INP_IPV4)) {
				in6_losing(tp->t_inpcb);
			} else {
				in_losing(tp->t_inpcb);
			}
			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
			tp->t_srtt = 0;
		}
		tp->snd_nxt = tp->snd_una;
		/*
		 * Note:  We overload snd_recover to function also as the
		 * snd_last variable described in RFC 2582
		 */
		tp->snd_recover = tp->snd_max;
		/*
		 * Force a segment to be sent.
		 */
		tp->t_flags |= TF_ACKNOW;

		/*
		 * If timing a segment in this window, stop the timer
		 * except when we are in connecting states on cellular
		 * interfaces
		 */
		if (tp->t_state >= TCPS_ESTABLISHED || (outifp != NULL &&
		    IFNET_IS_CELLULAR(outifp) == false)) {
			tp->t_rtttime = 0;
		}

		if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) {
			tcpstat.tcps_tailloss_rto++;
		}

		/*
		 * RFC 5681 says: when a TCP sender detects segment loss
		 * using retransmit timer and the given segment has already
		 * been retransmitted by way of the retransmission timer at
		 * least once, the value of ssthresh is held constant
		 */
		if (tp->t_rxtshift == 1 &&
		    CC_ALGO(tp)->after_timeout != NULL) {
			CC_ALGO(tp)->after_timeout(tp);
			/*
			 * CWR notifications are to be sent on new data
			 * right after Fast Retransmits and ECE
			 * notification receipts.
			 */
			if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
				tp->ecn_flags |= TE_SENDCWR;
			}
		}

		EXIT_FASTRECOVERY(tp);

		/* Exit cwnd non validated phase */
		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;


fc_output:
		tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);

		(void) tcp_output(tp);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	case TCPT_PERSIST:
		tcpstat.tcps_persisttimeo++;
		/*
		 * Hack: if the peer is dead/unreachable, we do not
		 * time out if the window is closed.  After a full
		 * backoff, drop the connection if the idle time
		 * (no responses to probes) reaches the maximum
		 * backoff that we would use if retransmitting.
		 *
		 * Drop the connection if we reached the maximum allowed time for
		 * Zero Window Probes without a non-zero update from the peer.
		 * See rdar://5805356
		 */
		if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
		    (idle_time >= tcp_maxpersistidle ||
		    idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
		    ((tp->t_persist_stop != 0) &&
		    TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
			TCP_LOG_DROP_PCB(NULL, NULL, tp, false, "persist timeout drop");
			tcpstat.tcps_persistdrop++;
			soevent(so,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
			tp = tcp_drop(tp, ETIMEDOUT);
			break;
		}
		tcp_setpersist(tp);
		tp->t_flagsext |= TF_FORCE;
		(void) tcp_output(tp);
		tp->t_flagsext &= ~TF_FORCE;
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case TCPT_KEEP:
#if FLOW_DIVERT
		if (tp->t_inpcb->inp_socket->so_flags & SOF_FLOW_DIVERT) {
			break;
		}
#endif /* FLOW_DIVERT */

		tcpstat.tcps_keeptimeo++;
#if MPTCP
		/*
		 * Regular TCP connections do not send keepalives after
		 * closing; MPTCP must not either, after sending Data FINs.
		 */
		struct mptcb *mp_tp = tptomptp(tp);
		if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
		    (tp->t_state > TCPS_ESTABLISHED)) {
			goto dropit;
		} else if (mp_tp != NULL) {
			if ((mptcp_ok_to_keepalive(mp_tp) == 0)) {
				goto dropit;
			}
		}
#endif /* MPTCP */
		if (tp->t_state < TCPS_ESTABLISHED) {
			goto dropit;
		}
		if ((always_keepalive ||
		    (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
		    (tp->t_flagsext & TF_DETECT_READSTALL) ||
		    (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
		    (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
			if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) {
				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
				    "keep alive timeout drop");
				goto dropit;
			}

			if (tcp_send_keep_alive(tp)) {
				if (tp->t_flagsext & TF_DETECT_READSTALL) {
					tp->t_rtimo_probes++;
				}

				TCP_LOG_KEEP_ALIVE(tp, idle_time);
			}

			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			    TCP_CONN_KEEPINTVL(tp));
		} else {
			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			    TCP_CONN_KEEPIDLE(tp));
		}
		if (tp->t_flagsext & TF_DETECT_READSTALL) {
			bool reenable_probe = false;
			/*
			 * The keep alive packets sent to detect a read
			 * stall did not get a response from the
			 * peer. Generate more keep-alives to confirm this.
			 * If the number of probes sent reaches the limit,
			 * generate an event.
			 */
			if (tp->t_adaptive_rtimo > 0) {
				if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
					/* Generate an event */
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_ADAPTIVE_RTIMO));
					tcp_keepalive_reset(tp);
				} else {
					reenable_probe = true;
				}
			} else if (outifp != NULL &&
			    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
			    tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
				reenable_probe = true;
			} else {
				tp->t_flagsext &= ~TF_DETECT_READSTALL;
			}
			if (reenable_probe) {
				int ind = min(tp->t_rtimo_probes,
				    TCP_MAXRXTSHIFT);
				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
					tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
			}
		}
		if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
			int ind;

			tp->t_tfo_probes++;
			ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);

			/*
			 * We take the minimum among the time set by true
			 * keepalive (see above) and the backed-off RTO.
			 * That way we back off in case of packet loss but
			 * will never time out slower than regular keepalive
			 * due to the backing off.
			 */
			tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
				    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
			    tp->t_timer[TCPT_KEEP]);
		} else if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
			/* Still no data! Let's assume a TFO-error and err out... */
			tcp_heuristic_tfo_middlebox(tp);

			so->so_error = ENODATA;
			soevent(so,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
			sorwakeup(so);
			tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
			tcpstat.tcps_tfo_blackhole++;
		}
		break;
	case TCPT_DELACK:
		if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_timer[TCPT_DELACK] = 0;
			tp->t_flags |= TF_ACKNOW;

1592 			/*
1593 			 * If delayed ack timer fired while stretching
1594 			 * acks, count the number of times the streaming
1595 			 * detection was not correct. If this exceeds a
1596 			 * threshold, disable stretch ack on this
1597 			 * connection.
1598 			 *
1599 			 * Also, go back to acking every other packet.
1600 			 */
1601 			if ((tp->t_flags & TF_STRETCHACK)) {
1602 				if (tp->t_unacksegs > 1 &&
1603 				    tp->t_unacksegs < maxseg_unacked) {
1604 					tp->t_stretchack_delayed++;
1605 				}
1606 
1607 				if (tp->t_stretchack_delayed >
1608 				    TCP_STRETCHACK_DELAY_THRESHOLD) {
1609 					tp->t_flagsext |= TF_DISABLE_STRETCHACK;
1610 					/*
1611 					 * Note the time at which stretch
1612 					 * ack was disabled automatically
1613 					 */
1614 					tp->rcv_nostrack_ts = tcp_now;
1615 					tcpstat.tcps_nostretchack++;
1616 					tp->t_stretchack_delayed = 0;
1617 					tp->rcv_nostrack_pkts = 0;
1618 				}
1619 				tcp_reset_stretch_ack(tp);
1620 			}
1621 			tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1622 
1623 			/*
1624 			 * If we are measuring inter packet arrival jitter
1625 			 * for throttling a connection, this delayed ack
1626 			 * might be the reason for accumulating some
1627 			 * jitter. So let's restart the measurement.
1628 			 */
1629 			CLEAR_IAJ_STATE(tp);
1630 
1631 			tcpstat.tcps_delack++;
1632 			tp->t_stat.delayed_acks_sent++;
1633 			(void) tcp_output(tp);
1634 		}
1635 		break;
1636 
1637 #if MPTCP
1638 	case TCPT_JACK_RXMT:
1639 		if ((tp->t_state == TCPS_ESTABLISHED) &&
1640 		    (tp->t_mpflags & TMPF_PREESTABLISHED) &&
1641 		    (tp->t_mpflags & TMPF_JOINED_FLOW)) {
1642 			if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
1643 				tcpstat.tcps_timeoutdrop++;
1644 				soevent(so,
1645 				    (SO_FILT_HINT_LOCKED |
1646 				    SO_FILT_HINT_TIMEOUT));
1647 				tp = tcp_drop(tp, tp->t_softerror ?
1648 				    tp->t_softerror : ETIMEDOUT);
1649 				break;
1650 			}
1651 			tcpstat.tcps_join_rxmts++;
1652 			tp->t_mpflags |= TMPF_SND_JACK;
1653 			tp->t_flags |= TF_ACKNOW;
1654 
1655 			/*
1656 			 * No backoff is implemented for simplicity for this
1657 			 * corner case.
1658 			 */
1659 			(void) tcp_output(tp);
1660 		}
1661 		break;
1662 	case TCPT_CELLICON:
1663 	{
1664 		struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1665 
1666 		tp->t_timer[TCPT_CELLICON] = 0;
1667 
1668 		if (mpte->mpte_cellicon_increments == 0) {
1669 			/* Cell-icon not set by this connection */
1670 			break;
1671 		}
1672 
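		/*
		 * If at least one toggle interval has passed since the icon
		 * was last set, drop this connection's hold on the icon.
		 */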
1673 		if (TSTMP_LT(mpte->mpte_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE, tcp_now)) {
1674 			mptcp_unset_cellicon(mpte, NULL, 1);
1675 		}
1676 
1677 		if (mpte->mpte_cellicon_increments) {
1678 			tp->t_timer[TCPT_CELLICON] = OFFSET_FROM_START(tp, MPTCP_CELLICON_TOGGLE_RATE);
1679 		}
1680 
1681 		break;
1682 	}
1683 #endif /* MPTCP */
1684 
1685 	case TCPT_PTO:
1686 	{
1687 		int32_t ret = 0;
1688 
1689 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1690 			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1691 		}
1692 		/*
1693 		 * Check if the connection is in the right state to
1694 		 * send a probe
1695 		 */
1696 		if ((tp->t_state != TCPS_ESTABLISHED ||
1697 		    tp->t_rxtshift > 0 ||
1698 		    tp->snd_max == tp->snd_una ||
1699 		    !SACK_ENABLED(tp) || IN_FASTRECOVERY(tp)) &&
1700 		    !(tp->t_flagsext & TF_IF_PROBING)) {
1701 			break;
1702 		}
1703 
1704 		/*
1705 		 * When the interface state is changed explicitly reset the retransmission
1706 		 * timer state for both SYN and data packets because we do not want to
1707 		 * wait unnecessarily or timeout too quickly if the link characteristics
1708 		 * have changed drastically
1709 		 */
1710 		if (tp->t_flagsext & TF_IF_PROBING) {
1711 			tp->t_rxtshift = 0;
1712 			if (tp->t_state == TCPS_SYN_SENT) {
1713 				tp->t_stat.synrxtshift = tp->t_rxtshift;
1714 			}
1715 			/*
1716 			 * Reset to the default RTO
1717 			 */
1718 			tp->t_srtt = TCPTV_SRTTBASE;
1719 			tp->t_rttvar =
1720 			    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1721 			tp->t_rttmin = tp->t_flags & TF_LOCAL ? tcp_TCPTV_MIN :
1722 			    TCPTV_REXMTMIN;
1723 			TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1724 			    tp->t_rttmin, TCPTV_REXMTMAX, TCP_ADD_REXMTSLOP(tp));
1725 			TCP_LOG_RTT_INFO(tp);
1726 		}
1727 
1728 		if (tp->t_state == TCPS_SYN_SENT) {
1729 			/*
1730 			 * The PTO for SYN_SENT reinitializes TCP as if it were a fresh
1731 			 * connection attempt
1732 			 */
1733 			tp->snd_nxt = tp->snd_una;
1734 			/*
1735 			 * Note:  We overload snd_recover to function also as the
1736 			 * snd_last variable described in RFC 2582
1737 			 */
1738 			tp->snd_recover = tp->snd_max;
1739 			/*
1740 			 * Force a segment to be sent.
1741 			 */
1742 			tp->t_flags |= TF_ACKNOW;
1743 
1744 			/* If timing a segment in this window, stop the timer */
1745 			tp->t_rtttime = 0;
1746 		} else {
1747 			int32_t snd_len;
1748 
1749 			/*
1750 			 * If there is no new data to send or if the
1751 			 * connection is limited by receive window then
1752 			 * retransmit the last segment, otherwise send
1753 			 * new data.
1754 			 */
1755 			snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
1756 			    - (tp->snd_max - tp->snd_una);
1757 			if (snd_len > 0) {
1758 				tp->snd_nxt = tp->snd_max;
1759 			} else {
1760 				snd_len = min((tp->snd_max - tp->snd_una),
1761 				    tp->t_maxseg);
1762 				tp->snd_nxt = tp->snd_max - snd_len;
1763 			}
1764 		}
1765 
1766 		tcpstat.tcps_pto++;
1767 		if (tp->t_flagsext & TF_IF_PROBING) {
1768 			tcpstat.tcps_probe_if++;
1769 		}
1770 
1771 		/* If timing a segment in this window, stop the timer */
1772 		tp->t_rtttime = 0;
1773 		/* Note that tail loss probe is being sent. Exclude IF probe */
1774 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1775 			tp->t_flagsext |= TF_SENT_TLPROBE;
1776 			tp->t_tlpstart = tcp_now;
1777 		}
1778 
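		/*
		 * Inflate the congestion window by one segment so the probe
		 * can be sent even when the window is fully used; it is
		 * deflated again after the tcp_output() call below.
		 */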
1779 		tp->snd_cwnd += tp->t_maxseg;
1780 		/*
1781 		 * When tail-loss-probe fires, we reset the RTO timer, because
1782 		 * a probe just got sent, so we are good to push out the timer.
1783 		 *
1784 		 * Set to 0 to ensure that tcp_output() will reschedule it
1785 		 */
1786 		tp->t_timer[TCPT_REXMT] = 0;
1787 		ret = tcp_output(tp);
1788 
1789 #if (DEBUG || DEVELOPMENT)
1790 		if ((tp->t_flagsext & TF_IF_PROBING) &&
1791 		    ((IFNET_IS_COMPANION_LINK(tp->t_inpcb->inp_last_outifp)) ||
1792 		    tp->t_state == TCPS_SYN_SENT)) {
1793 			if (ret == 0 && tcp_probe_if_fix_port > 0 &&
1794 			    tcp_probe_if_fix_port <= IPPORT_HILASTAUTO) {
1795 				tp->t_timer[TCPT_REXMT] = 0;
1796 				tcp_set_lotimer_index(tp);
1797 			}
1798 
1799 			os_log(OS_LOG_DEFAULT,
1800 			    "%s: sent %s probe for %u > %u on interface %s"
1801 			    " (%u) %s(%d)",
1802 			    __func__,
1803 			    tp->t_state == TCPS_SYN_SENT ? "SYN" : "data",
1804 			    ntohs(tp->t_inpcb->inp_lport),
1805 			    ntohs(tp->t_inpcb->inp_fport),
1806 			    if_name(tp->t_inpcb->inp_last_outifp),
1807 			    tp->t_inpcb->inp_last_outifp->if_index,
1808 			    ret == 0 ? "succeeded" : "failed", ret);
1809 		}
1810 #endif /* DEBUG || DEVELOPMENT */
1811 
1812 		/*
1813 		 * When there is data (or a SYN) to send, the above call to
1814 		 * tcp_output() should have armed either the REXMT or the
1815 		 * PERSIST timer. If it didn't, something is wrong and this
1816 		 * connection would idle around forever. Let's make sure that
1817 		 * at least the REXMT timer is set.
1818 		 */
1819 		if (tp->t_timer[TCPT_REXMT] == 0 && tp->t_timer[TCPT_PERSIST] == 0 &&
1820 		    (tp->t_inpcb->inp_socket->so_snd.sb_cc != 0 || tp->t_state == TCPS_SYN_SENT ||
1821 		    tp->t_state == TCPS_SYN_RECEIVED)) {
1822 			tp->t_timer[TCPT_REXMT] =
1823 			    OFFSET_FROM_START(tp, tp->t_rxtcur);
1824 
1825 			os_log(OS_LOG_DEFAULT,
1826 			    "%s: tcp_output() returned %u with retransmission timer disabled "
1827 			    "for %u > %u in state %d, reset timer to %d",
1828 			    __func__, ret,
1829 			    ntohs(tp->t_inpcb->inp_lport),
1830 			    ntohs(tp->t_inpcb->inp_fport),
1831 			    tp->t_state,
1832 			    tp->t_timer[TCPT_REXMT]);
1833 
1834 			tcp_check_timer_state(tp);
1835 		}
1836 		tp->snd_cwnd -= tp->t_maxseg;
1837 
1838 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1839 			tp->t_tlphighrxt = tp->snd_nxt;
1840 			tp->t_tlphightrxt_persist = tp->snd_nxt;
1841 		}
1842 		break;
1843 	}
1844 	case TCPT_DELAYFR:
1845 		tp->t_flagsext &= ~TF_DELAY_RECOVERY;
1846 
1847 		/*
1848 		 * Don't do anything if one of the following is true:
1849 		 * - the connection is already in recovery
1850 		 * - the sequence space up to snd_recover has been acknowledged
1851 		 * - retransmit timeout has fired
1852 		 */
1853 		if (IN_FASTRECOVERY(tp) ||
1854 		    SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
1855 		    tp->t_rxtshift > 0) {
1856 			break;
1857 		}
1858 
1859 		VERIFY(SACK_ENABLED(tp));
1860 		tcp_rexmt_save_state(tp);
1861 		if (CC_ALGO(tp)->pre_fr != NULL) {
1862 			CC_ALGO(tp)->pre_fr(tp);
1863 			if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
1864 				tp->ecn_flags |= TE_SENDCWR;
1865 			}
1866 		}
1867 		ENTER_FASTRECOVERY(tp);
1868 
1869 		tp->t_timer[TCPT_REXMT] = 0;
1870 		tcpstat.tcps_sack_recovery_episode++;
1871 		tp->t_sack_recovery_episode++;
1872 		tp->snd_cwnd = tp->t_maxseg;
1873 		tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
1874 		(void) tcp_output(tp);
1875 		break;
1876 
1877 dropit:
1878 		tcpstat.tcps_keepdrops++;
1879 		soevent(so,
1880 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1881 		tp = tcp_drop(tp, ETIMEDOUT);
1882 		break;
1883 	case TCPT_REORDER:
1884 		if (TCP_RACK_ENABLED(tp)) {
1885 			tcp_rack_reordering_timeout(tp, 0);
1886 		}
1887 		break;
1888 	}
1889 	return tp;
1890 }
1891 
1892 /* Remove a timer entry from the timer list */
1893 void
1894 tcp_remove_timer(struct tcpcb *tp)
1895 {
1896 	struct tcptimerlist *listp = &tcp_timer_list;
1897 
1898 	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
1899 	if (!(TIMER_IS_ON_LIST(tp))) {
1900 		return;
1901 	}
1902 	lck_mtx_lock(&listp->mtx);
1903 
1904 	/* Check if pcb is on timer list again after acquiring the lock */
1905 	if (!(TIMER_IS_ON_LIST(tp))) {
1906 		lck_mtx_unlock(&listp->mtx);
1907 		return;
1908 	}
1909 
1910 	if (listp->next_te != NULL && listp->next_te == &tp->tentry) {
1911 		listp->next_te = LIST_NEXT(&tp->tentry, le);
1912 	}
1913 
1914 	LIST_REMOVE(&tp->tentry, le);
1915 	tp->t_flags &= ~(TF_TIMER_ONLIST);
1916 
1917 	listp->entries--;
1918 
1919 	tp->tentry.le.le_next = NULL;
1920 	tp->tentry.le.le_prev = NULL;
1921 	lck_mtx_unlock(&listp->mtx);
1922 }
1923 
1924 /*
1925  * Function to check if the timerlist needs to be rescheduled to run
1926  * the timer entry correctly. Basically, this is to check if we can avoid
1927  * taking the list lock.
1928  */
1929 
1930 static boolean_t
1931 need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
1932 {
1933 	struct tcptimerlist *listp = &tcp_timer_list;
1934 	int32_t diff;
1935 
1936 	/*
1937 	 * If the list is being processed then the state of the list is
1938 	 * in flux. In this case always acquire the lock and set the state
1939 	 * correctly.
1940 	 */
1941 	if (listp->running) {
1942 		return TRUE;
1943 	}
1944 
1945 	if (!listp->scheduled) {
1946 		return TRUE;
1947 	}
1948 
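	/*
	 * Compare the list's runtime with the new timer: reschedule only
	 * when the list would fire more than one quantum (of the
	 * requested mode) after the timer.
	 */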
1949 	diff = timer_diff(listp->runtime, 0, runtime, 0);
1950 	if (diff <= 0) {
1951 		/* The list is going to run before this timer */
1952 		return FALSE;
1953 	} else {
1954 		if (mode & TCP_TIMERLIST_10MS_MODE) {
1955 			if (diff <= TCP_TIMER_10MS_QUANTUM) {
1956 				return FALSE;
1957 			}
1958 		} else if (mode & TCP_TIMERLIST_100MS_MODE) {
1959 			if (diff <= TCP_TIMER_100MS_QUANTUM) {
1960 				return FALSE;
1961 			}
1962 		} else {
1963 			if (diff <= TCP_TIMER_500MS_QUANTUM) {
1964 				return FALSE;
1965 			}
1966 		}
1967 	}
1968 	return TRUE;
1969 }
1970 
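/*
 * Arm the timer list's thread call to fire 'offset' milliseconds from now,
 * capped at TCP_TIMERLIST_MAX_OFFSET. The timer list lock must be held.
 */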
1971 void
1972 tcp_sched_timerlist(uint32_t offset)
1973 {
1974 	uint64_t deadline = 0;
1975 	struct tcptimerlist *listp = &tcp_timer_list;
1976 
1977 	LCK_MTX_ASSERT(&listp->mtx, LCK_MTX_ASSERT_OWNED);
1978 
1979 	offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
1980 	listp->runtime = tcp_now + offset;
1981 	listp->schedtime = tcp_now;
1982 	if (listp->runtime == 0) {
1983 		listp->runtime++;
1984 		offset++;
1985 	}
1986 
1987 	clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
1988 
1989 	thread_call_enter_delayed(listp->call, deadline);
1990 	listp->scheduled = TRUE;
1991 }
1992 
1993 /*
1994  * Function to run the timers for a connection.
1995  *
1996  * Returns the offset of next timer to be run for this connection which
1997  * can be used to reschedule the timerlist.
1998  *
1999  * te_mode is an out parameter that indicates the modes of active
2000  * timers for this connection.
2001  */
2002 u_int32_t
2003 tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
2004     u_int16_t probe_if_index)
2005 {
2006 	struct socket *so;
2007 	u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
2008 	u_int32_t timer_val, offset = 0, lo_timer = 0;
2009 	int32_t diff;
2010 	boolean_t needtorun[TCPT_NTIMERS];
2011 	int count = 0;
2012 
2013 	VERIFY(tp != NULL);
2014 	bzero(needtorun, sizeof(needtorun));
2015 	*te_mode = 0;
2016 
2017 	socket_lock(tp->t_inpcb->inp_socket, 1);
2018 
2019 	so = tp->t_inpcb->inp_socket;
2020 	/* Release the want count on inp */
2021 	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
2022 	    == WNT_STOPUSING) {
2023 		if (TIMER_IS_ON_LIST(tp)) {
2024 			tcp_remove_timer(tp);
2025 		}
2026 
2027 		/* Looks like the TCP connection got closed while we
2028 		 * were waiting for the lock. Done.
2029 		 */
2030 		goto done;
2031 	}
2032 
2033 	/*
2034 	 * If this connection is over an interface that needs to
2035 	 * be probed, send probe packets to reinitiate communication.
2036 	 */
2037 	if (TCP_IF_STATE_CHANGED(tp, probe_if_index)) {
2038 		tp->t_flagsext |= TF_IF_PROBING;
2039 		tcp_timers(tp, TCPT_PTO);
2040 		tp->t_timer[TCPT_PTO] = 0;
2041 		tp->t_flagsext &= ~TF_IF_PROBING;
2042 	}
2043 
2044 	/*
2045 	 * Since the timer thread needs to wait for tcp lock, it may race
2046 	 * with another thread that can cancel or reschedule the timer
2047 	 * that is about to run. Check if we need to run anything.
2048 	 */
2049 	if ((index = tp->tentry.index) == TCPT_NONE) {
2050 		goto done;
2051 	}
2052 
2053 	timer_val = tp->t_timer[index];
2054 
2055 	diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
2056 	if (diff > 0) {
2057 		if (tp->tentry.index != TCPT_NONE) {
2058 			offset = diff;
2059 			*(te_mode) = tp->tentry.mode;
2060 		}
2061 		goto done;
2062 	}
2063 
2064 	tp->t_timer[index] = 0;
2065 	if (timer_val > 0) {
2066 		tp = tcp_timers(tp, index);
2067 		if (tp == NULL) {
2068 			goto done;
2069 		}
2070 	}
2071 
2072 	/*
2073 	 * Check if there are any other timers that need to be run.
2074 	 * While doing it, adjust the timer values with respect to tcp_now.
2075 	 */
2076 	tp->tentry.mode = 0;
2077 	for (i = 0; i < TCPT_NTIMERS; ++i) {
2078 		if (tp->t_timer[i] != 0) {
2079 			diff = timer_diff(tp->tentry.timer_start,
2080 			    tp->t_timer[i], tcp_now, 0);
2081 			if (diff <= 0) {
2082 				needtorun[i] = TRUE;
2083 				count++;
2084 			} else {
2085 				tp->t_timer[i] = diff;
2086 				needtorun[i] = FALSE;
2087 				if (lo_timer == 0 || diff < lo_timer) {
2088 					lo_timer = diff;
2089 					lo_index = i;
2090 				}
2091 				TCP_SET_TIMER_MODE(tp->tentry.mode, i);
2092 			}
2093 		}
2094 	}
2095 
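	/*
	 * Re-anchor the entry at the current time and record the timer
	 * that fires soonest, along with its absolute runtime.
	 */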
2096 	tp->tentry.timer_start = tcp_now;
2097 	tp->tentry.index = lo_index;
2098 	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2099 
2100 	if (tp->tentry.index != TCPT_NONE) {
2101 		tp->tentry.runtime = tp->tentry.timer_start +
2102 		    tp->t_timer[tp->tentry.index];
2103 		if (tp->tentry.runtime == 0) {
2104 			tp->tentry.runtime++;
2105 		}
2106 	}
2107 
2108 	if (count > 0) {
2109 		/* run any other timers outstanding at this time. */
2110 		for (i = 0; i < TCPT_NTIMERS; ++i) {
2111 			if (needtorun[i]) {
2112 				tp->t_timer[i] = 0;
2113 				tp = tcp_timers(tp, i);
2114 				if (tp == NULL) {
2115 					offset = 0;
2116 					*(te_mode) = 0;
2117 					goto done;
2118 				}
2119 			}
2120 		}
2121 		tcp_set_lotimer_index(tp);
2122 	}
2123 
2124 	if (tp->tentry.index < TCPT_NONE) {
2125 		offset = tp->t_timer[tp->tentry.index];
2126 		*(te_mode) = tp->tentry.mode;
2127 	}
2128 
2129 done:
2130 	if (tp != NULL && tp->tentry.index == TCPT_NONE) {
2131 		tcp_remove_timer(tp);
2132 		offset = 0;
2133 	}
2134 
2135 	socket_unlock(so, 1);
2136 	return offset;
2137 }
2138 
2139 void
2140 tcp_run_timerlist(void * arg1, void * arg2)
2141 {
2142 #pragma unused(arg1, arg2)
2143 	struct tcptimerentry *te, *__single next_te;
2144 	struct tcptimerlist *__single listp = &tcp_timer_list;
2145 	struct tcpcb *__single tp;
2146 	uint32_t next_timer = 0; /* offset of the next timer on the list */
2147 	u_int16_t te_mode = 0;  /* modes of all active timers in a tcpcb */
2148 	u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
2149 	uint32_t active_count = 0;
2150 
2151 	calculate_tcp_clock();
2152 
2153 	lck_mtx_lock(&listp->mtx);
2154 
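	/* Record how late the list fired relative to its scheduled runtime */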
2155 	int32_t drift = tcp_now - listp->runtime;
2156 	if (drift <= 1) {
2157 		tcpstat.tcps_timer_drift_le_1_ms++;
2158 	} else if (drift <= 10) {
2159 		tcpstat.tcps_timer_drift_le_10_ms++;
2160 	} else if (drift <= 20) {
2161 		tcpstat.tcps_timer_drift_le_20_ms++;
2162 	} else if (drift <= 50) {
2163 		tcpstat.tcps_timer_drift_le_50_ms++;
2164 	} else if (drift <= 100) {
2165 		tcpstat.tcps_timer_drift_le_100_ms++;
2166 	} else if (drift <= 200) {
2167 		tcpstat.tcps_timer_drift_le_200_ms++;
2168 	} else if (drift <= 500) {
2169 		tcpstat.tcps_timer_drift_le_500_ms++;
2170 	} else if (drift <= 1000) {
2171 		tcpstat.tcps_timer_drift_le_1000_ms++;
2172 	} else {
2173 		tcpstat.tcps_timer_drift_gt_1000_ms++;
2174 	}
2175 
2176 	listp->running = TRUE;
2177 
2178 	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
2179 		uint32_t offset = 0;
2180 		uint32_t runtime = te->runtime;
2181 
2182 		tp = TIMERENTRY_TO_TP(te);
2183 
2184 		/*
2185 		 * An interface probe may need to happen before the previously scheduled runtime
2186 		 */
2187 		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now) &&
2188 		    !TCP_IF_STATE_CHANGED(tp, listp->probe_if_index)) {
2189 			offset = timer_diff(runtime, 0, tcp_now, 0);
2190 			if (next_timer == 0 || offset < next_timer) {
2191 				next_timer = offset;
2192 			}
2193 			list_mode |= te->mode;
2194 			continue;
2195 		}
2196 
2197 		/*
2198 		 * Acquire an inp wantcnt on the inpcb so that the socket
2199 		 * won't get detached even if tcp_close is called
2200 		 */
2201 		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
2202 		    == WNT_STOPUSING) {
2203 			/*
2204 			 * Somehow this pcb went into a dead state while
2205 			 * on the timer list, just take it off the list.
2206 			 * Since the timer list entry pointers are
2207 			 * protected by the timer list lock, we can
2208 			 * do it here without the socket lock.
2209 			 */
2210 			if (TIMER_IS_ON_LIST(tp)) {
2211 				tp->t_flags &= ~(TF_TIMER_ONLIST);
2212 				LIST_REMOVE(&tp->tentry, le);
2213 				listp->entries--;
2214 
2215 				tp->tentry.le.le_next = NULL;
2216 				tp->tentry.le.le_prev = NULL;
2217 			}
2218 			continue;
2219 		}
2220 		active_count++;
2221 
2222 		/*
2223 		 * Store the next timerentry pointer before releasing the
2224 		 * list lock. If that entry has to be removed when we
2225 		 * release the lock, this pointer will be updated to the
2226 		 * element after that.
2227 		 */
2228 		listp->next_te = next_te;
2229 
2230 		VERIFY_NEXT_LINK(&tp->tentry, le);
2231 		VERIFY_PREV_LINK(&tp->tentry, le);
2232 
2233 		lck_mtx_unlock(&listp->mtx);
2234 
2235 		offset = tcp_run_conn_timer(tp, &te_mode,
2236 		    listp->probe_if_index);
2237 
2238 		lck_mtx_lock(&listp->mtx);
2239 
2240 		next_te = listp->next_te;
2241 		listp->next_te = NULL;
2242 
2243 		if (offset > 0 && te_mode != 0) {
2244 			list_mode |= te_mode;
2245 
2246 			if (next_timer == 0 || offset < next_timer) {
2247 				next_timer = offset;
2248 			}
2249 		}
2250 	}
2251 
2252 	if (!LIST_EMPTY(&listp->lhead)) {
2253 		uint32_t next_mode = 0;
2254 		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
2255 		    (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) {
2256 			next_mode = TCP_TIMERLIST_10MS_MODE;
2257 		} else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
2258 		    (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) {
2259 			next_mode = TCP_TIMERLIST_100MS_MODE;
2260 		} else {
2261 			next_mode = TCP_TIMERLIST_500MS_MODE;
2262 		}
2263 
2264 		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
2265 			listp->idleruns = 0;
2266 		} else {
2267 			/*
2268 			 * The next required mode is slow mode, but if
2269 			 * the last one was a faster mode and we did not
2270 			 * have enough idle runs, repeat the last mode.
2271 			 *
2272 			 * We try to keep the timer list in fast mode for
2273 			 * some idle time in expectation of new data.
2274 			 */
2275 			if (listp->mode != next_mode &&
2276 			    listp->idleruns < timer_fastmode_idlemax) {
2277 				listp->idleruns++;
2278 				next_mode = listp->mode;
2279 				next_timer = TCP_TIMER_100MS_QUANTUM;
2280 			} else {
2281 				listp->idleruns = 0;
2282 			}
2283 		}
2284 		listp->mode = next_mode;
2285 		if (listp->pref_offset != 0) {
2286 			next_timer = min(listp->pref_offset, next_timer);
2287 		}
2288 
2289 		if (listp->mode == TCP_TIMERLIST_500MS_MODE) {
2290 			next_timer = max(next_timer,
2291 			    TCP_TIMER_500MS_QUANTUM);
2292 		}
2293 
2294 		tcp_sched_timerlist(next_timer);
2295 	} else {
2296 		/*
2297 		 * No timer needs to run soon, but keep the list firing
2298 		 * periodically at a much coarser interval.
2299 		 */
2300 		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
2301 	}
2302 
2303 	listp->running = FALSE;
2304 	listp->pref_mode = 0;
2305 	listp->pref_offset = 0;
2306 	listp->probe_if_index = 0;
2307 
2308 	lck_mtx_unlock(&listp->mtx);
2309 }
2310 
2311 /*
2312  * Function to check if the timerlist needs to be rescheduled to run this
2313  * connection's timers correctly.
2314  */
2315 void
2316 tcp_sched_timers(struct tcpcb *tp)
2317 {
2318 	struct tcptimerentry *te = &tp->tentry;
2319 	u_int16_t index = te->index;
2320 	u_int16_t mode = te->mode;
2321 	struct tcptimerlist *listp = &tcp_timer_list;
2322 	int32_t offset = 0;
2323 	boolean_t list_locked = FALSE;
2324 
2325 	if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
2326 		/* Just return without adding the dead pcb to the list */
2327 		if (TIMER_IS_ON_LIST(tp)) {
2328 			tcp_remove_timer(tp);
2329 		}
2330 		return;
2331 	}
2332 
2333 	if (index == TCPT_NONE) {
2334 		/* Nothing to run */
2335 		tcp_remove_timer(tp);
2336 		return;
2337 	}
2338 
2339 	/*
2340 	 * Compute the offset at which the next timer for this connection
2341 	 * has to run; a non-positive offset means it is already overdue.
2342 	 */
2343 	offset = timer_diff(te->runtime, 0, tcp_now, 0);
2344 	if (offset <= 0) {
2345 		offset = 1;
2346 		tcp_timer_advanced++;
2347 	}
2348 
2349 	if (!TIMER_IS_ON_LIST(tp)) {
2350 		if (!list_locked) {
2351 			lck_mtx_lock(&listp->mtx);
2352 			list_locked = TRUE;
2353 		}
2354 
2355 		if (!TIMER_IS_ON_LIST(tp)) {
2356 			LIST_INSERT_HEAD(&listp->lhead, te, le);
2357 			tp->t_flags |= TF_TIMER_ONLIST;
2358 
2359 			listp->entries++;
2360 			if (listp->entries > listp->maxentries) {
2361 				listp->maxentries = listp->entries;
2362 			}
2363 
2364 			/* if the list is not scheduled, just schedule it */
2365 			if (!listp->scheduled) {
2366 				goto schedule;
2367 			}
2368 		}
2369 	}
2370 
2371 	/*
2372 	 * Timer entry is currently on the list, check if the list needs
2373 	 * to be rescheduled.
2374 	 */
2375 	if (need_to_resched_timerlist(te->runtime, mode)) {
2376 		tcp_resched_timerlist++;
2377 
2378 		if (!list_locked) {
2379 			lck_mtx_lock(&listp->mtx);
2380 			list_locked = TRUE;
2381 		}
2382 
2383 		VERIFY_NEXT_LINK(te, le);
2384 		VERIFY_PREV_LINK(te, le);
2385 
2386 		if (listp->running) {
2387 			listp->pref_mode |= mode;
2388 			if (listp->pref_offset == 0 ||
2389 			    offset < listp->pref_offset) {
2390 				listp->pref_offset = offset;
2391 			}
2392 		} else {
2393 			/*
2394 			 * The list could have been rescheduled while
2395 			 * this thread was waiting for the lock.
2396 			 */
2397 			if (listp->scheduled) {
2398 				int32_t diff;
2399 				diff = timer_diff(listp->runtime, 0,
2400 				    tcp_now, offset);
2401 				if (diff <= 0) {
2402 					goto done;
2403 				} else {
2404 					goto schedule;
2405 				}
2406 			} else {
2407 				goto schedule;
2408 			}
2409 		}
2410 	}
2411 	goto done;
2412 
2413 schedule:
2414 	/*
2415 	 * Since a connection with timers is getting scheduled, the timer
2416 	 * list moves from idle to active state, and that is why idleruns is
2417 	 * reset.
2418 	 */
2419 	if (mode & TCP_TIMERLIST_10MS_MODE) {
2420 		listp->mode = TCP_TIMERLIST_10MS_MODE;
2421 		listp->idleruns = 0;
2422 		offset = min(offset, TCP_TIMER_10MS_QUANTUM);
2423 	} else if (mode & TCP_TIMERLIST_100MS_MODE) {
2424 		if (listp->mode > TCP_TIMERLIST_100MS_MODE) {
2425 			listp->mode = TCP_TIMERLIST_100MS_MODE;
2426 		}
2427 		listp->idleruns = 0;
2428 		offset = min(offset, TCP_TIMER_100MS_QUANTUM);
2429 	}
2430 	tcp_sched_timerlist(offset);
2431 
2432 done:
2433 	if (list_locked) {
2434 		lck_mtx_unlock(&listp->mtx);
2435 	}
2436 
2437 	return;
2438 }
2439 
2440 static inline void
2441 tcp_set_lotimer_index(struct tcpcb *tp)
2442 {
2443 	uint16_t i, lo_index = TCPT_NONE, mode = 0;
2444 	uint32_t lo_timer = 0;
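	/*
	 * Find the armed timer that fires soonest and collect the modes
	 * of all armed timers.
	 */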
2445 	for (i = 0; i < TCPT_NTIMERS; ++i) {
2446 		if (tp->t_timer[i] != 0) {
2447 			TCP_SET_TIMER_MODE(mode, i);
2448 			if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
2449 				lo_timer = tp->t_timer[i];
2450 				lo_index = i;
2451 			}
2452 		}
2453 	}
2454 	tp->tentry.index = lo_index;
2455 	tp->tentry.mode = mode;
2456 	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2457 
2458 	if (tp->tentry.index != TCPT_NONE) {
2459 		tp->tentry.runtime = tp->tentry.timer_start
2460 		    + tp->t_timer[tp->tentry.index];
2461 		if (tp->tentry.runtime == 0) {
2462 			tp->tentry.runtime++;
2463 		}
2464 	}
2465 }
2466 
2467 void
2468 tcp_check_timer_state(struct tcpcb *tp)
2469 {
2470 	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
2471 
2472 	if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) {
2473 		return;
2474 	}
2475 
2476 	tcp_set_lotimer_index(tp);
2477 
2478 	tcp_sched_timers(tp);
2479 	return;
2480 }
2481 
2482 static inline void
2483 tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
2484 {
2485 	/* handle wrap around */
2486 	int32_t diff = (int32_t) (cur - *prev);
2487 	if (diff > 0) {
2488 		*dest = diff;
2489 	} else {
2490 		*dest = 0;
2491 	}
2492 	*prev = cur;
2493 	return;
2494 }
2495 
2496 static inline void
2497 tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
2498 {
2499 	/* handle wrap around */
2500 	int64_t diff = (int64_t) (cur - *prev);
2501 	if (diff > 0) {
2502 		*dest = diff;
2503 	} else {
2504 		*dest = 0;
2505 	}
2506 	*prev = cur;
2507 	return;
2508 }
2509 
2510 __private_extern__ void
2511 tcp_report_stats(void)
2512 {
2513 	struct nstat_sysinfo_data data;
2514 	struct sockaddr_in dst;
2515 	struct sockaddr_in6 dst6;
2516 	struct rtentry *rt = NULL;
2517 	static struct tcp_last_report_stats prev;
2518 	u_int64_t var, uptime;
2519 
2520 #define stat    data.u.tcp_stats
2521 	if (((uptime = net_uptime()) - tcp_last_report_time) <
2522 	    tcp_report_stats_interval) {
2523 		return;
2524 	}
2525 
2526 	tcp_last_report_time = uptime;
2527 
2528 	bzero(&data, sizeof(data));
2529 	data.flags = NSTAT_SYSINFO_TCP_STATS;
2530 
2531 	SOCKADDR_ZERO(&dst, sizeof(dst));
2532 	dst.sin_len = sizeof(dst);
2533 	dst.sin_family = AF_INET;
2534 
2535 	/* ipv4 avg rtt */
2536 	lck_mtx_lock(rnh_lock);
2537 	rt = rt_lookup(TRUE, SA(&dst), NULL,
2538 	    rt_tables[AF_INET], IFSCOPE_NONE);
2539 	lck_mtx_unlock(rnh_lock);
2540 	if (rt != NULL) {
2541 		RT_LOCK(rt);
2542 		if (rt_primary_default(rt, rt_key(rt)) &&
2543 		    rt->rt_stats != NULL) {
2544 			stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
2545 		}
2546 		RT_UNLOCK(rt);
2547 		rtfree(rt);
2548 		rt = NULL;
2549 	}
2550 
2551 	/* ipv6 avg rtt */
2552 	SOCKADDR_ZERO(&dst6, sizeof(dst6));
2553 	dst6.sin6_len = sizeof(dst6);
2554 	dst6.sin6_family = AF_INET6;
2555 
2556 	lck_mtx_lock(rnh_lock);
2557 	rt = rt_lookup(TRUE, SA(&dst6), NULL,
2558 	    rt_tables[AF_INET6], IFSCOPE_NONE);
2559 	lck_mtx_unlock(rnh_lock);
2560 	if (rt != NULL) {
2561 		RT_LOCK(rt);
2562 		if (rt_primary_default(rt, rt_key(rt)) &&
2563 		    rt->rt_stats != NULL) {
2564 			stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
2565 		}
2566 		RT_UNLOCK(rt);
2567 		rtfree(rt);
2568 		rt = NULL;
2569 	}
2570 
2571 	/* send packet loss rate, shift by 10 for precision */
2572 	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
2573 		var = tcpstat.tcps_sndrexmitpack << 10;
2574 		stat.send_plr = (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2575 	}
2576 
2577 	/* recv packet loss rate, shift by 10 for precision */
2578 	if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
2579 		var = tcpstat.tcps_recovered_pkts << 10;
2580 		stat.recv_plr = (uint32_t)((var * 100) / tcpstat.tcps_rcvpack);
2581 	}
2582 
2583 	/* RTO after tail loss, shift by 10 for precision */
2584 	if (tcpstat.tcps_sndrexmitpack > 0
2585 	    && tcpstat.tcps_tailloss_rto > 0) {
2586 		var = tcpstat.tcps_tailloss_rto << 10;
2587 		stat.send_tlrto_rate =
2588 		    (uint32_t)((var * 100) / tcpstat.tcps_sndrexmitpack);
2589 	}
2590 
2591 	/* packet reordering */
2592 	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
2593 		var = tcpstat.tcps_reordered_pkts << 10;
2594 		stat.send_reorder_rate =
2595 		    (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2596 	}
2597 
2598 	if (tcp_ecn_outbound == 1) {
2599 		stat.ecn_client_enabled = 1;
2600 	}
2601 	if (tcp_ecn_inbound == 1) {
2602 		stat.ecn_server_enabled = 1;
2603 	}
2604 	tcp_cumulative_stat(tcpstat.tcps_connattempt,
2605 	    &prev.tcps_connattempt, &stat.connection_attempts);
2606 	tcp_cumulative_stat(tcpstat.tcps_accepts,
2607 	    &prev.tcps_accepts, &stat.connection_accepts);
2608 	tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
2609 	    &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
2610 	tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
2611 	    &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
2612 	tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
2613 	    &prev.tcps_ecn_client_success, &stat.ecn_client_success);
2614 	tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
2615 	    &prev.tcps_ecn_server_success, &stat.ecn_server_success);
2616 	tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
2617 	    &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
2618 	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
2619 	    &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
2620 	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
2621 	    &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
2622 	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
2623 	    &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
2624 	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
2625 	    &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
2628 	tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
2629 	    &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
2632 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
2633 	    &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
2634 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
2635 	    &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
2636 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
2637 	    &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
2638 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
2639 	    &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
2640 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
2641 	    &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
2642 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
2643 	    &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
2644 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
2645 	    &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
2646 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
2647 	    &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
2648 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
2649 	    &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
2650 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
2651 	    &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
2652 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
2653 	    &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
2654 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
2655 	    &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
2656 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
2657 	    &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
2658 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
2659 	    &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
2660 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
2661 	    &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
2662 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
2663 	    &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
2664 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
2665 	    &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
2666 	tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
2667 	    &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
2668 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
2669 	    &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
2670 	tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
2671 	    &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
2672 	tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
2673 	    &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
2674 	tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
2675 	    &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
2676 
2677 
2678 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
2679 	    &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
2680 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
2681 	    &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
2682 	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
2683 	    &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
2684 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
2685 	    &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
2686 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
2687 	    &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
2688 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
2689 	    &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
2690 	tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
2691 	    &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
2692 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
2693 	    &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
2694 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
2695 	    &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
2696 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
2697 	    &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
2698 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
2699 	    &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
2700 	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
2701 	    &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
2702 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
2703 	    &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
2704 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
2705 	    &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
2706 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
2707 	    &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
2708 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
2709 	    &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
2710 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
2711 	    &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
2712 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
2713 	    &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
2714 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
2715 	    &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
2716 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
2717 	    &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
2718 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
2719 	    &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
2720 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
2721 	    &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
2722 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
2723 	    &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
2724 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
2725 	    &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
2726 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
2727 	    &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
2728 	tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
2729 	    &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
2730 	tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
2731 	    &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
2732 	tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
2733 	    &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);
2734 	tcp_cumulative_stat(tcpstat.tcps_mptcp_triggered_cell,
2735 	    &prev.tcps_mptcp_triggered_cell, &stat.mptcp_triggered_cell);
2736 
2737 	nstat_sysinfo_send_data(&data);
2738 
2739 #undef  stat
2740 }
2741 
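/*
 * Kick the timer list so that connections on the given interface can send
 * probes as soon as possible, i.e. within the next 10ms. Only one interface
 * may be probed at a time; a conflicting request is counted and dropped.
 */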
2742 void
2743 tcp_interface_send_probe(u_int16_t probe_if_index)
2744 {
2745 	int32_t offset = 0;
2746 	struct tcptimerlist *listp = &tcp_timer_list;
2747 
2748 	/* Make sure TCP clock is up to date */
2749 	calculate_tcp_clock();
2750 
2751 	lck_mtx_lock(&listp->mtx);
2752 	if (listp->probe_if_index > 0 && listp->probe_if_index != probe_if_index) {
2753 		tcpstat.tcps_probe_if_conflict++;
2754 		os_log(OS_LOG_DEFAULT,
2755 		    "%s: probe_if_index %u conflicts with %u, tcps_probe_if_conflict %u\n",
2756 		    __func__, probe_if_index, listp->probe_if_index,
2757 		    tcpstat.tcps_probe_if_conflict);
2758 		goto done;
2759 	}
2760 
2761 	listp->probe_if_index = probe_if_index;
2762 	if (listp->running) {
2763 		os_log(OS_LOG_DEFAULT, "%s: timer list already running for if_index %u\n",
2764 		    __func__, probe_if_index);
2765 		goto done;
2766 	}
2767 
2768 	/*
2769 	 * Reschedule the timerlist to run within the next 10ms, which is
2770 	 * the fastest that we can do.
2771 	 */
2772 	offset = TCP_TIMER_10MS_QUANTUM;
2773 	if (listp->scheduled) {
2774 		int32_t diff;
2775 		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2776 		if (diff <= 0) {
2777 			/* The timer will fire sooner than what's needed */
2778 			os_log(OS_LOG_DEFAULT,
2779 			    "%s: timer will fire sooner than needed for if_index %u\n",
2780 			    __func__, probe_if_index);
2781 			goto done;
2782 		}
2783 	}
2784 	listp->mode = TCP_TIMERLIST_10MS_MODE;
2785 	listp->idleruns = 0;
2786 
2787 	tcp_sched_timerlist(offset);
2788 
2789 done:
2790 	lck_mtx_unlock(&listp->mtx);
2791 	return;
2792 }
2793 
2794 /*
2795  * Enable read probes on this connection, if:
2796  * - it is in established state
2797  * - doesn't have any data outstanding
2798  * - the outgoing ifp matches
2799  * - we have not already sent any read probes
2800  */
2801 static void
2802 tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
2803 {
2804 	if (tp->t_state == TCPS_ESTABLISHED &&
2805 	    tp->snd_max == tp->snd_una &&
2806 	    tp->t_inpcb->inp_last_outifp == ifp &&
2807 	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
2808 	    tp->t_rtimo_probes == 0) {
2809 		tp->t_flagsext |= TF_DETECT_READSTALL;
2810 		tp->t_rtimo_probes = 0;
2811 		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2812 		    TCP_TIMER_10MS_QUANTUM);
2813 		if (tp->tentry.index == TCPT_NONE) {
2814 			tp->tentry.index = TCPT_KEEP;
2815 			tp->tentry.runtime = tcp_now +
2816 			    TCP_TIMER_10MS_QUANTUM;
2817 		} else {
2818 			int32_t diff = 0;
2819 
2820 			/* Reset runtime to be in next 10ms */
2821 			diff = timer_diff(tp->tentry.runtime, 0,
2822 			    tcp_now, TCP_TIMER_10MS_QUANTUM);
2823 			if (diff > 0) {
2824 				tp->tentry.index = TCPT_KEEP;
2825 				tp->tentry.runtime = tcp_now +
2826 				    TCP_TIMER_10MS_QUANTUM;
2827 				if (tp->tentry.runtime == 0) {
2828 					tp->tentry.runtime++;
2829 				}
2830 			}
2831 		}
2832 	}
2833 }
2834 
2835 /*
2836  * Disable read probe and reset the keep alive timer
2837  */
2838 static void
2839 tcp_disable_read_probe(struct tcpcb *tp)
2840 {
2841 	if (tp->t_adaptive_rtimo == 0 &&
2842 	    ((tp->t_flagsext & TF_DETECT_READSTALL) ||
2843 	    tp->t_rtimo_probes > 0)) {
2844 		tcp_keepalive_reset(tp);
2845 
2846 		if (tp->t_mpsub) {
2847 			mptcp_reset_keepalive(tp);
2848 		}
2849 	}
2850 }
2851 
2852 /*
2853  * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
2854  * probes on connections going over a particular interface.
2855  */
2856 void
2857 tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
2858 {
2859 	int32_t offset;
2860 	struct tcptimerlist *listp = &tcp_timer_list;
2861 	struct inpcbinfo *pcbinfo = &tcbinfo;
2862 	struct inpcb *inp, *nxt;
2863 
2864 	if (ifp == NULL) {
2865 		return;
2866 	}
2867 
2868 	/* update clock */
2869 	calculate_tcp_clock();
2870 
2871 	/*
2872 	 * Enable keep alive timer on all connections that are
2873 	 * active/established on this interface.
2874 	 */
2875 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
2876 
2877 	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
2878 		struct tcpcb *tp = NULL;
2879 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
2880 		    WNT_STOPUSING) {
2881 			continue;
2882 		}
2883 
2884 		/* Acquire lock to look at the state of the connection */
2885 		socket_lock(inp->inp_socket, 1);
2886 
2887 		/* Release the want count */
2888 		if (inp->inp_ppcb == NULL ||
2889 		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
2890 			socket_unlock(inp->inp_socket, 1);
2891 			continue;
2892 		}
2893 		tp = intotcpcb(inp);
2894 		if (enable) {
2895 			tcp_enable_read_probe(tp, ifp);
2896 		} else {
2897 			tcp_disable_read_probe(tp);
2898 		}
2899 
2900 		socket_unlock(inp->inp_socket, 1);
2901 	}
2902 	lck_rw_done(&pcbinfo->ipi_lock);
2903 
2904 	lck_mtx_lock(&listp->mtx);
2905 	if (listp->running) {
2906 		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
2907 		goto done;
2908 	}
2909 
2910 	/* Reschedule within the next 10ms */
2911 	offset = TCP_TIMER_10MS_QUANTUM;
2912 	if (listp->scheduled) {
2913 		int32_t diff;
2914 		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2915 		if (diff <= 0) {
2916 			/* The timer will fire sooner than what's needed */
2917 			goto done;
2918 		}
2919 	}
2920 	listp->mode = TCP_TIMERLIST_10MS_MODE;
2921 	listp->idleruns = 0;
2922 
2923 	tcp_sched_timerlist(offset);
2924 done:
2925 	lck_mtx_unlock(&listp->mtx);
2926 	return;
2927 }
2928 
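/*
 * Apply the MSS recommendation published in the cellular interface's link
 * status, caching the previous value so it can be restored once the
 * recommendation is lifted.
 */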
2929 inline void
2930 tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
2931 {
2932 	struct if_cellular_status_v1 *ifsr;
2933 	u_int32_t optlen;
2934 	ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2935 	if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2936 		optlen = tp->t_maxopd - tp->t_maxseg;
2937 
2938 		if (ifsr->mss_recommended ==
2939 		    IF_CELL_UL_MSS_RECOMMENDED_NONE &&
2940 		    tp->t_cached_maxopd > 0 &&
2941 		    tp->t_maxopd < tp->t_cached_maxopd) {
2942 			tp->t_maxopd = tp->t_cached_maxopd;
2943 			tcpstat.tcps_mss_to_default++;
2944 		} else if (ifsr->mss_recommended ==
2945 		    IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
2946 		    tp->t_maxopd > tcp_mss_rec_medium) {
2947 			tp->t_cached_maxopd = tp->t_maxopd;
2948 			tp->t_maxopd = tcp_mss_rec_medium;
2949 			tcpstat.tcps_mss_to_medium++;
2950 		} else if (ifsr->mss_recommended ==
2951 		    IF_CELL_UL_MSS_RECOMMENDED_LOW &&
2952 		    tp->t_maxopd > tcp_mss_rec_low) {
2953 			tp->t_cached_maxopd = tp->t_maxopd;
2954 			tp->t_maxopd = tcp_mss_rec_low;
2955 			tcpstat.tcps_mss_to_low++;
2956 		}
2957 		tp->t_maxseg = tp->t_maxopd - optlen;
2958 
2959 		/*
2960 		 * Clear the cached value if it is the same as the current one
2961 		 */
2962 		if (tp->t_maxopd == tp->t_cached_maxopd) {
2963 			tp->t_cached_maxopd = 0;
2964 		}
2965 	}
2966 }
2967 
2968 void
2969 tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
2970 {
2971 	struct inpcb *inp = sotoinpcb(so);
2972 	struct tcpcb *tp = intotcpcb(inp);
2973 
2974 	if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) {
2975 		return;
2976 	}
2977 
2978 	if (!IFNET_IS_CELLULAR(ifp)) {
2979 		/*
2980 		 * This optimization is implemented for cellular
2981 		 * networks only
2982 		 */
2983 		return;
2984 	}
2985 	if (tp->t_state <= TCPS_CLOSE_WAIT) {
2986 		/*
2987 		 * If the connection is currently doing or has done PMTU
2988 		 * blackhole detection, do not change the MSS
2989 		 */
2990 		if (tp->t_flags & TF_BLACKHOLE) {
2991 			return;
2992 		}
2993 		if (ifp->if_link_status == NULL) {
2994 			return;
2995 		}
2996 		tcp_update_mss_core(tp, ifp);
2997 	}
2998 }
2999 
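/*
 * Periodic housekeeping over all TCP pcbs: check extended background idle
 * time, update the MSS when requested, and defunct system-initiated
 * background sockets when a cellular interface's link quality has dropped
 * to the abort threshold.
 */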
3000 void
3001 tcp_itimer(struct inpcbinfo *ipi)
3002 {
3003 	struct inpcb *inp, *nxt;
3004 
3005 	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
3006 		if (tcp_itimer_done == TRUE) {
3007 			tcp_itimer_done = FALSE;
3008 			os_atomic_inc(&ipi->ipi_timer_req.intimer_fast, relaxed);
3009 			return;
3010 		}
3011 		/* Try-lock failed; take the lock exclusively, blocking if needed */
3012 		lck_rw_lock_exclusive(&ipi->ipi_lock);
3013 	}
3014 	tcp_itimer_done = TRUE;
3015 
3016 	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
3017 		struct socket *so;
3018 		struct ifnet *ifp;
3019 
3020 		if (inp->inp_ppcb == NULL ||
3021 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
3022 			continue;
3023 		}
3024 		so = inp->inp_socket;
3025 		ifp = inp->inp_last_outifp;
3026 		socket_lock(so, 1);
3027 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3028 			socket_unlock(so, 1);
3029 			continue;
3030 		}
3031 		so_check_extended_bk_idle_time(so);
3032 		if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
3033 			tcp_update_mss_locked(so, NULL);
3034 		}
3035 		socket_unlock(so, 1);
3036 
3037 		/*
3038 		 * Defunct all system-initiated background sockets if the
3039 		 * socket is using the cellular interface and the interface
3040 		 * has its LQM set to abort.
3041 		 */
3042 		if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
3043 		    IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
3044 		    ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
3045 		    (ifp->if_interface_state.valid_bitmask &
3046 		    IF_INTERFACE_STATE_LQM_STATE_VALID) &&
3047 		    ifp->if_interface_state.lqm_state ==
3048 		    IFNET_LQM_THRESH_ABORT) {
3049 			socket_defunct(current_proc(), so,
3050 			    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
3051 		}
3052 	}
3053 
3054 	ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
3055 	lck_rw_done(&ipi->ipi_lock);
3056 }
3057