xref: /xnu-10002.61.3/bsd/netinet/tcp_timer.c (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
61  * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
62  */
63 
64 #include "tcp_includes.h"
65 
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/kernel.h>
69 #include <sys/mbuf.h>
70 #include <sys/sysctl.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/protosw.h>
74 #include <sys/domain.h>
75 #include <sys/mcache.h>
76 #include <sys/queue.h>
77 #include <kern/locks.h>
78 #include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */
79 #include <mach/boolean.h>
80 
81 #include <net/route.h>
82 #include <net/if_var.h>
83 #include <net/ntstat.h>
84 
85 #include <netinet/in.h>
86 #include <netinet/in_systm.h>
87 #include <netinet/in_pcb.h>
88 #include <netinet/in_var.h>
89 #include <netinet6/in6_pcb.h>
90 #include <netinet/ip_var.h>
91 #include <netinet/tcp.h>
92 #include <netinet/tcp_cache.h>
93 #include <netinet/tcp_fsm.h>
94 #include <netinet/tcp_seq.h>
95 #include <netinet/tcp_timer.h>
96 #include <netinet/tcp_var.h>
97 #include <netinet/tcp_cc.h>
98 #include <netinet6/tcp6_var.h>
99 #include <netinet/tcpip.h>
100 #if TCPDEBUG
101 #include <netinet/tcp_debug.h>
102 #endif
103 #include <netinet/tcp_log.h>
104 
105 #include <sys/kdebug.h>
106 #include <mach/sdt.h>
107 #include <netinet/mptcp_var.h>
108 #include <net/content_filter.h>
109 
110 /* Max number of times a stretch ack can be delayed on a connection */
111 #define TCP_STRETCHACK_DELAY_THRESHOLD  5
112 
113 /*
114  * If the host processor has been sleeping for too long, this is the threshold
115  * used to avoid sending stale retransmissions.
116  */
117 #define TCP_SLEEP_TOO_LONG      (10 * 60 * 1000) /* 10 minutes in ms */
118 
119 /* tcp timer list */
120 struct tcptimerlist tcp_timer_list;
121 
122 /* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
123 struct tcptailq tcp_tw_tailq;
124 
125 
126 static int
127 sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
128 {
129 #pragma unused(arg2)
130 	int error, temp;
131 	long s, tt;
132 
133 	tt = *(int *)arg1;
134 	s = tt * 1000 / TCP_RETRANSHZ;
135 	if (tt < 0 || s > INT_MAX) {
136 		return EINVAL;
137 	}
138 	temp = (int)s;
139 
140 	error = sysctl_handle_int(oidp, &temp, 0, req);
141 	if (error || !req->newptr) {
142 		return error;
143 	}
144 
145 	tt = (long)temp * TCP_RETRANSHZ / 1000;
146 	if (tt < 1 || tt > INT_MAX) {
147 		return EINVAL;
148 	}
149 
150 	*(int *)arg1 = (int)tt;
151 	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
152 	return 0;
153 }
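
/*
 * Worked example of the conversion above (illustrative note, not part of
 * the original file): userspace reads and writes this sysctl in
 * milliseconds, while the kernel stores ticks.  A write converts via
 * ms * TCP_RETRANSHZ / 1000 and a read via ticks * 1000 / TCP_RETRANSHZ.
 * If TCP_RETRANSHZ were 100 (10 ms ticks), writing 500 (ms) would store
 * 50 ticks; with TCP_RETRANSHZ == 1000 (1 ms ticks) the conversion is
 * the identity.
 */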
154 
155 #if SYSCTL_SKMEM
156 int     tcp_keepinit = TCPTV_KEEP_INIT;
157 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
158     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
159     &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
160     sysctl_msec_to_ticks, "I", "");
161 
162 int     tcp_keepidle = TCPTV_KEEP_IDLE;
163 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
164     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
165     &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
166     sysctl_msec_to_ticks, "I", "");
167 
168 int     tcp_keepintvl = TCPTV_KEEPINTVL;
169 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
170     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
171     &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
172     sysctl_msec_to_ticks, "I", "");
173 
174 SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
175     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
176     int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");
177 
178 int     tcp_msl = TCPTV_MSL;
179 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
180     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
181     &tcp_msl, offsetof(skmem_sysctl, tcp.msl),
182     sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
183 #else /* SYSCTL_SKMEM */
184 int     tcp_keepinit;
185 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
186     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
187     &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
188 
189 int     tcp_keepidle;
190 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
191     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
192     &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
193 
194 int     tcp_keepintvl;
195 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
196     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
197     &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
198 
199 int     tcp_keepcnt;
200 SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
201     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
202     &tcp_keepcnt, 0, "number of times to repeat keepalive");
203 
204 int     tcp_msl;
205 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
206     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
207     &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
208 #endif /* SYSCTL_SKMEM */
209 
210 /*
211  * Avoid DoS with connections half-closed in FIN_WAIT_2
212  */
213 int     tcp_fin_timeout = TCPTV_FINWAIT2;
214 
215 static int
216 sysctl_tcp_fin_timeout SYSCTL_HANDLER_ARGS
217 {
218 #pragma unused(arg2)
219 	int error;
220 	int value = tcp_fin_timeout;
221 
222 	error = sysctl_handle_int(oidp, &value, 0, req);
223 	if (error != 0 || req->newptr == USER_ADDR_NULL) {
224 		return error;
225 	}
226 
227 	if (value == -1) {
228 		/* Reset to default value */
229 		value = TCPTV_FINWAIT2;
230 	} else {
231 		/* Convert from milliseconds */
232 		long big_value = value * TCP_RETRANSHZ / 1000;
233 
234 		if (big_value < 0 || big_value > INT_MAX) {
235 			return EINVAL;
236 		}
237 		value = (int)big_value;
238 	}
239 	tcp_fin_timeout = value;
240 	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, value);
241 	return 0;
242 }
243 
244 #if SYSCTL_SKMEM
245 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
246     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
247     &tcp_fin_timeout, offsetof(skmem_sysctl, tcp.fin_timeout),
248     sysctl_tcp_fin_timeout, "I", "");
249 #else /* SYSCTL_SKMEM */
250 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
251     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
252     &tcp_fin_timeout, 0,
253     sysctl_tcp_fin_timeout, "I", "");
254 #endif /* SYSCTL_SKMEM */
255 
256 /*
257  * Avoid DoS via TCP Robustness in Persist Condition
258  * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
259  * by allowing a system wide maximum persistence timeout value when in
260  * Zero Window Probe mode.
261  *
262  * Expressed in milliseconds to be consistent with other timeout-related
263  * values; the TCP socket option is in seconds.
264  */
265 #if SYSCTL_SKMEM
266 u_int32_t tcp_max_persist_timeout = 0;
267 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
268     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
269     &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
270     sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
271 #else /* SYSCTL_SKMEM */
272 u_int32_t tcp_max_persist_timeout = 0;
273 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
274     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
275     &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
276     "Maximum persistence timeout for ZWP");
277 #endif /* SYSCTL_SKMEM */
278 
279 SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
280     CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
281     "Assume SO_KEEPALIVE on all TCP connections");
282 
283 /*
284  * This parameter determines how long the timer list will stay in fast or
285  * quick mode even though all connections are idle. In this state, the
286  * timer will run more frequently anticipating new data.
287  */
288 SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
289     CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
290     TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");
291 
292 /*
293  * See tcp_syn_backoff[] for interval values between SYN retransmits;
294  * the value set below defines the number of retransmits before we
295  * disable the timestamp and window scaling options during subsequent
296  * SYN retransmits.  Setting it to 0 disables dropping those
297  * two options.
298  */
299 SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
300     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
301     10, "Number of retransmitted SYNs before disabling RFC 1323 "
302     "options on local connections");
303 
304 static int tcp_timer_advanced = 0;
305 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
306     CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
307     "Number of times one of the timers was advanced");
308 
309 static int tcp_resched_timerlist = 0;
310 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
311     CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
312     "Number of times timer list was rescheduled as part of processing a packet");
313 
314 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
315     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
316     "Path MTU Discovery Black Hole Detection");
317 
318 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
319     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
320     "Path MTU Discovery Black Hole Detection lowered MSS");
321 
322 #if (DEBUG || DEVELOPMENT)
323 int tcp_probe_if_fix_port = 0;
324 SYSCTL_INT(_net_inet_tcp, OID_AUTO, probe_if_fix_port,
325     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
326     &tcp_probe_if_fix_port, 0, "");
327 #endif /* (DEBUG || DEVELOPMENT) */
328 
329 static u_int32_t tcp_mss_rec_medium = 1200;
330 static u_int32_t tcp_mss_rec_low = 512;
331 
332 #define TCP_REPORT_STATS_INTERVAL       43200 /* 12 hours, in seconds */
333 int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;
334 
335 /* set when garbage collection of "used" sockets has been performed */
336 static boolean_t tcp_gc_done = FALSE;
337 
338 /* max idle probes */
339 int     tcp_maxpersistidle = TCPTV_KEEP_IDLE;
340 
341 /*
342  * The TCP delack timer is set to 100 ms. Since processing of the timer
343  * list in fast mode happens no faster than every 100 ms, the delayed ack
344  * timer will fire somewhere between 100 and 200 ms.
345  */
346 int     tcp_delack = TCP_RETRANSHZ / 10;
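/*
 * Illustrative arithmetic (not in the original file): TCP_RETRANSHZ is the
 * number of timer ticks per second, so TCP_RETRANSHZ / 10 is one tenth of
 * a second worth of ticks -- 100 ms -- independent of tick granularity.
 */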
347 
348 #if MPTCP
349 /*
350  * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
351  */
352 int     tcp_jack_rxmt = TCP_RETRANSHZ / 2;
353 #endif /* MPTCP */
354 
355 static boolean_t tcp_itimer_done = FALSE;
356 
357 static void tcp_remove_timer(struct tcpcb *tp);
358 static void tcp_sched_timerlist(uint32_t offset);
359 static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
360     u_int16_t probe_if_index);
361 static inline void tcp_set_lotimer_index(struct tcpcb *);
362 __private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
363 static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
364 __private_extern__ void tcp_report_stats(void);
365 
366 static  u_int64_t tcp_last_report_time;
367 
368 /*
369  * Structure to store previously reported stats so that we can send
370  * incremental changes in each report interval.
371  */
372 struct tcp_last_report_stats {
373 	u_int32_t       tcps_connattempt;
374 	u_int32_t       tcps_accepts;
375 	u_int32_t       tcps_ecn_client_setup;
376 	u_int32_t       tcps_ecn_server_setup;
377 	u_int32_t       tcps_ecn_client_success;
378 	u_int32_t       tcps_ecn_server_success;
379 	u_int32_t       tcps_ecn_not_supported;
380 	u_int32_t       tcps_ecn_lost_syn;
381 	u_int32_t       tcps_ecn_lost_synack;
382 	u_int32_t       tcps_ecn_recv_ce;
383 	u_int32_t       tcps_ecn_recv_ece;
384 	u_int32_t       tcps_ecn_sent_ece;
385 	u_int32_t       tcps_ecn_conn_recv_ce;
386 	u_int32_t       tcps_ecn_conn_recv_ece;
387 	u_int32_t       tcps_ecn_conn_plnoce;
388 	u_int32_t       tcps_ecn_conn_pl_ce;
389 	u_int32_t       tcps_ecn_conn_nopl_ce;
390 	u_int32_t       tcps_ecn_fallback_synloss;
391 	u_int32_t       tcps_ecn_fallback_reorder;
392 	u_int32_t       tcps_ecn_fallback_ce;
393 
394 	/* TFO-related statistics */
395 	u_int32_t       tcps_tfo_syn_data_rcv;
396 	u_int32_t       tcps_tfo_cookie_req_rcv;
397 	u_int32_t       tcps_tfo_cookie_sent;
398 	u_int32_t       tcps_tfo_cookie_invalid;
399 	u_int32_t       tcps_tfo_cookie_req;
400 	u_int32_t       tcps_tfo_cookie_rcv;
401 	u_int32_t       tcps_tfo_syn_data_sent;
402 	u_int32_t       tcps_tfo_syn_data_acked;
403 	u_int32_t       tcps_tfo_syn_loss;
404 	u_int32_t       tcps_tfo_blackhole;
405 	u_int32_t       tcps_tfo_cookie_wrong;
406 	u_int32_t       tcps_tfo_no_cookie_rcv;
407 	u_int32_t       tcps_tfo_heuristics_disable;
408 	u_int32_t       tcps_tfo_sndblackhole;
409 
410 	/* MPTCP-related statistics */
411 	u_int32_t       tcps_mptcp_handover_attempt;
412 	u_int32_t       tcps_mptcp_interactive_attempt;
413 	u_int32_t       tcps_mptcp_aggregate_attempt;
414 	u_int32_t       tcps_mptcp_fp_handover_attempt;
415 	u_int32_t       tcps_mptcp_fp_interactive_attempt;
416 	u_int32_t       tcps_mptcp_fp_aggregate_attempt;
417 	u_int32_t       tcps_mptcp_heuristic_fallback;
418 	u_int32_t       tcps_mptcp_fp_heuristic_fallback;
419 	u_int32_t       tcps_mptcp_handover_success_wifi;
420 	u_int32_t       tcps_mptcp_handover_success_cell;
421 	u_int32_t       tcps_mptcp_interactive_success;
422 	u_int32_t       tcps_mptcp_aggregate_success;
423 	u_int32_t       tcps_mptcp_fp_handover_success_wifi;
424 	u_int32_t       tcps_mptcp_fp_handover_success_cell;
425 	u_int32_t       tcps_mptcp_fp_interactive_success;
426 	u_int32_t       tcps_mptcp_fp_aggregate_success;
427 	u_int32_t       tcps_mptcp_handover_cell_from_wifi;
428 	u_int32_t       tcps_mptcp_handover_wifi_from_cell;
429 	u_int32_t       tcps_mptcp_interactive_cell_from_wifi;
430 	u_int64_t       tcps_mptcp_handover_cell_bytes;
431 	u_int64_t       tcps_mptcp_interactive_cell_bytes;
432 	u_int64_t       tcps_mptcp_aggregate_cell_bytes;
433 	u_int64_t       tcps_mptcp_handover_all_bytes;
434 	u_int64_t       tcps_mptcp_interactive_all_bytes;
435 	u_int64_t       tcps_mptcp_aggregate_all_bytes;
436 	u_int32_t       tcps_mptcp_back_to_wifi;
437 	u_int32_t       tcps_mptcp_wifi_proxy;
438 	u_int32_t       tcps_mptcp_cell_proxy;
439 	u_int32_t       tcps_mptcp_triggered_cell;
440 };
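
/*
 * Illustrative sketch (assumption, not part of the original file): each
 * report interval publishes only the change against the snapshot above,
 * roughly along these lines:
 */
#if 0
static uint32_t
stat_delta(uint32_t cur, uint32_t *prev)
{
	uint32_t delta = cur - *prev;   /* unsigned math tolerates wrap */
	*prev = cur;                    /* roll the snapshot forward */
	return delta;
}
/* e.g.: stat_delta(tcpstat.tcps_connattempt, &prev.tcps_connattempt) */
#endif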
441 
442 
443 /* Returns true if the timer is on the timer list */
444 #define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)
445 
446 /* Run the TCP timerlist at least once every hour */
447 #define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)
448 
449 
450 static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
451 static boolean_t tcp_garbage_collect(struct inpcb *, int);
452 
453 #define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))
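
/*
 * Illustration (assumption, not in the original file): the macro above is
 * the usual "container of" pattern -- given a pointer to the
 * tentry.le.le_next field embedded in a tcpcb, subtract that field's
 * offset to recover the enclosing tcpcb:
 */
#if 0
	struct tcpcb *tp;                          /* some connection */
	void *te = &tp->tentry.le.le_next;         /* its timer list link */
	VERIFY(TIMERENTRY_TO_TP(te) == tp);        /* round-trips back */
#endif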
454 
455 #define VERIFY_NEXT_LINK(elm, field) do {       \
456 	if (LIST_NEXT((elm),field) != NULL &&   \
457 	    LIST_NEXT((elm),field)->field.le_prev !=    \
458 	        &((elm)->field.le_next))        \
459 	        panic("Bad link elm %p next->prev != elm", (elm));      \
460 } while(0)
461 
462 #define VERIFY_PREV_LINK(elm, field) do {       \
463 	if (*(elm)->field.le_prev != (elm))     \
464 	        panic("Bad link elm %p prev->next != elm", (elm));      \
465 } while(0)
466 
467 #define TCP_SET_TIMER_MODE(mode, i) do { \
468 	if (IS_TIMER_HZ_10MS(i)) \
469 	        (mode) |= TCP_TIMERLIST_10MS_MODE; \
470 	else if (IS_TIMER_HZ_100MS(i)) \
471 	        (mode) |= TCP_TIMERLIST_100MS_MODE; \
472 	else \
473 	        (mode) |= TCP_TIMERLIST_500MS_MODE; \
474 } while(0)
475 
476 #if (DEVELOPMENT || DEBUG)
477 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
478     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
479     "Medium MSS based on recommendation in link status report");
480 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
481     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
482     "Low MSS based on recommendation in link status report");
483 
484 static int32_t tcp_change_mss_recommended = 0;
485 static int
486 sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
487 {
488 #pragma unused(oidp, arg1, arg2)
489 	int i, err = 0, changed = 0;
490 	struct ifnet *ifp;
491 	struct if_link_status ifsr;
492 	struct if_cellular_status_v1 *new_cell_sr;
493 	err = sysctl_io_number(req, tcp_change_mss_recommended,
494 	    sizeof(int32_t), &i, &changed);
495 	if (changed) {
496 		if (i < 0 || i > UINT16_MAX) {
497 			return EINVAL;
498 		}
499 		ifnet_head_lock_shared();
500 		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
501 			if (IFNET_IS_CELLULAR(ifp)) {
502 				bzero(&ifsr, sizeof(ifsr));
503 				new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
504 				ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
505 				ifsr.ifsr_len = sizeof(*new_cell_sr);
506 
507 				/* Set MSS recommended */
508 				new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
509 				new_cell_sr->mss_recommended = (uint16_t)i;
510 				err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(new_cell_sr));
511 				if (err == 0) {
512 					tcp_change_mss_recommended = i;
513 				} else {
514 					break;
515 				}
516 			}
517 		}
518 		ifnet_head_done();
519 	}
520 	return err;
521 }
522 
523 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
524     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
525     0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");
526 
527 SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
528     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
529     "Report stats interval");
530 #endif /* (DEVELOPMENT || DEBUG) */
531 
532 /*
533  * Macro to compare two timers. If there is a reset of the sign bit,
534  * it is safe to assume that the timer has wrapped around. By doing
535  * signed comparison, we take care of wrap-around such that the value
536  * with the sign bit reset is actually ahead of the other.
537  */
538 inline int32_t
539 timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
540 {
541 	return (int32_t)((t1 + toff1) - (t2 + toff2));
542 }
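
/*
 * Illustration (not in the original file): the signed cast makes a timer
 * that has wrapped past zero compare as "ahead" of one just below the
 * wrap point.  For example:
 */
#if 0
	/* 0x00000010 is 0x20 ticks ahead of 0xfffffff0 despite being
	 * numerically smaller as an unsigned value. */
	int32_t d = timer_diff(0x00000010, 0, 0xfffffff0, 0);
	/* d == 0x20, i.e. > 0 */
#endif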
543 
544 /*
545  * Add to tcp timewait list, delay is given in milliseconds.
546  */
547 static void
548 add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
549 {
550 	struct inpcbinfo *pcbinfo = &tcbinfo;
551 	struct inpcb *inp = tp->t_inpcb;
552 	uint32_t timer;
553 
554 	/* pcb list should be locked when we get here */
555 	LCK_RW_ASSERT(&pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
556 
557 	/* We may get here multiple times, so check */
558 	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
559 		pcbinfo->ipi_twcount++;
560 		inp->inp_flags2 |= INP2_TIMEWAIT;
561 
562 		/* Remove from global inp list */
563 		LIST_REMOVE(inp, inp_list);
564 	} else {
565 		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
566 	}
567 
568 	/* Compute the time at which this socket can be closed */
569 	timer = tcp_now + delay;
570 
571 	/* We will use the TCPT_2MSL timer for tracking this delay */
572 
573 	if (TIMER_IS_ON_LIST(tp)) {
574 		tcp_remove_timer(tp);
575 	}
576 	tp->t_timer[TCPT_2MSL] = timer;
577 
578 	TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
579 }
580 
581 void
582 add_to_time_wait(struct tcpcb *tp, uint32_t delay)
583 {
584 	if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) {
585 		socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
586 	}
587 
588 	tcp_del_fsw_flow(tp);
589 
590 	/* 19182803: Notify nstat that connection is closing before waiting. */
591 	nstat_pcb_detach(tp->t_inpcb);
592 
593 #if CONTENT_FILTER
594 	if ((tp->t_inpcb->inp_socket->so_flags & SOF_CONTENT_FILTER) != 0) {
595 		/* If filter present, allow filter to finish processing all queued up data before adding to time wait queue */
596 		(void) cfil_sock_tcp_add_time_wait(tp->t_inpcb->inp_socket);
597 	} else
598 #endif /* CONTENT_FILTER */
599 	{
600 		add_to_time_wait_now(tp, delay);
601 	}
602 }
603 
604 void
605 add_to_time_wait_now(struct tcpcb *tp, uint32_t delay)
606 {
607 	struct inpcbinfo *pcbinfo = &tcbinfo;
608 
609 	if (!lck_rw_try_lock_exclusive(&pcbinfo->ipi_lock)) {
610 		socket_unlock(tp->t_inpcb->inp_socket, 0);
611 		lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
612 		socket_lock(tp->t_inpcb->inp_socket, 0);
613 	}
614 	add_to_time_wait_locked(tp, delay);
615 	lck_rw_done(&pcbinfo->ipi_lock);
616 
617 	inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
618 }
619 
620 /* If this is on time wait queue, remove it. */
621 void
622 tcp_remove_from_time_wait(struct inpcb *inp)
623 {
624 	struct tcpcb *tp = intotcpcb(inp);
625 	if (inp->inp_flags2 & INP2_TIMEWAIT) {
626 		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
627 	}
628 }
629 
630 static boolean_t
631 tcp_garbage_collect(struct inpcb *inp, int istimewait)
632 {
633 	boolean_t active = FALSE;
634 	struct socket *so, *mp_so = NULL;
635 	struct tcpcb *tp;
636 
637 	so = inp->inp_socket;
638 	tp = intotcpcb(inp);
639 
640 	if (so->so_flags & SOF_MP_SUBFLOW) {
641 		mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
642 		if (!socket_try_lock(mp_so)) {
643 			mp_so = NULL;
644 			active = TRUE;
645 			goto out;
646 		}
647 		if (mpsotomppcb(mp_so)->mpp_inside > 0) {
648 			os_log(mptcp_log_handle, "%s - %lx: Still inside %d usecount %d\n", __func__,
649 			    (unsigned long)VM_KERNEL_ADDRPERM(mpsotompte(mp_so)),
650 			    mpsotomppcb(mp_so)->mpp_inside,
651 			    mp_so->so_usecount);
652 			socket_unlock(mp_so, 0);
653 			mp_so = NULL;
654 			active = TRUE;
655 			goto out;
656 		}
657 		/* We call socket_unlock with refcount further below */
658 		mp_so->so_usecount++;
659 		tptomptp(tp)->mpt_mpte->mpte_mppcb->mpp_inside++;
660 	}
661 
662 	/*
663 	 * Skip if still in use or busy; it would have been more efficient
664 	 * if we were to test so_usecount against 0, but this isn't possible
665 	 * due to the current implementation of tcp_dropdropablreq() where
666 	 * overflow sockets that are eligible for garbage collection have
667 	 * their usecounts set to 1.
668 	 */
669 	if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
670 		active = TRUE;
671 		goto out;
672 	}
673 
674 	/* Check again under the lock */
675 	if (so->so_usecount > 1) {
676 		if (inp->inp_wantcnt == WNT_STOPUSING) {
677 			active = TRUE;
678 		}
679 		lck_mtx_unlock(&inp->inpcb_mtx);
680 		goto out;
681 	}
682 
683 	if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
684 	    tp->t_state != TCPS_CLOSED) {
685 		/* Become a regular mutex */
686 		lck_mtx_convert_spin(&inp->inpcb_mtx);
687 		tcp_close(tp);
688 	}
689 
690 	/*
691 	 * Overflowed socket dropped from the listening queue?  Do this
692 	 * only if we are called to clean up the time wait slots, since
693 	 * tcp_dropdropablreq() considers a socket to have been fully
694 	 * dropped after add_to_time_wait() is finished.
695 	 * Also handle the case of connections getting closed by the peer
696 	 * while in the queue as seen with rdar://6422317
697 	 *
698 	 */
699 	if (so->so_usecount == 1 &&
700 	    ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
701 	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
702 	    (so->so_head != NULL) &&
703 	    ((so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) ==
704 	    (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE))))) {
705 		if (inp->inp_state != INPCB_STATE_DEAD) {
706 			/* Become a regular mutex */
707 			lck_mtx_convert_spin(&inp->inpcb_mtx);
708 			if (SOCK_CHECK_DOM(so, PF_INET6)) {
709 				in6_pcbdetach(inp);
710 			} else {
711 				in_pcbdetach(inp);
712 			}
713 		}
714 		VERIFY(so->so_usecount > 0);
715 		so->so_usecount--;
716 		if (inp->inp_wantcnt == WNT_STOPUSING) {
717 			active = TRUE;
718 		}
719 		lck_mtx_unlock(&inp->inpcb_mtx);
720 		goto out;
721 	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
722 		lck_mtx_unlock(&inp->inpcb_mtx);
723 		active = FALSE;
724 		goto out;
725 	}
726 
727 	/*
728 	 * We get here because the PCB is no longer searchable
729 	 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
730 	 * (usecount is 0).  This covers all cases, including overflow
731 	 * sockets and those that are considered as "embryonic",
732 	 * i.e. created by sonewconn() in TCP input path, and have
733 	 * not yet been committed.  For the former, we reduce the usecount
734  * to 0 as done by the code above.  For the latter, the usecount
735  * would have been reduced to 0 as part of calling soabort() when the
736 	 * socket is dropped at the end of tcp_input().
737 	 */
738 	if (so->so_usecount == 0) {
739 		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
740 		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
741 		/* Become a regular mutex */
742 		lck_mtx_convert_spin(&inp->inpcb_mtx);
743 
744 		/*
745 		 * If this tp still happens to be on the timer list,
746 		 * take it out
747 		 */
748 		if (TIMER_IS_ON_LIST(tp)) {
749 			tcp_remove_timer(tp);
750 		}
751 
752 		if (inp->inp_state != INPCB_STATE_DEAD) {
753 			if (SOCK_CHECK_DOM(so, PF_INET6)) {
754 				in6_pcbdetach(inp);
755 			} else {
756 				in_pcbdetach(inp);
757 			}
758 		}
759 
760 		if (mp_so) {
761 			mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);
762 
763 			/* so is now unlinked from mp_so - let's drop the lock */
764 			socket_unlock(mp_so, 1);
765 			mp_so = NULL;
766 		}
767 
768 		in_pcbdispose(inp);
769 		active = FALSE;
770 		goto out;
771 	}
772 
773 	lck_mtx_unlock(&inp->inpcb_mtx);
774 	active = TRUE;
775 
776 out:
777 	if (mp_so) {
778 		socket_unlock(mp_so, 1);
779 	}
780 
781 	return active;
782 }
783 
784 /*
785  * TCP garbage collector callback (inpcb_timer_func_t).
786  *
787  * Bumps the gc request counters for pcbs that will need to be gc-ed
788  * soon; a non-zero count keeps the timer active.
789  */
790 void
791 tcp_gc(struct inpcbinfo *ipi)
792 {
793 	struct inpcb *inp, *nxt;
794 	struct tcpcb *tw_tp, *tw_ntp;
795 #if TCPDEBUG
796 	int ostate;
797 #endif
798 #if  KDEBUG
799 	static int tws_checked = 0;
800 #endif
801 
802 	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);
803 
804 	/*
805 	 * Update tcp_now here as it may get used while
806 	 * processing the slow timer.
807 	 */
808 	calculate_tcp_clock();
809 
810 	/*
811 	 * Garbage collect socket/tcpcb: We need to acquire the list lock
812 	 * exclusively to do this
813 	 */
814 
815 	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
816 		/* don't sweat it this time; cleanup was done last time */
817 		if (tcp_gc_done == TRUE) {
818 			tcp_gc_done = FALSE;
819 			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
820 			    tws_checked, cur_tw_slot, 0, 0, 0);
821 			/* Lock upgrade failed, give up this round */
822 			os_atomic_inc(&ipi->ipi_gc_req.intimer_fast, relaxed);
823 			return;
824 		}
825 		/* Upgrade failed, lost lock now take it again exclusive */
826 		lck_rw_lock_exclusive(&ipi->ipi_lock);
827 	}
828 	tcp_gc_done = TRUE;
829 
830 	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
831 		if (tcp_garbage_collect(inp, 0)) {
832 			os_atomic_inc(&ipi->ipi_gc_req.intimer_fast, relaxed);
833 		}
834 	}
835 
836 	/* Now cleanup the time wait ones */
837 	TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
838 		/*
839 		 * We check the timestamp here without holding the
840 		 * socket lock for better performance. If there are
841 		 * any pcbs in time-wait, the timer will get rescheduled.
842 		 * Hence some error in this check can be tolerated.
843 		 *
844 		 * Sometimes a socket on time-wait queue can be closed if
845 		 * 2MSL timer expired but the application still has a
846 		 * usecount on it.
847 		 */
848 		if (tw_tp->t_state == TCPS_CLOSED ||
849 		    TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
850 			if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) {
851 				os_atomic_inc(&ipi->ipi_gc_req.intimer_lazy, relaxed);
852 			}
853 		}
854 	}
855 
856 	/* take into account pcbs that are still in time_wait_slots */
857 	os_atomic_add(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount, relaxed);
858 
859 	lck_rw_done(&ipi->ipi_lock);
860 
861 	/* Clean up the socache while we are here */
862 	if (so_cache_timer()) {
863 		os_atomic_inc(&ipi->ipi_gc_req.intimer_lazy, relaxed);
864 	}
865 
866 	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
867 	    cur_tw_slot, 0, 0, 0);
868 
869 	return;
870 }
871 
872 /*
873  * Cancel all timers for TCP tp.
874  */
875 void
876 tcp_canceltimers(struct tcpcb *tp)
877 {
878 	int i;
879 
880 	tcp_remove_timer(tp);
881 	for (i = 0; i < TCPT_NTIMERS; i++) {
882 		tp->t_timer[i] = 0;
883 	}
884 	tp->tentry.timer_start = tcp_now;
885 	tp->tentry.index = TCPT_NONE;
886 }
887 
888 int     tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
889 { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
890 
891 int     tcp_backoff[TCP_MAXRXTSHIFT + 1] =
892 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
893 
894 static int tcp_totbackoff = 511;        /* sum of tcp_backoff[] */
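/*
 * Illustrative check of the constant above (not in the original file):
 * the first six entries of tcp_backoff[] sum to 1+2+4+8+16+32 = 63 and
 * the remaining seven entries are 64 each (7 * 64 = 448), giving
 * 63 + 448 = 511.
 */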
895 
896 void
897 tcp_rexmt_save_state(struct tcpcb *tp)
898 {
899 	u_int32_t fsize;
900 	if (TSTMP_SUPPORTED(tp)) {
901 		/*
902 		 * Since timestamps are supported on the connection,
903 		 * we can do recovery as described in rfc 4015.
904 		 */
905 		fsize = tp->snd_max - tp->snd_una;
906 		tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
907 		tp->snd_recover_prev = tp->snd_recover;
908 	} else {
909 		/*
910 		 * Timestamp option is not supported on this connection.
911 		 * Record ssthresh and cwnd so they can
912 		 * be recovered if this turns out to be a "bad" retransmit.
913 		 * A retransmit is considered "bad" if an ACK for this
914 		 * segment is received within RTT/2 interval; the assumption
915 		 * here is that the ACK was already in flight.  See
916 		 * "On Estimating End-to-End Network Path Properties" by
917 		 * Allman and Paxson for more details.
918 		 */
919 		tp->snd_cwnd_prev = tp->snd_cwnd;
920 		tp->snd_ssthresh_prev = tp->snd_ssthresh;
921 		tp->snd_recover_prev = tp->snd_recover;
922 		if (IN_FASTRECOVERY(tp)) {
923 			tp->t_flags |= TF_WASFRECOVERY;
924 		} else {
925 			tp->t_flags &= ~TF_WASFRECOVERY;
926 		}
927 	}
928 	tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
929 	tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
930 	tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
931 }
932 
933 /*
934  * Revert to the older segment size if there is an indication that PMTU
935  * blackhole detection was not needed.
936  */
937 void
938 tcp_pmtud_revert_segment_size(struct tcpcb *tp)
939 {
940 	int32_t optlen;
941 
942 	VERIFY(tp->t_pmtud_saved_maxopd > 0);
943 	tp->t_flags |= TF_PMTUD;
944 	tp->t_flags &= ~TF_BLACKHOLE;
945 	optlen = tp->t_maxopd - tp->t_maxseg;
946 	tp->t_maxopd = tp->t_pmtud_saved_maxopd;
947 	tp->t_maxseg = tp->t_maxopd - optlen;
948 
949 	/*
950 	 * Reset the slow-start flight size as it
951 	 * may depend on the new MSS
952 	 */
953 	if (CC_ALGO(tp)->cwnd_init != NULL) {
954 		CC_ALGO(tp)->cwnd_init(tp);
955 	}
956 
957 	if (TCP_USE_RLEDBAT(tp, tp->t_inpcb->inp_socket) &&
958 	    tcp_cc_rledbat.rwnd_init != NULL) {
959 		tcp_cc_rledbat.rwnd_init(tp);
960 	}
961 
962 	tp->t_pmtud_start_ts = 0;
963 	tcpstat.tcps_pmtudbh_reverted++;
964 
965 	/* change MSS according to recommendation, if there was one */
966 	tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
967 }
968 
969 static uint32_t
970 tcp_pmtud_black_holed_next_mss(struct tcpcb *tp)
971 {
972 	/* Reduce the MSS to intermediary value */
973 	if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
974 		return tcp_pmtud_black_hole_mss;
975 	} else {
976 		if (tp->t_inpcb->inp_vflag & INP_IPV4) {
977 			return tcp_mssdflt;
978 		} else {
979 			return tcp_v6mssdflt;
980 		}
981 	}
982 }
983 
984 /*
985  * Send a packet designed to force a response
986  * if the peer is up and reachable:
987  * either an ACK if the connection is still alive,
988  * or an RST if the peer has closed the connection
989  * due to timeout or reboot.
990  * Using sequence number tp->snd_una-1
991  * causes the transmitted zero-length segment
992  * to lie outside the receive window;
993  * by the protocol spec, this requires the
994  * correspondent TCP to respond.
995  */
996 static bool
997 tcp_send_keep_alive(struct tcpcb *tp)
998 {
999 	struct tcptemp *t_template;
1000 	struct mbuf *m;
1001 
1002 	tcpstat.tcps_keepprobe++;
1003 	t_template = tcp_maketemplate(tp, &m);
1004 	if (t_template != NULL) {
1005 		struct inpcb *inp = tp->t_inpcb;
1006 		struct tcp_respond_args tra;
1007 
1008 		bzero(&tra, sizeof(tra));
1009 		tra.nocell = INP_NO_CELLULAR(inp) ? 1 : 0;
1010 		tra.noexpensive = INP_NO_EXPENSIVE(inp) ? 1 : 0;
1011 		tra.noconstrained = INP_NO_CONSTRAINED(inp) ? 1 : 0;
1012 		tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp) ? 1 : 0;
1013 		tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp) ? 1 : 0;
1014 		tra.management_allowed = INP_MANAGEMENT_ALLOWED(inp) ? 1 : 0;
1015 		tra.keep_alive = 1;
1016 		if (tp->t_inpcb->inp_flags & INP_BOUND_IF) {
1017 			tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
1018 		} else {
1019 			tra.ifscope = IFSCOPE_NONE;
1020 		}
1021 		tcp_respond(tp, t_template->tt_ipgen,
1022 		    &t_template->tt_t, (struct mbuf *)NULL,
1023 		    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
1024 		(void) m_free(m);
1025 		return true;
1026 	} else {
1027 		return false;
1028 	}
1029 }
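
/*
 * Illustrative note (not in the original file): the probe built above is a
 * zero-length segment with seq == snd_una - 1, one byte to the left of the
 * peer's receive window.  Since the segment is out of window, the protocol
 * spec obliges the peer to answer with an ACK describing its current
 * state, which is exactly the liveness signal the keepalive machinery is
 * looking for.
 */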
1030 
1031 /*
1032  * TCP timer processing.
1033  */
1034 struct tcpcb *
1035 tcp_timers(struct tcpcb *tp, int timer)
1036 {
1037 	int32_t rexmt, optlen = 0, idle_time = 0;
1038 	struct socket *so;
1039 #if TCPDEBUG
1040 	int ostate;
1041 #endif
1042 	u_int64_t accsleep_ms;
1043 	u_int64_t last_sleep_ms = 0;
1044 
1045 	so = tp->t_inpcb->inp_socket;
1046 	idle_time = tcp_now - tp->t_rcvtime;
1047 
1048 	switch (timer) {
1049 	/*
1050 	 * 2 MSL timeout in shutdown went off.  If we're closed but
1051 	 * still waiting for peer to close and connection has been idle
1052 	 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
1053 	 * delete connection control block.
1054 	 * Otherwise (this case shouldn't happen), check again in a bit;
1055 	 * we keep the socket in the main list in that case.
1056 	 */
1057 	case TCPT_2MSL:
1058 		tcp_free_sackholes(tp);
1059 		if (tp->t_state != TCPS_TIME_WAIT &&
1060 		    tp->t_state != TCPS_FIN_WAIT_2 &&
1061 		    ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
1062 			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
1063 			    (u_int32_t)TCP_CONN_KEEPINTVL(tp));
1064 		} else {
1065 			if (tp->t_state == TCPS_FIN_WAIT_2) {
1066 				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1067 				    "FIN wait timeout drop");
1068 				tcpstat.tcps_fin_timeout_drops++;
1069 				tp = tcp_drop(tp, 0);
1070 			} else {
1071 				tp = tcp_close(tp);
1072 			}
1073 			return tp;
1074 		}
1075 		break;
1076 
1077 	/*
1078 	 * Retransmission timer went off.  Message has not
1079 	 * been acked within retransmit interval.  Back off
1080 	 * to a longer retransmit interval and retransmit one segment.
1081 	 */
1082 	case TCPT_REXMT:
1083 		absolutetime_to_nanoseconds(mach_absolutetime_asleep,
1084 		    &accsleep_ms);
1085 		accsleep_ms = accsleep_ms / 1000000UL;
1086 		if (accsleep_ms > tp->t_accsleep_ms) {
1087 			last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
1088 		}
1089 		/*
1090 		 * Drop a connection in the retransmit timer
1091 		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
1092 		 * times
1093 		 * 2. If the time spent in this retransmission episode is
1094 		 * more than the time limit set with TCP_RXT_CONNDROPTIME
1095 		 * socket option
1096 		 * 3. If TCP_RXT_FINDROP socket option was set and
1097 		 * we have already retransmitted the FIN 3 times without
1098 		 * receiving an ack
1099 		 */
1100 		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
1101 		    (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
1102 		    (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
1103 		    ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
1104 		    (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
1105 		    (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
1106 			if (tp->t_state == TCPS_ESTABLISHED &&
1107 			    tp->t_rxt_minimum_timeout > 0) {
1108 				/*
1109 				 * Avoid dropping a connection if minimum
1110 				 * timeout is set and that time did not
1111 				 * pass. We will retry sending
1112 				 * retransmissions at the maximum interval
1113 				 */
1114 				if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
1115 				    tp->t_rxt_minimum_timeout))) {
1116 					tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
1117 					goto retransmit_packet;
1118 				}
1119 			}
1120 			if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
1121 				tcpstat.tcps_rxtfindrop++;
1122 			} else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
1123 				tcpstat.tcps_drop_after_sleep++;
1124 			} else {
1125 				tcpstat.tcps_timeoutdrop++;
1126 			}
1127 			if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
1128 				if (TCP_ECN_ENABLED(tp)) {
1129 					INP_INC_IFNET_STAT(tp->t_inpcb,
1130 					    ecn_on.rxmit_drop);
1131 				} else {
1132 					INP_INC_IFNET_STAT(tp->t_inpcb,
1133 					    ecn_off.rxmit_drop);
1134 				}
1135 			}
1136 			tp->t_rxtshift = TCP_MAXRXTSHIFT;
1137 			soevent(so,
1138 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1139 
1140 			if (TCP_ECN_ENABLED(tp) &&
1141 			    tp->t_state == TCPS_ESTABLISHED) {
1142 				tcp_heuristic_ecn_droprxmt(tp);
1143 			}
1144 
1145 			TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1146 			    "retransmission timeout drop");
1147 			tp = tcp_drop(tp, tp->t_softerror ?
1148 			    tp->t_softerror : ETIMEDOUT);
1149 
1150 			break;
1151 		}
1152 retransmit_packet:
1153 		tcpstat.tcps_rexmttimeo++;
1154 		tp->t_accsleep_ms = accsleep_ms;
1155 
1156 		if (tp->t_rxtshift == 1 &&
1157 		    tp->t_state == TCPS_ESTABLISHED) {
1158 			/* Set the time at which retransmission started. */
1159 			tp->t_rxtstart = tcp_now;
1160 
1161 			/*
1162 			 * if this is the first retransmit timeout, save
1163 			 * the state so that we can recover if the timeout
1164 			 * is spurious.
1165 			 */
1166 			tcp_rexmt_save_state(tp);
1167 			tcp_ccdbg_trace(tp, NULL, TCP_CC_FIRST_REXMT);
1168 		}
1169 #if MPTCP
1170 		if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
1171 		    (tp->t_state == TCPS_ESTABLISHED) &&
1172 		    (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
1173 			mptcp_act_on_txfail(so);
1174 		}
1175 
1176 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
1177 		    (so->so_flags & SOF_MP_SUBFLOW)) {
1178 			struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1179 
1180 			if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER ||
1181 			    mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
1182 				mptcp_check_subflows_and_add(mpte);
1183 			}
1184 		}
1185 #endif /* MPTCP */
1186 
1187 		if (tp->t_adaptive_wtimo > 0 &&
1188 		    tp->t_rxtshift > tp->t_adaptive_wtimo &&
1189 		    TCPS_HAVEESTABLISHED(tp->t_state)) {
1190 			/* Send an event to the application */
1191 			soevent(so,
1192 			    (SO_FILT_HINT_LOCKED |
1193 			    SO_FILT_HINT_ADAPTIVE_WTIMO));
1194 		}
1195 
1196 		/*
1197 		 * If this is a retransmit timeout after PTO, the PTO
1198 		 * was not effective
1199 		 */
1200 		if (tp->t_flagsext & TF_SENT_TLPROBE) {
1201 			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1202 			tcpstat.tcps_rto_after_pto++;
1203 		}
1204 
1205 		if (tp->t_flagsext & TF_DELAY_RECOVERY) {
1206 			/*
1207 			 * Retransmit timer fired before entering recovery
1208 			 * on a connection with packet re-ordering. This
1209 			 * suggests that the reordering metrics computed
1210 			 * are not accurate.
1211 			 */
1212 			tp->t_reorderwin = 0;
1213 			tp->t_timer[TCPT_DELAYFR] = 0;
1214 			tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
1215 		}
1216 
1217 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1218 		    tp->t_state == TCPS_SYN_RECEIVED) {
1219 			tcp_disable_tfo(tp);
1220 		}
1221 
1222 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1223 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1224 		    (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
1225 		    !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
1226 		    ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
1227 		    tp->t_rxtshift > 4)) {
1228 			/*
1229 			 * For regular retransmissions, the first one is
1230 			 * done as a tail-loss probe.
1231 			 * Thus, if rxtshift > 1, this means we have sent the segment
1232 			 * a total of 3 times.
1233 			 *
1234 			 * If we are in SYN-SENT state, then there is no tail-loss
1235 			 * probe thus we have to let rxtshift go up to 3.
1236 			 */
1237 			tcp_heuristic_tfo_middlebox(tp);
1238 
1239 			so->so_error = ENODATA;
1240 			soevent(so,
1241 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1242 			sorwakeup(so);
1243 			sowwakeup(so);
1244 
1245 			tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
1246 			tcpstat.tcps_tfo_sndblackhole++;
1247 		}
1248 
1249 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1250 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1251 		    (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
1252 		    tp->t_rxtshift > 3) {
1253 			if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
1254 				tcp_heuristic_tfo_middlebox(tp);
1255 
1256 				so->so_error = ENODATA;
1257 				soevent(so,
1258 				    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1259 				sorwakeup(so);
1260 				sowwakeup(so);
1261 			}
1262 		}
1263 
1264 		if (tp->t_state == TCPS_SYN_SENT) {
1265 			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
1266 			tp->t_stat.synrxtshift = tp->t_rxtshift;
1267 			tp->t_stat.rxmitsyns++;
1268 
1269 			/* When retransmitting, disable TFO */
1270 			if (tfo_enabled(tp) &&
1271 			    !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) {
1272 				tcp_disable_tfo(tp);
1273 				tp->t_tfo_flags |= TFO_F_SYN_LOSS;
1274 			}
1275 		} else {
1276 			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
1277 		}
1278 
1279 		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
1280 		    TCP_ADD_REXMTSLOP(tp));
1281 		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
1282 
1283 		TCP_LOG_RTT_INFO(tp);
1284 
1285 		if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) {
1286 			goto fc_output;
1287 		}
1288 
1289 		tcp_free_sackholes(tp);
1290 		/*
1291 		 * Check for potential Path MTU Discovery Black Hole
1292 		 */
1293 		if (tcp_pmtud_black_hole_detect &&
1294 		    !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
1295 		    (tp->t_state == TCPS_ESTABLISHED)) {
1296 			if ((tp->t_flags & TF_PMTUD) &&
1297 			    tp->t_pmtud_lastseg_size > tcp_pmtud_black_holed_next_mss(tp) &&
1298 			    tp->t_rxtshift == 2) {
1299 				/*
1300 				 * Enter Path MTU Black-hole Detection mechanism:
1301 				 * - Disable Path MTU Discovery (IP "DF" bit).
1302 				 * - Reduce MTU to lower value than what we
1303 				 * negotiated with the peer.
1304 				 */
1305 				/* Disable Path MTU Discovery for now */
1306 				tp->t_flags &= ~TF_PMTUD;
1307 				/* Record that we may have found a black hole */
1308 				tp->t_flags |= TF_BLACKHOLE;
1309 				optlen = tp->t_maxopd - tp->t_maxseg;
1310 				/* Keep track of previous MSS */
1311 				tp->t_pmtud_saved_maxopd = tp->t_maxopd;
1312 				tp->t_pmtud_start_ts = tcp_now;
1313 				if (tp->t_pmtud_start_ts == 0) {
1314 					tp->t_pmtud_start_ts++;
1315 				}
1316 				/* Reduce the MSS to intermediary value */
1317 				tp->t_maxopd = tcp_pmtud_black_holed_next_mss(tp);
1318 				tp->t_maxseg = tp->t_maxopd - optlen;
1319 
1320 				/*
1321 				 * Reset the slow-start flight size
1322 				 * as it may depend on the new MSS
1323 				 */
1324 				if (CC_ALGO(tp)->cwnd_init != NULL) {
1325 					CC_ALGO(tp)->cwnd_init(tp);
1326 				}
1327 				tp->snd_cwnd = tp->t_maxseg;
1328 
1329 				if (TCP_USE_RLEDBAT(tp, so) &&
1330 				    tcp_cc_rledbat.rwnd_init != NULL) {
1331 					tcp_cc_rledbat.rwnd_init(tp);
1332 				}
1333 			}
1334 			/*
1335 			 * If further retransmissions are still
1336 			 * unsuccessful with a lowered MTU, maybe this
1337 			 * isn't a Black Hole and we restore the previous
1338 			 * MSS and blackhole detection flags.
1339 			 */
1340 			else {
1341 				if ((tp->t_flags & TF_BLACKHOLE) &&
1342 				    (tp->t_rxtshift > 4)) {
1343 					tcp_pmtud_revert_segment_size(tp);
1344 					tp->snd_cwnd = tp->t_maxseg;
1345 				}
1346 			}
1347 		}
1348 
1349 		/*
1350 		 * Disable rfc1323 and rfc1644 if we haven't got any
1351 		 * response to our SYN (after we reach the threshold)
1352 		 * to work-around some broken terminal servers (most of
1353 		 * which have hopefully been retired) that have bad VJ
1354 		 * header compression code which trashes TCP segments
1355 		 * containing unknown-to-them TCP options.
1356 		 * Do this only on non-local connections.
1357 		 */
1358 		if (tp->t_state == TCPS_SYN_SENT &&
1359 		    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) {
1360 			tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_REQ_CC);
1361 		}
1362 
1363 		/*
1364 		 * If losing, let the lower level know and try for
1365 		 * a better route.  Also, if we backed off this far,
1366 		 * our srtt estimate is probably bogus.  Clobber it
1367 		 * so we'll take the next rtt measurement as our srtt;
1368 		 * move the current srtt into rttvar to keep the current
1369 		 * retransmit times until then.
1370 		 */
1371 		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
1372 			if (!(tp->t_inpcb->inp_vflag & INP_IPV4)) {
1373 				in6_losing(tp->t_inpcb);
1374 			} else {
1375 				in_losing(tp->t_inpcb);
1376 			}
1377 			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
1378 			tp->t_srtt = 0;
1379 		}
1380 		tp->snd_nxt = tp->snd_una;
1381 		/*
1382 		 * Note:  We overload snd_recover to function also as the
1383 		 * snd_last variable described in RFC 2582
1384 		 */
1385 		tp->snd_recover = tp->snd_max;
1386 		/*
1387 		 * Force a segment to be sent.
1388 		 */
1389 		tp->t_flags |= TF_ACKNOW;
1390 
1391 		/* If timing a segment in this window, stop the timer */
1392 		tp->t_rtttime = 0;
1393 
1394 		if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) {
1395 			tcpstat.tcps_tailloss_rto++;
1396 		}
1397 
1398 
1399 		/*
1400 		 * RFC 5681 says: when a TCP sender detects segment loss
1401 		 * using retransmit timer and the given segment has already
1402 		 * been retransmitted by way of the retransmission timer at
1403 		 * least once, the value of ssthresh is held constant
1404 		 */
1405 		if (tp->t_rxtshift == 1 &&
1406 		    CC_ALGO(tp)->after_timeout != NULL) {
1407 			CC_ALGO(tp)->after_timeout(tp);
1408 			/*
1409 			 * CWR notifications are to be sent on new data
1410 			 * right after Fast Retransmits and ECE
1411 			 * notification receipts.
1412 			 */
1413 			if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
1414 				tp->ecn_flags |= TE_SENDCWR;
1415 			}
1416 		}
1417 
1418 		EXIT_FASTRECOVERY(tp);
1419 
1420 		/* Exit cwnd non validated phase */
1421 		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
1422 
1423 
1424 fc_output:
1425 		tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);
1426 
1427 		(void) tcp_output(tp);
1428 		break;
1429 
1430 	/*
1431 	 * Persistence timer into zero window.
1432 	 * Force a byte to be output, if possible.
1433 	 */
1434 	case TCPT_PERSIST:
1435 		tcpstat.tcps_persisttimeo++;
1436 		/*
1437 		 * Hack: if the peer is dead/unreachable, we do not
1438 		 * time out if the window is closed.  After a full
1439 		 * backoff, drop the connection if the idle time
1440 		 * (no responses to probes) reaches the maximum
1441 		 * backoff that we would use if retransmitting.
1442 		 *
1443 		 * Drop the connection if we reached the maximum allowed time for
1444 		 * Zero Window Probes without a non-zero update from the peer.
1445 		 * See rdar://5805356
1446 		 */
1447 		if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
1448 		    (idle_time >= tcp_maxpersistidle ||
1449 		    idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
1450 		    ((tp->t_persist_stop != 0) &&
1451 		    TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
1452 			TCP_LOG_DROP_PCB(NULL, NULL, tp, false, "persist timeout drop");
1453 			tcpstat.tcps_persistdrop++;
1454 			soevent(so,
1455 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1456 			tp = tcp_drop(tp, ETIMEDOUT);
1457 			break;
1458 		}
1459 		tcp_setpersist(tp);
1460 		tp->t_flagsext |= TF_FORCE;
1461 		(void) tcp_output(tp);
1462 		tp->t_flagsext &= ~TF_FORCE;
1463 		break;
1464 
1465 	/*
1466 	 * Keep-alive timer went off; send something
1467 	 * or drop connection if idle for too long.
1468 	 */
1469 	case TCPT_KEEP:
1470 #if FLOW_DIVERT
1471 		if (tp->t_inpcb->inp_socket->so_flags & SOF_FLOW_DIVERT) {
1472 			break;
1473 		}
1474 #endif /* FLOW_DIVERT */
1475 
1476 		tcpstat.tcps_keeptimeo++;
1477 #if MPTCP
1478 		/*
1479 		 * Regular TCP connections do not send keepalives after closing;
1480 		 * MPTCP must not either, after sending Data FINs.
1481 		 */
1482 		struct mptcb *mp_tp = tptomptp(tp);
1483 		if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
1484 		    (tp->t_state > TCPS_ESTABLISHED)) {
1485 			goto dropit;
1486 		} else if (mp_tp != NULL) {
1487 			if ((mptcp_ok_to_keepalive(mp_tp) == 0)) {
1488 				goto dropit;
1489 			}
1490 		}
1491 #endif /* MPTCP */
1492 		if (tp->t_state < TCPS_ESTABLISHED) {
1493 			goto dropit;
1494 		}
1495 		if ((always_keepalive ||
1496 		    (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
1497 		    (tp->t_flagsext & TF_DETECT_READSTALL) ||
1498 		    (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
1499 		    (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
1500 			if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) {
1501 				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1502 				    "keep alive timeout drop");
1503 				goto dropit;
1504 			}
1505 
1506 			if (tcp_send_keep_alive(tp)) {
1507 				if (tp->t_flagsext & TF_DETECT_READSTALL) {
1508 					tp->t_rtimo_probes++;
1509 				}
1510 
1511 				TCP_LOG_KEEP_ALIVE(tp, idle_time);
1512 			}
1513 
1514 			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1515 			    TCP_CONN_KEEPINTVL(tp));
1516 		} else {
1517 			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1518 			    TCP_CONN_KEEPIDLE(tp));
1519 		}
1520 		if (tp->t_flagsext & TF_DETECT_READSTALL) {
1521 			struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
1522 			bool reenable_probe = false;
1523 			/*
1524 			 * The keep alive packets sent to detect a read
1525 			 * stall did not get a response from the
1526 			 * peer. Generate more keep-alives to confirm this.
1527 			 * If the number of probes sent reaches the limit,
1528 			 * generate an event.
1529 			 */
1530 			if (tp->t_adaptive_rtimo > 0) {
1531 				if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
1532 					/* Generate an event */
1533 					soevent(so,
1534 					    (SO_FILT_HINT_LOCKED |
1535 					    SO_FILT_HINT_ADAPTIVE_RTIMO));
1536 					tcp_keepalive_reset(tp);
1537 				} else {
1538 					reenable_probe = true;
1539 				}
1540 			} else if (outifp != NULL &&
1541 			    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
1542 			    tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
1543 				reenable_probe = true;
1544 			} else {
1545 				tp->t_flagsext &= ~TF_DETECT_READSTALL;
1546 			}
1547 			if (reenable_probe) {
1548 				int ind = min(tp->t_rtimo_probes,
1549 				    TCP_MAXRXTSHIFT);
1550 				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
1551 					tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
1552 			}
1553 		}
1554 		if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
1555 			int ind;
1556 
1557 			tp->t_tfo_probes++;
1558 			ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);
1559 
1560 			/*
1561 			 * We take the minimum among the time set by true
1562 			 * keepalive (see above) and the backoff'd RTO. That
1563 			 * way we backoff in case of packet-loss but will never
1564 			 * timeout slower than regular keepalive due to the
1565 			 * backing off.
1566 			 */
1567 			tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
1568 				    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
1569 			    tp->t_timer[TCPT_KEEP]);
1570 		} else if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1571 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1572 		    tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
1573 			/* Still no data! Let's assume a TFO-error and err out... */
1574 			tcp_heuristic_tfo_middlebox(tp);
1575 
1576 			so->so_error = ENODATA;
1577 			soevent(so,
1578 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1579 			sorwakeup(so);
1580 			tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
1581 			tcpstat.tcps_tfo_blackhole++;
1582 		}
1583 		break;
1584 	case TCPT_DELACK:
1585 		if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
1586 			tp->t_flags &= ~TF_DELACK;
1587 			tp->t_timer[TCPT_DELACK] = 0;
1588 			tp->t_flags |= TF_ACKNOW;
1589 
1590 			/*
1591 			 * If delayed ack timer fired while stretching
1592 			 * acks, count the number of times the streaming
1593 			 * detection was not correct. If this exceeds a
1594 			 * threshold, disable stretch ack on this
1595 			 * connection
1596 			 *
1597 			 * Also, go back to acking every other packet.
1598 			 */
1599 			if ((tp->t_flags & TF_STRETCHACK)) {
1600 				if (tp->t_unacksegs > 1 &&
1601 				    tp->t_unacksegs < maxseg_unacked) {
1602 					tp->t_stretchack_delayed++;
1603 				}
1604 
1605 				if (tp->t_stretchack_delayed >
1606 				    TCP_STRETCHACK_DELAY_THRESHOLD) {
1607 					tp->t_flagsext |= TF_DISABLE_STRETCHACK;
1608 					/*
1609 					 * Note the time at which stretch
1610 					 * ack was disabled automatically
1611 					 */
1612 					tp->rcv_nostrack_ts = tcp_now;
1613 					tcpstat.tcps_nostretchack++;
1614 					tp->t_stretchack_delayed = 0;
1615 					tp->rcv_nostrack_pkts = 0;
1616 				}
1617 				tcp_reset_stretch_ack(tp);
1618 			}
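			/*
			 * Concretely (maxseg_unacked == 8 is a typical
			 * default): a delayed-ack timeout while only 2..7
			 * segments are pending means streaming detection
			 * fired on a flow that then went quiet; after
			 * TCP_STRETCHACK_DELAY_THRESHOLD such misdetections,
			 * stretch acks are disabled for this connection.
			 */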
1619 			tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1620 
1621 			/*
1622 			 * If we are measuring inter packet arrival jitter
1623 			 * for throttling a connection, this delayed ack
1624 			 * might be the reason for accumulating some
1625 			 * jitter. So let's restart the measurement.
1626 			 */
1627 			CLEAR_IAJ_STATE(tp);
1628 
1629 			tcpstat.tcps_delack++;
1630 			tp->t_stat.delayed_acks_sent++;
1631 			(void) tcp_output(tp);
1632 		}
1633 		break;
1634 
1635 #if MPTCP
1636 	case TCPT_JACK_RXMT:
1637 		if ((tp->t_state == TCPS_ESTABLISHED) &&
1638 		    (tp->t_mpflags & TMPF_PREESTABLISHED) &&
1639 		    (tp->t_mpflags & TMPF_JOINED_FLOW)) {
1640 			if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
1641 				tcpstat.tcps_timeoutdrop++;
1642 				soevent(so,
1643 				    (SO_FILT_HINT_LOCKED |
1644 				    SO_FILT_HINT_TIMEOUT));
1645 				tp = tcp_drop(tp, tp->t_softerror ?
1646 				    tp->t_softerror : ETIMEDOUT);
1647 				break;
1648 			}
1649 			tcpstat.tcps_join_rxmts++;
1650 			tp->t_mpflags |= TMPF_SND_JACK;
1651 			tp->t_flags |= TF_ACKNOW;
1652 
1653 			/*
1654 			 * No backoff is implemented for simplicity for this
1655 			 * corner case.
1656 			 */
1657 			(void) tcp_output(tp);
1658 		}
1659 		break;
1660 	case TCPT_CELLICON:
1661 	{
1662 		struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1663 
1664 		tp->t_timer[TCPT_CELLICON] = 0;
1665 
1666 		if (mpte->mpte_cellicon_increments == 0) {
1667 			/* Cell-icon not set by this connection */
1668 			break;
1669 		}
1670 
1671 		if (TSTMP_LT(mpte->mpte_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE, tcp_now)) {
1672 			mptcp_unset_cellicon(mpte, NULL, 1);
1673 		}
1674 
1675 		if (mpte->mpte_cellicon_increments) {
1676 			tp->t_timer[TCPT_CELLICON] = OFFSET_FROM_START(tp, MPTCP_CELLICON_TOGGLE_RATE);
1677 		}
1678 
1679 		break;
1680 	}
1681 #endif /* MPTCP */
1682 
1683 	case TCPT_PTO:
1684 	{
1685 		int32_t ret = 0;
1686 
1687 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1688 			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1689 		}
1690 		/*
1691 		 * Check if the connection is in the right state to
1692 		 * send a probe
1693 		 */
1694 		if ((tp->t_state != TCPS_ESTABLISHED ||
1695 		    tp->t_rxtshift > 0 ||
1696 		    tp->snd_max == tp->snd_una ||
1697 		    !SACK_ENABLED(tp) ||
1698 		    (tcp_do_better_lr != 1 && !TAILQ_EMPTY(&tp->snd_holes)) ||
1699 		    IN_FASTRECOVERY(tp)) &&
1700 		    !(tp->t_flagsext & TF_IF_PROBING)) {
1701 			break;
1702 		}
1703 
1704 		/*
1705 		 * When the interface state changes, explicitly reset the retransmission
1706 		 * timer state for both SYN and data packets, because we do not want to
1707 		 * wait unnecessarily or time out too quickly if the link characteristics
1708 		 * have changed drastically.
1709 		 */
1710 		if (tp->t_flagsext & TF_IF_PROBING) {
1711 			tp->t_rxtshift = 0;
1712 			if (tp->t_state == TCPS_SYN_SENT) {
1713 				tp->t_stat.synrxtshift = tp->t_rxtshift;
1714 			}
1715 			/*
1716 			 * Reset to the default RTO
1717 			 */
1718 			tp->t_srtt = TCPTV_SRTTBASE;
1719 			tp->t_rttvar =
1720 			    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1721 			tp->t_rttmin = tp->t_flags & TF_LOCAL ? tcp_TCPTV_MIN :
1722 			    TCPTV_REXMTMIN;
1723 			TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1724 			    tp->t_rttmin, TCPTV_REXMTMAX, TCP_ADD_REXMTSLOP(tp));
1725 			TCP_LOG_RTT_INFO(tp);
1726 		}
1727 
1728 		if (tp->t_state == TCPS_SYN_SENT) {
1729 			/*
1730 			 * The PTO for SYN_SENT reinitializes TCP as if it were a fresh
1731 			 * connection attempt
1732 			 */
1733 			tp->snd_nxt = tp->snd_una;
1734 			/*
1735 			 * Note:  We overload snd_recover to function also as the
1736 			 * snd_last variable described in RFC 2582
1737 			 */
1738 			tp->snd_recover = tp->snd_max;
1739 			/*
1740 			 * Force a segment to be sent.
1741 			 */
1742 			tp->t_flags |= TF_ACKNOW;
1743 
1744 			/* If timing a segment in this window, stop the timer */
1745 			tp->t_rtttime = 0;
1746 		} else {
1747 			int32_t snd_len;
1748 
1749 			/*
1750 			 * If there is no new data to send or if the
1751 			 * connection is limited by receive window then
1752 			 * retransmit the last segment, otherwise send
1753 			 * new data.
1754 			 */
1755 			snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
1756 			    - (tp->snd_max - tp->snd_una);
1757 			if (snd_len > 0) {
1758 				tp->snd_nxt = tp->snd_max;
1759 			} else {
1760 				snd_len = min((tp->snd_max - tp->snd_una),
1761 				    tp->t_maxseg);
1762 				tp->snd_nxt = tp->snd_max - snd_len;
1763 			}
1764 		}
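		/*
		 * Worked example for the computation above (hypothetical
		 * values): with so_snd.sb_cc == 6000, snd_wnd == 10000 and
		 * snd_max - snd_una == 4000, snd_len == 2000 > 0, so new
		 * data is probed starting at snd_max. If everything in the
		 * send buffer is already in flight, snd_len <= 0 and the
		 * last min(in-flight, t_maxseg) bytes are retransmitted.
		 */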
1765 
1766 		tcpstat.tcps_pto++;
1767 		if (tp->t_flagsext & TF_IF_PROBING) {
1768 			tcpstat.tcps_probe_if++;
1769 		}
1770 
1771 		/* If timing a segment in this window, stop the timer */
1772 		tp->t_rtttime = 0;
1773 		/* Note that tail loss probe is being sent. Exclude IF probe */
1774 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1775 			tp->t_flagsext |= TF_SENT_TLPROBE;
1776 			tp->t_tlpstart = tcp_now;
1777 		}
1778 
1779 		tp->snd_cwnd += tp->t_maxseg;
1780 		/*
1781 		 * When the tail-loss probe fires, we reset the RTO timer:
1782 		 * a probe was just sent, so it is safe to push the timer out.
1783 		 *
1784 		 * Set to 0 to ensure that tcp_output() will reschedule it
1785 		 */
1786 		tp->t_timer[TCPT_REXMT] = 0;
1787 		ret = tcp_output(tp);
1788 
1789 #if (DEBUG || DEVELOPMENT)
1790 		if ((tp->t_flagsext & TF_IF_PROBING) &&
1791 		    ((IFNET_IS_COMPANION_LINK(tp->t_inpcb->inp_last_outifp)) ||
1792 		    tp->t_state == TCPS_SYN_SENT)) {
1793 			if (ret == 0 && tcp_probe_if_fix_port > 0 &&
1794 			    tcp_probe_if_fix_port <= IPPORT_HILASTAUTO) {
1795 				tp->t_timer[TCPT_REXMT] = 0;
1796 				tcp_set_lotimer_index(tp);
1797 			}
1798 
1799 			os_log(OS_LOG_DEFAULT,
1800 			    "%s: sent %s probe for %u > %u on interface %s"
1801 			    " (%u) %s(%d)",
1802 			    __func__,
1803 			    tp->t_state == TCPS_SYN_SENT ? "SYN" : "data",
1804 			    ntohs(tp->t_inpcb->inp_lport),
1805 			    ntohs(tp->t_inpcb->inp_fport),
1806 			    if_name(tp->t_inpcb->inp_last_outifp),
1807 			    tp->t_inpcb->inp_last_outifp->if_index,
1808 			    ret == 0 ? "succeeded" : "failed", ret);
1809 		}
1810 #endif /* DEBUG || DEVELOPMENT */
1811 
1812 		/*
1813 		 * When there is data (or a SYN) to send, the above call to
1814 		 * tcp_output() should have armed either the REXMT or the
1815 		 * PERSIST timer. If it didn't, something is wrong and this
1816 		 * connection would idle around forever. Let's make sure that
1817 		 * at least the REXMT timer is set.
1818 		 */
1819 		if (tp->t_timer[TCPT_REXMT] == 0 && tp->t_timer[TCPT_PERSIST] == 0 &&
1820 		    (tp->t_inpcb->inp_socket->so_snd.sb_cc != 0 || tp->t_state == TCPS_SYN_SENT ||
1821 		    tp->t_state == TCPS_SYN_RECEIVED)) {
1822 			tp->t_timer[TCPT_REXMT] =
1823 			    OFFSET_FROM_START(tp, tp->t_rxtcur);
1824 
1825 			os_log(OS_LOG_DEFAULT,
1826 			    "%s: tcp_output() returned %u with retransmission timer disabled "
1827 			    "for %u > %u in state %d, reset timer to %d",
1828 			    __func__, ret,
1829 			    ntohs(tp->t_inpcb->inp_lport),
1830 			    ntohs(tp->t_inpcb->inp_fport),
1831 			    tp->t_state,
1832 			    tp->t_timer[TCPT_REXMT]);
1833 
1834 			tcp_check_timer_state(tp);
1835 		}
1836 		tp->snd_cwnd -= tp->t_maxseg;
1837 
1838 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1839 			tp->t_tlphighrxt = tp->snd_nxt;
1840 		}
1841 		break;
1842 	}
1843 	case TCPT_DELAYFR:
1844 		tp->t_flagsext &= ~TF_DELAY_RECOVERY;
1845 
1846 		/*
1847 		 * Don't do anything if one of the following is true:
1848 		 * - the connection is already in recovery
1849 		 * - all data up to snd_recover has been acknowledged
1850 		 * - the retransmit timeout has fired
1851 		 */
1852 		if (IN_FASTRECOVERY(tp) ||
1853 		    SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
1854 		    tp->t_rxtshift > 0) {
1855 			break;
1856 		}
1857 
1858 		VERIFY(SACK_ENABLED(tp));
1859 		tcp_rexmt_save_state(tp);
1860 		if (CC_ALGO(tp)->pre_fr != NULL) {
1861 			CC_ALGO(tp)->pre_fr(tp);
1862 			if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
1863 				tp->ecn_flags |= TE_SENDCWR;
1864 			}
1865 		}
1866 		ENTER_FASTRECOVERY(tp);
1867 
1868 		tp->t_timer[TCPT_REXMT] = 0;
1869 		tcpstat.tcps_sack_recovery_episode++;
1870 		tp->t_sack_recovery_episode++;
1871 		tp->sack_newdata = tp->snd_nxt;
1872 		tp->snd_cwnd = tp->t_maxseg;
1873 		tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
1874 		(void) tcp_output(tp);
1875 		break;
1876 
1877 dropit:
1878 		tcpstat.tcps_keepdrops++;
1879 		soevent(so,
1880 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1881 		tp = tcp_drop(tp, ETIMEDOUT);
1882 		break;
1883 	}
1884 #if TCPDEBUG
1885 	if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
1886 		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
1887 		    PRU_SLOWTIMO);
1888 	}
1889 #endif
1890 	return tp;
1891 }
1892 
1893 /* Remove a timer entry from timer list */
1894 void
1895 tcp_remove_timer(struct tcpcb *tp)
1896 {
1897 	struct tcptimerlist *listp = &tcp_timer_list;
1898 
1899 	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
1900 	if (!(TIMER_IS_ON_LIST(tp))) {
1901 		return;
1902 	}
1903 	lck_mtx_lock(&listp->mtx);
1904 
1905 	/* Check if pcb is on timer list again after acquiring the lock */
1906 	if (!(TIMER_IS_ON_LIST(tp))) {
1907 		lck_mtx_unlock(&listp->mtx);
1908 		return;
1909 	}
1910 
1911 	if (listp->next_te != NULL && listp->next_te == &tp->tentry) {
1912 		listp->next_te = LIST_NEXT(&tp->tentry, le);
1913 	}
1914 
1915 	LIST_REMOVE(&tp->tentry, le);
1916 	tp->t_flags &= ~(TF_TIMER_ONLIST);
1917 
1918 	listp->entries--;
1919 
1920 	tp->tentry.le.le_next = NULL;
1921 	tp->tentry.le.le_prev = NULL;
1922 	lck_mtx_unlock(&listp->mtx);
1923 }
1924 
1925 /*
1926  * Function to check if the timerlist needs to be rescheduled to run
1927  * the timer entry correctly. Basically, this is to check if we can avoid
1928  * taking the list lock.
1929  */
1930 
1931 static boolean_t
1932 need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
1933 {
1934 	struct tcptimerlist *listp = &tcp_timer_list;
1935 	int32_t diff;
1936 
1937 	/*
1938 	 * If the list is being processed then the state of the list is
1939 	 * in flux. In this case always acquire the lock and set the state
1940 	 * correctly.
1941 	 */
1942 	if (listp->running) {
1943 		return TRUE;
1944 	}
1945 
1946 	if (!listp->scheduled) {
1947 		return TRUE;
1948 	}
1949 
1950 	diff = timer_diff(listp->runtime, 0, runtime, 0);
1951 	if (diff <= 0) {
1952 		/* The list is going to run before this timer */
1953 		return FALSE;
1954 	} else {
1955 		if (mode & TCP_TIMERLIST_10MS_MODE) {
1956 			if (diff <= TCP_TIMER_10MS_QUANTUM) {
1957 				return FALSE;
1958 			}
1959 		} else if (mode & TCP_TIMERLIST_100MS_MODE) {
1960 			if (diff <= TCP_TIMER_100MS_QUANTUM) {
1961 				return FALSE;
1962 			}
1963 		} else {
1964 			if (diff <= TCP_TIMER_500MS_QUANTUM) {
1965 				return FALSE;
1966 			}
1967 		}
1968 	}
1969 	return TRUE;
1970 }
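/*
 * Rescheduling sketch for the check above (hypothetical values): if the
 * list is already scheduled to run 50 ms from now and a 10ms-mode timer
 * asks for a runtime 55 ms from now, diff == 5 <= TCP_TIMER_10MS_QUANTUM,
 * so the pending run is close enough and no reschedule is needed. A
 * 500ms-mode timer tolerates correspondingly more slack before the list
 * has to be rescheduled.
 */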
1971 
1972 void
1973 tcp_sched_timerlist(uint32_t offset)
1974 {
1975 	uint64_t deadline = 0;
1976 	struct tcptimerlist *listp = &tcp_timer_list;
1977 
1978 	LCK_MTX_ASSERT(&listp->mtx, LCK_MTX_ASSERT_OWNED);
1979 
1980 	offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
1981 	listp->runtime = tcp_now + offset;
1982 	listp->schedtime = tcp_now;
1983 	if (listp->runtime == 0) {
1984 		listp->runtime++;
1985 		offset++;
1986 	}
1987 
1988 	clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
1989 
1990 	thread_call_enter_delayed(listp->call, deadline);
1991 	listp->scheduled = TRUE;
1992 }
1993 
1994 /*
1995  * Function to run the timers for a connection.
1996  *
1997  * Returns the offset of next timer to be run for this connection which
1998  * can be used to reschedule the timerlist.
1999  *
2000  * te_mode is an out parameter that indicates the modes of active
2001  * timers for this connection.
2002  */
2003 u_int32_t
2004 tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
2005     u_int16_t probe_if_index)
2006 {
2007 	struct socket *so;
2008 	u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
2009 	u_int32_t timer_val, offset = 0, lo_timer = 0;
2010 	int32_t diff;
2011 	boolean_t needtorun[TCPT_NTIMERS];
2012 	int count = 0;
2013 
2014 	VERIFY(tp != NULL);
2015 	bzero(needtorun, sizeof(needtorun));
2016 	*te_mode = 0;
2017 
2018 	socket_lock(tp->t_inpcb->inp_socket, 1);
2019 
2020 	so = tp->t_inpcb->inp_socket;
2021 	/* Release the want count on inp */
2022 	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
2023 	    == WNT_STOPUSING) {
2024 		if (TIMER_IS_ON_LIST(tp)) {
2025 			tcp_remove_timer(tp);
2026 		}
2027 
2028 		/* Looks like the TCP connection got closed while we
2029 		 * were waiting for the lock. Done.
2030 		 */
2031 		goto done;
2032 	}
2033 
2034 	/*
2035 	 * If this connection is over an interface that needs to
2036 	 * be probed, send probe packets to reinitiate communication.
2037 	 */
2038 	if (TCP_IF_STATE_CHANGED(tp, probe_if_index)) {
2039 		tp->t_flagsext |= TF_IF_PROBING;
2040 		tcp_timers(tp, TCPT_PTO);
2041 		tp->t_timer[TCPT_PTO] = 0;
2042 		tp->t_flagsext &= ~TF_IF_PROBING;
2043 	}
2044 
2045 	/*
2046 	 * Since the timer thread needs to wait for the TCP lock, it may race
2047 	 * with another thread that can cancel or reschedule the timer
2048 	 * that is about to run. Check if we need to run anything.
2049 	 */
2050 	if ((index = tp->tentry.index) == TCPT_NONE) {
2051 		goto done;
2052 	}
2053 
2054 	timer_val = tp->t_timer[index];
2055 
2056 	diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
2057 	if (diff > 0) {
2058 		if (tp->tentry.index != TCPT_NONE) {
2059 			offset = diff;
2060 			*(te_mode) = tp->tentry.mode;
2061 		}
2062 		goto done;
2063 	}
2064 
2065 	tp->t_timer[index] = 0;
2066 	if (timer_val > 0) {
2067 		tp = tcp_timers(tp, index);
2068 		if (tp == NULL) {
2069 			goto done;
2070 		}
2071 	}
2072 
2073 	/*
2074 	 * Check if there are any other timers that need to be run.
2075 	 * While doing so, adjust the timer values with respect to tcp_now.
2076 	 */
2077 	tp->tentry.mode = 0;
2078 	for (i = 0; i < TCPT_NTIMERS; ++i) {
2079 		if (tp->t_timer[i] != 0) {
2080 			diff = timer_diff(tp->tentry.timer_start,
2081 			    tp->t_timer[i], tcp_now, 0);
2082 			if (diff <= 0) {
2083 				needtorun[i] = TRUE;
2084 				count++;
2085 			} else {
2086 				tp->t_timer[i] = diff;
2087 				needtorun[i] = FALSE;
2088 				if (lo_timer == 0 || diff < lo_timer) {
2089 					lo_timer = diff;
2090 					lo_index = i;
2091 				}
2092 				TCP_SET_TIMER_MODE(tp->tentry.mode, i);
2093 			}
2094 		}
2095 	}
2096 
2097 	tp->tentry.timer_start = tcp_now;
2098 	tp->tentry.index = lo_index;
2099 	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2100 
2101 	if (tp->tentry.index != TCPT_NONE) {
2102 		tp->tentry.runtime = tp->tentry.timer_start +
2103 		    tp->t_timer[tp->tentry.index];
2104 		if (tp->tentry.runtime == 0) {
2105 			tp->tentry.runtime++;
2106 		}
2107 	}
2108 
2109 	if (count > 0) {
2110 		/* run any other timers outstanding at this time. */
2111 		for (i = 0; i < TCPT_NTIMERS; ++i) {
2112 			if (needtorun[i]) {
2113 				tp->t_timer[i] = 0;
2114 				tp = tcp_timers(tp, i);
2115 				if (tp == NULL) {
2116 					offset = 0;
2117 					*(te_mode) = 0;
2118 					goto done;
2119 				}
2120 			}
2121 		}
2122 		tcp_set_lotimer_index(tp);
2123 	}
2124 
2125 	if (tp->tentry.index < TCPT_NONE) {
2126 		offset = tp->t_timer[tp->tentry.index];
2127 		*(te_mode) = tp->tentry.mode;
2128 	}
2129 
2130 done:
2131 	if (tp != NULL && tp->tentry.index == TCPT_NONE) {
2132 		tcp_remove_timer(tp);
2133 		offset = 0;
2134 	}
2135 
2136 	socket_unlock(so, 1);
2137 	return offset;
2138 }
2139 
2140 void
2141 tcp_run_timerlist(void *arg1, void *arg2)
2142 {
2143 #pragma unused(arg1, arg2)
2144 	struct tcptimerentry *te, *next_te;
2145 	struct tcptimerlist *listp = &tcp_timer_list;
2146 	struct tcpcb *tp;
2147 	uint32_t next_timer = 0; /* offset of the next timer on the list */
2148 	u_int16_t te_mode = 0;  /* modes of all active timers in a tcpcb */
2149 	u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
2150 	uint32_t active_count = 0;
2151 
2152 	calculate_tcp_clock();
2153 
2154 	lck_mtx_lock(&listp->mtx);
2155 
2156 	int32_t drift = tcp_now - listp->runtime;
2157 	if (drift <= 1) {
2158 		tcpstat.tcps_timer_drift_le_1_ms++;
2159 	} else if (drift <= 10) {
2160 		tcpstat.tcps_timer_drift_le_10_ms++;
2161 	} else if (drift <= 20) {
2162 		tcpstat.tcps_timer_drift_le_20_ms++;
2163 	} else if (drift <= 50) {
2164 		tcpstat.tcps_timer_drift_le_50_ms++;
2165 	} else if (drift <= 100) {
2166 		tcpstat.tcps_timer_drift_le_100_ms++;
2167 	} else if (drift <= 200) {
2168 		tcpstat.tcps_timer_drift_le_200_ms++;
2169 	} else if (drift <= 500) {
2170 		tcpstat.tcps_timer_drift_le_500_ms++;
2171 	} else if (drift <= 1000) {
2172 		tcpstat.tcps_timer_drift_le_1000_ms++;
2173 	} else {
2174 		tcpstat.tcps_timer_drift_gt_1000_ms++;
2175 	}
2176 
2177 	listp->running = TRUE;
2178 
2179 	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
2180 		uint32_t offset = 0;
2181 		uint32_t runtime = te->runtime;
2182 
2183 		tp = TIMERENTRY_TO_TP(te);
2184 
2185 		/*
2186 		 * An interface probe may need to happen before the previously scheduled runtime.
2187 		 */
2188 		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now) &&
2189 		    !TCP_IF_STATE_CHANGED(tp, listp->probe_if_index)) {
2190 			offset = timer_diff(runtime, 0, tcp_now, 0);
2191 			if (next_timer == 0 || offset < next_timer) {
2192 				next_timer = offset;
2193 			}
2194 			list_mode |= te->mode;
2195 			continue;
2196 		}
2197 
2198 		/*
2199 		 * Acquire an inp wantcnt on the inpcb so that the socket
2200 		 * won't get detached even if tcp_close is called
2201 		 */
2202 		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
2203 		    == WNT_STOPUSING) {
2204 			/*
2205 			 * Somehow this pcb went into dead state while
2206 			 * on the timer list, just take it off the list.
2207 			 * Since the timer list entry pointers are
2208 			 * protected by the timer list lock, we can
2209 			 * do it here without the socket lock.
2210 			 */
2211 			if (TIMER_IS_ON_LIST(tp)) {
2212 				tp->t_flags &= ~(TF_TIMER_ONLIST);
2213 				LIST_REMOVE(&tp->tentry, le);
2214 				listp->entries--;
2215 
2216 				tp->tentry.le.le_next = NULL;
2217 				tp->tentry.le.le_prev = NULL;
2218 			}
2219 			continue;
2220 		}
2221 		active_count++;
2222 
2223 		/*
2224 		 * Store the next timerentry pointer before releasing the
2225 		 * list lock. If that entry has to be removed when we
2226 		 * release the lock, this pointer will be updated to the
2227 		 * element after that.
2228 		 */
2229 		listp->next_te = next_te;
2230 
2231 		VERIFY_NEXT_LINK(&tp->tentry, le);
2232 		VERIFY_PREV_LINK(&tp->tentry, le);
2233 
2234 		lck_mtx_unlock(&listp->mtx);
2235 
2236 		offset = tcp_run_conn_timer(tp, &te_mode,
2237 		    listp->probe_if_index);
2238 
2239 		lck_mtx_lock(&listp->mtx);
2240 
2241 		next_te = listp->next_te;
2242 		listp->next_te = NULL;
2243 
2244 		if (offset > 0 && te_mode != 0) {
2245 			list_mode |= te_mode;
2246 
2247 			if (next_timer == 0 || offset < next_timer) {
2248 				next_timer = offset;
2249 			}
2250 		}
2251 	}
2252 
2253 	if (!LIST_EMPTY(&listp->lhead)) {
2254 		uint32_t next_mode = 0;
2255 		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
2256 		    (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) {
2257 			next_mode = TCP_TIMERLIST_10MS_MODE;
2258 		} else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
2259 		    (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) {
2260 			next_mode = TCP_TIMERLIST_100MS_MODE;
2261 		} else {
2262 			next_mode = TCP_TIMERLIST_500MS_MODE;
2263 		}
2264 
2265 		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
2266 			listp->idleruns = 0;
2267 		} else {
2268 			/*
2269 			 * the next required mode is slow mode, but if
2270 			 * the last one was a faster mode and we did not
2271 			 * have enough idle runs, repeat the last mode.
2272 			 *
2273 			 * We try to keep the timer list in fast mode for
2274 			 * some idle time in expectation of new data.
2275 			 */
2276 			if (listp->mode != next_mode &&
2277 			    listp->idleruns < timer_fastmode_idlemax) {
2278 				listp->idleruns++;
2279 				next_mode = listp->mode;
2280 				next_timer = TCP_TIMER_100MS_QUANTUM;
2281 			} else {
2282 				listp->idleruns = 0;
2283 			}
2284 		}
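		/*
		 * Example of the fast-mode hold (hypothetical): if the list
		 * last ran in 100ms mode and no connection needs a fast
		 * timer right now, it keeps running at the 100 ms quantum
		 * for up to timer_fastmode_idlemax idle runs before dropping
		 * to 500ms mode, so newly arriving data does not immediately
		 * pay the coarse-timer latency.
		 */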
2285 		listp->mode = next_mode;
2286 		if (listp->pref_offset != 0) {
2287 			next_timer = min(listp->pref_offset, next_timer);
2288 		}
2289 
2290 		if (listp->mode == TCP_TIMERLIST_500MS_MODE) {
2291 			next_timer = max(next_timer,
2292 			    TCP_TIMER_500MS_QUANTUM);
2293 		}
2294 
2295 		tcp_sched_timerlist(next_timer);
2296 	} else {
2297 		/*
2298 		 * No need to reschedule this timer, but always run
2299 		 * periodically, at a much coarser interval.
2300 		 */
2301 		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
2302 	}
2303 
2304 	listp->running = FALSE;
2305 	listp->pref_mode = 0;
2306 	listp->pref_offset = 0;
2307 	listp->probe_if_index = 0;
2308 
2309 	lck_mtx_unlock(&listp->mtx);
2310 }
2311 
2312 /*
2313  * Function to check if the timerlist needs to be rescheduled to run this
2314  * connection's timers correctly.
2315  */
2316 void
2317 tcp_sched_timers(struct tcpcb *tp)
2318 {
2319 	struct tcptimerentry *te = &tp->tentry;
2320 	u_int16_t index = te->index;
2321 	u_int16_t mode = te->mode;
2322 	struct tcptimerlist *listp = &tcp_timer_list;
2323 	int32_t offset = 0;
2324 	boolean_t list_locked = FALSE;
2325 
2326 	if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
2327 		/* Just return without adding the dead pcb to the list */
2328 		if (TIMER_IS_ON_LIST(tp)) {
2329 			tcp_remove_timer(tp);
2330 		}
2331 		return;
2332 	}
2333 
2334 	if (index == TCPT_NONE) {
2335 		/* Nothing to run */
2336 		tcp_remove_timer(tp);
2337 		return;
2338 	}
2339 
2340 	/*
2341 	 * compute the offset at which the next timer for this connection
2342 	 * has to run.
2343 	 */
2344 	offset = timer_diff(te->runtime, 0, tcp_now, 0);
2345 	if (offset <= 0) {
2346 		offset = 1;
2347 		tcp_timer_advanced++;
2348 	}
2349 
2350 	if (!TIMER_IS_ON_LIST(tp)) {
2351 		if (!list_locked) {
2352 			lck_mtx_lock(&listp->mtx);
2353 			list_locked = TRUE;
2354 		}
2355 
2356 		if (!TIMER_IS_ON_LIST(tp)) {
2357 			LIST_INSERT_HEAD(&listp->lhead, te, le);
2358 			tp->t_flags |= TF_TIMER_ONLIST;
2359 
2360 			listp->entries++;
2361 			if (listp->entries > listp->maxentries) {
2362 				listp->maxentries = listp->entries;
2363 			}
2364 
2365 			/* if the list is not scheduled, just schedule it */
2366 			if (!listp->scheduled) {
2367 				goto schedule;
2368 			}
2369 		}
2370 	}
2371 
2372 	/*
2373 	 * Timer entry is currently on the list, check if the list needs
2374 	 * to be rescheduled.
2375 	 */
2376 	if (need_to_resched_timerlist(te->runtime, mode)) {
2377 		tcp_resched_timerlist++;
2378 
2379 		if (!list_locked) {
2380 			lck_mtx_lock(&listp->mtx);
2381 			list_locked = TRUE;
2382 		}
2383 
2384 		VERIFY_NEXT_LINK(te, le);
2385 		VERIFY_PREV_LINK(te, le);
2386 
2387 		if (listp->running) {
2388 			listp->pref_mode |= mode;
2389 			if (listp->pref_offset == 0 ||
2390 			    offset < listp->pref_offset) {
2391 				listp->pref_offset = offset;
2392 			}
2393 		} else {
2394 			/*
2395 			 * The list could have been rescheduled while
2396 			 * this thread was waiting for the lock.
2397 			 */
2398 			if (listp->scheduled) {
2399 				int32_t diff;
2400 				diff = timer_diff(listp->runtime, 0,
2401 				    tcp_now, offset);
2402 				if (diff <= 0) {
2403 					goto done;
2404 				} else {
2405 					goto schedule;
2406 				}
2407 			} else {
2408 				goto schedule;
2409 			}
2410 		}
2411 	}
2412 	goto done;
2413 
2414 schedule:
2415 	/*
2416 	 * Since a connection with timers is getting scheduled, the timer
2417 	 * list moves from idle to active state, which is why idleruns is
2418 	 * reset.
2419 	 */
2420 	if (mode & TCP_TIMERLIST_10MS_MODE) {
2421 		listp->mode = TCP_TIMERLIST_10MS_MODE;
2422 		listp->idleruns = 0;
2423 		offset = min(offset, TCP_TIMER_10MS_QUANTUM);
2424 	} else if (mode & TCP_TIMERLIST_100MS_MODE) {
2425 		if (listp->mode > TCP_TIMERLIST_100MS_MODE) {
2426 			listp->mode = TCP_TIMERLIST_100MS_MODE;
2427 		}
2428 		listp->idleruns = 0;
2429 		offset = min(offset, TCP_TIMER_100MS_QUANTUM);
2430 	}
2431 	tcp_sched_timerlist(offset);
2432 
2433 done:
2434 	if (list_locked) {
2435 		lck_mtx_unlock(&listp->mtx);
2436 	}
2437 
2438 	return;
2439 }
2440 
2441 static inline void
2442 tcp_set_lotimer_index(struct tcpcb *tp)
2443 {
2444 	uint16_t i, lo_index = TCPT_NONE, mode = 0;
2445 	uint32_t lo_timer = 0;
2446 	for (i = 0; i < TCPT_NTIMERS; ++i) {
2447 		if (tp->t_timer[i] != 0) {
2448 			TCP_SET_TIMER_MODE(mode, i);
2449 			if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
2450 				lo_timer = tp->t_timer[i];
2451 				lo_index = i;
2452 			}
2453 		}
2454 	}
2455 	tp->tentry.index = lo_index;
2456 	tp->tentry.mode = mode;
2457 	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2458 
2459 	if (tp->tentry.index != TCPT_NONE) {
2460 		tp->tentry.runtime = tp->tentry.timer_start
2461 		    + tp->t_timer[tp->tentry.index];
2462 		if (tp->tentry.runtime == 0) {
2463 			tp->tentry.runtime++;
2464 		}
2465 	}
2466 }
2467 
2468 void
2469 tcp_check_timer_state(struct tcpcb *tp)
2470 {
2471 	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
2472 
2473 	if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) {
2474 		return;
2475 	}
2476 
2477 	tcp_set_lotimer_index(tp);
2478 
2479 	tcp_sched_timers(tp);
2480 	return;
2481 }
2482 
2483 static inline void
2484 tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
2485 {
2486 	/* handle wrap around */
2487 	int32_t diff = (int32_t) (cur - *prev);
2488 	if (diff > 0) {
2489 		*dest = diff;
2490 	} else {
2491 		*dest = 0;
2492 	}
2493 	*prev = cur;
2494 	return;
2495 }
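/*
 * Usage sketch for the helper above (hypothetical values, not part of the
 * reporting path): the modular subtraction makes the per-interval delta
 * survive 32-bit counter wrap-around:
 *
 *	u_int32_t prev = 0xfffffff0, delta;
 *	tcp_cumulative_stat(0x0000000a, &prev, &delta);
 *	// delta == 26, since (int32_t)(0x0000000a - 0xfffffff0) == 26
 */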
2496 
2497 static inline void
2498 tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
2499 {
2500 	/* handle wrap around */
2501 	int64_t diff = (int64_t) (cur - *prev);
2502 	if (diff > 0) {
2503 		*dest = diff;
2504 	} else {
2505 		*dest = 0;
2506 	}
2507 	*prev = cur;
2508 	return;
2509 }
2510 
2511 __private_extern__ void
2512 tcp_report_stats(void)
2513 {
2514 	struct nstat_sysinfo_data data;
2515 	struct sockaddr_in dst;
2516 	struct sockaddr_in6 dst6;
2517 	struct rtentry *rt = NULL;
2518 	static struct tcp_last_report_stats prev;
2519 	u_int64_t var, uptime;
2520 
2521 #define stat    data.u.tcp_stats
2522 	if (((uptime = net_uptime()) - tcp_last_report_time) <
2523 	    tcp_report_stats_interval) {
2524 		return;
2525 	}
2526 
2527 	tcp_last_report_time = uptime;
2528 
2529 	bzero(&data, sizeof(data));
2530 	data.flags = NSTAT_SYSINFO_TCP_STATS;
2531 
2532 	bzero(&dst, sizeof(dst));
2533 	dst.sin_len = sizeof(dst);
2534 	dst.sin_family = AF_INET;
2535 
2536 	/* ipv4 avg rtt */
2537 	lck_mtx_lock(rnh_lock);
2538 	rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
2539 	    rt_tables[AF_INET], IFSCOPE_NONE);
2540 	lck_mtx_unlock(rnh_lock);
2541 	if (rt != NULL) {
2542 		RT_LOCK(rt);
2543 		if (rt_primary_default(rt, rt_key(rt)) &&
2544 		    rt->rt_stats != NULL) {
2545 			stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
2546 		}
2547 		RT_UNLOCK(rt);
2548 		rtfree(rt);
2549 		rt = NULL;
2550 	}
2551 
2552 	/* ipv6 avg rtt */
2553 	bzero(&dst6, sizeof(dst6));
2554 	dst6.sin6_len = sizeof(dst6);
2555 	dst6.sin6_family = AF_INET6;
2556 
2557 	lck_mtx_lock(rnh_lock);
2558 	rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
2559 	    rt_tables[AF_INET6], IFSCOPE_NONE);
2560 	lck_mtx_unlock(rnh_lock);
2561 	if (rt != NULL) {
2562 		RT_LOCK(rt);
2563 		if (rt_primary_default(rt, rt_key(rt)) &&
2564 		    rt->rt_stats != NULL) {
2565 			stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
2566 		}
2567 		RT_UNLOCK(rt);
2568 		rtfree(rt);
2569 		rt = NULL;
2570 	}
2571 
2572 	/* send packet loss rate, shift by 10 for precision */
2573 	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
2574 		var = tcpstat.tcps_sndrexmitpack << 10;
2575 		stat.send_plr = (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2576 	}
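	/*
	 * The << 10 keeps extra precision in integer math; e.g. with
	 * hypothetical counters of 10 retransmits out of 1000 packets sent,
	 * ((10 << 10) * 100) / 1000 == 1024, i.e. a 1% loss rate scaled by
	 * 1024.
	 */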
2577 
2578 	/* recv packet loss rate, shift by 10 for precision */
2579 	if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
2580 		var = tcpstat.tcps_recovered_pkts << 10;
2581 		stat.recv_plr = (uint32_t)((var * 100) / tcpstat.tcps_rcvpack);
2582 	}
2583 
2584 	/* RTO after tail loss, shift by 10 for precision */
2585 	if (tcpstat.tcps_sndrexmitpack > 0
2586 	    && tcpstat.tcps_tailloss_rto > 0) {
2587 		var = tcpstat.tcps_tailloss_rto << 10;
2588 		stat.send_tlrto_rate =
2589 		    (uint32_t)((var * 100) / tcpstat.tcps_sndrexmitpack);
2590 	}
2591 
2592 	/* packet reordering */
2593 	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
2594 		var = tcpstat.tcps_reordered_pkts << 10;
2595 		stat.send_reorder_rate =
2596 		    (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2597 	}
2598 
2599 	if (tcp_ecn_outbound == 1) {
2600 		stat.ecn_client_enabled = 1;
2601 	}
2602 	if (tcp_ecn_inbound == 1) {
2603 		stat.ecn_server_enabled = 1;
2604 	}
2605 	tcp_cumulative_stat(tcpstat.tcps_connattempt,
2606 	    &prev.tcps_connattempt, &stat.connection_attempts);
2607 	tcp_cumulative_stat(tcpstat.tcps_accepts,
2608 	    &prev.tcps_accepts, &stat.connection_accepts);
2609 	tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
2610 	    &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
2611 	tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
2612 	    &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
2613 	tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
2614 	    &prev.tcps_ecn_client_success, &stat.ecn_client_success);
2615 	tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
2616 	    &prev.tcps_ecn_server_success, &stat.ecn_server_success);
2617 	tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
2618 	    &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
2619 	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
2620 	    &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
2621 	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
2622 	    &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
2623 	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
2624 	    &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
2625 	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
2626 	    &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
2629 	tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
2630 	    &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
2633 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
2634 	    &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
2635 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
2636 	    &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
2637 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
2638 	    &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
2639 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
2640 	    &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
2641 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
2642 	    &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
2643 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
2644 	    &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
2645 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
2646 	    &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
2647 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
2648 	    &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
2649 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
2650 	    &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
2651 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
2652 	    &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
2653 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
2654 	    &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
2655 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
2656 	    &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
2657 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
2658 	    &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
2659 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
2660 	    &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
2661 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
2662 	    &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
2663 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
2664 	    &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
2665 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
2666 	    &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
2667 	tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
2668 	    &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
2669 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
2670 	    &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
2671 	tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
2672 	    &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
2673 	tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
2674 	    &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
2675 	tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
2676 	    &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
2677 
2678 
2679 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
2680 	    &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
2681 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
2682 	    &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
2683 	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
2684 	    &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
2685 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
2686 	    &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
2687 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
2688 	    &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
2689 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
2690 	    &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
2691 	tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
2692 	    &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
2693 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
2694 	    &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
2695 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
2696 	    &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
2697 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
2698 	    &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
2699 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
2700 	    &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
2701 	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
2702 	    &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
2703 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
2704 	    &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
2705 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
2706 	    &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
2707 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
2708 	    &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
2709 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
2710 	    &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
2711 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
2712 	    &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
2713 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
2714 	    &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
2715 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
2716 	    &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
2717 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
2718 	    &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
2719 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
2720 	    &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
2721 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
2722 	    &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
2723 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
2724 	    &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
2725 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
2726 	    &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
2727 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
2728 	    &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
2729 	tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
2730 	    &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
2731 	tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
2732 	    &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
2733 	tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
2734 	    &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);
2735 	tcp_cumulative_stat(tcpstat.tcps_mptcp_triggered_cell,
2736 	    &prev.tcps_mptcp_triggered_cell, &stat.mptcp_triggered_cell);
2737 
2738 	nstat_sysinfo_send_data(&data);
2739 
2740 #undef  stat
2741 }
2742 
2743 void
2744 tcp_interface_send_probe(u_int16_t probe_if_index)
2745 {
2746 	int32_t offset = 0;
2747 	struct tcptimerlist *listp = &tcp_timer_list;
2748 
2749 	/* Make sure TCP clock is up to date */
2750 	calculate_tcp_clock();
2751 
2752 	lck_mtx_lock(&listp->mtx);
2753 	if (listp->probe_if_index > 0 && listp->probe_if_index != probe_if_index) {
2754 		tcpstat.tcps_probe_if_conflict++;
2755 		os_log(OS_LOG_DEFAULT,
2756 		    "%s: probe_if_index %u conflicts with %u, tcps_probe_if_conflict %u\n",
2757 		    __func__, probe_if_index, listp->probe_if_index,
2758 		    tcpstat.tcps_probe_if_conflict);
2759 		goto done;
2760 	}
2761 
2762 	listp->probe_if_index = probe_if_index;
2763 	if (listp->running) {
2764 		os_log(OS_LOG_DEFAULT, "%s: timer list already running for if_index %u\n",
2765 		    __func__, probe_if_index);
2766 		goto done;
2767 	}
2768 
2769 	/*
2770 	 * Reschedule the timerlist to run within the next 10ms, which is
2771 	 * the fastest that we can do.
2772 	 */
2773 	offset = TCP_TIMER_10MS_QUANTUM;
2774 	if (listp->scheduled) {
2775 		int32_t diff;
2776 		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2777 		if (diff <= 0) {
2778 			/* The timer will fire sooner than what's needed */
2779 			os_log(OS_LOG_DEFAULT,
2780 			    "%s: timer will fire sooner than needed for if_index %u\n",
2781 			    __func__, probe_if_index);
2782 			goto done;
2783 		}
2784 	}
2785 	listp->mode = TCP_TIMERLIST_10MS_MODE;
2786 	listp->idleruns = 0;
2787 
2788 	tcp_sched_timerlist(offset);
2789 
2790 done:
2791 	lck_mtx_unlock(&listp->mtx);
2792 	return;
2793 }
2794 
2795 /*
2796  * Enable read probes on this connection, if:
2797  * - it is in established state
2798  * - doesn't have any data outstanding
2799  * - the outgoing ifp matches
2800  * - we have not already sent any read probes
2801  */
2802 static void
2803 tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
2804 {
2805 	if (tp->t_state == TCPS_ESTABLISHED &&
2806 	    tp->snd_max == tp->snd_una &&
2807 	    tp->t_inpcb->inp_last_outifp == ifp &&
2808 	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
2809 	    tp->t_rtimo_probes == 0) {
2810 		tp->t_flagsext |= TF_DETECT_READSTALL;
2811 		tp->t_rtimo_probes = 0;
2812 		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2813 		    TCP_TIMER_10MS_QUANTUM);
2814 		if (tp->tentry.index == TCPT_NONE) {
2815 			tp->tentry.index = TCPT_KEEP;
2816 			tp->tentry.runtime = tcp_now +
2817 			    TCP_TIMER_10MS_QUANTUM;
2818 		} else {
2819 			int32_t diff = 0;
2820 
2821 			/* Reset runtime to be in next 10ms */
2822 			diff = timer_diff(tp->tentry.runtime, 0,
2823 			    tcp_now, TCP_TIMER_10MS_QUANTUM);
2824 			if (diff > 0) {
2825 				tp->tentry.index = TCPT_KEEP;
2826 				tp->tentry.runtime = tcp_now +
2827 				    TCP_TIMER_10MS_QUANTUM;
2828 				if (tp->tentry.runtime == 0) {
2829 					tp->tentry.runtime++;
2830 				}
2831 			}
2832 		}
2833 	}
2834 }
2835 
2836 /*
2837  * Disable read probe and reset the keep alive timer
2838  */
2839 static void
2840 tcp_disable_read_probe(struct tcpcb *tp)
2841 {
2842 	if (tp->t_adaptive_rtimo == 0 &&
2843 	    ((tp->t_flagsext & TF_DETECT_READSTALL) ||
2844 	    tp->t_rtimo_probes > 0)) {
2845 		tcp_keepalive_reset(tp);
2846 
2847 		if (tp->t_mpsub) {
2848 			mptcp_reset_keepalive(tp);
2849 		}
2850 	}
2851 }
2852 
2853 /*
2854  * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
2855  * probes on connections going over a particular interface.
2856  */
2857 void
2858 tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
2859 {
2860 	int32_t offset;
2861 	struct tcptimerlist *listp = &tcp_timer_list;
2862 	struct inpcbinfo *pcbinfo = &tcbinfo;
2863 	struct inpcb *inp, *nxt;
2864 
2865 	if (ifp == NULL) {
2866 		return;
2867 	}
2868 
2869 	/* update clock */
2870 	calculate_tcp_clock();
2871 
2872 	/*
2873 	 * Enable keep alive timer on all connections that are
2874 	 * active/established on this interface.
2875 	 */
2876 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
2877 
2878 	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
2879 		struct tcpcb *tp = NULL;
2880 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
2881 		    WNT_STOPUSING) {
2882 			continue;
2883 		}
2884 
2885 		/* Acquire lock to look at the state of the connection */
2886 		socket_lock(inp->inp_socket, 1);
2887 
2888 		/* Release the want count */
2889 		if (inp->inp_ppcb == NULL ||
2890 		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
2891 			socket_unlock(inp->inp_socket, 1);
2892 			continue;
2893 		}
2894 		tp = intotcpcb(inp);
2895 		if (enable) {
2896 			tcp_enable_read_probe(tp, ifp);
2897 		} else {
2898 			tcp_disable_read_probe(tp);
2899 		}
2900 
2901 		socket_unlock(inp->inp_socket, 1);
2902 	}
2903 	lck_rw_done(&pcbinfo->ipi_lock);
2904 
2905 	lck_mtx_lock(&listp->mtx);
2906 	if (listp->running) {
2907 		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
2908 		goto done;
2909 	}
2910 
2911 	/* Reschedule within the next 10ms */
2912 	offset = TCP_TIMER_10MS_QUANTUM;
2913 	if (listp->scheduled) {
2914 		int32_t diff;
2915 		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2916 		if (diff <= 0) {
2917 			/* The timer will fire sooner than what's needed */
2918 			goto done;
2919 		}
2920 	}
2921 	listp->mode = TCP_TIMERLIST_10MS_MODE;
2922 	listp->idleruns = 0;
2923 
2924 	tcp_sched_timerlist(offset);
2925 done:
2926 	lck_mtx_unlock(&listp->mtx);
2927 	return;
2928 }
2929 
2930 inline void
2931 tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
2932 {
2933 	struct if_cellular_status_v1 *ifsr;
2934 	u_int32_t optlen;
2935 	ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2936 	if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2937 		optlen = tp->t_maxopd - tp->t_maxseg;
2938 
2939 		if (ifsr->mss_recommended ==
2940 		    IF_CELL_UL_MSS_RECOMMENDED_NONE &&
2941 		    tp->t_cached_maxopd > 0 &&
2942 		    tp->t_maxopd < tp->t_cached_maxopd) {
2943 			tp->t_maxopd = tp->t_cached_maxopd;
2944 			tcpstat.tcps_mss_to_default++;
2945 		} else if (ifsr->mss_recommended ==
2946 		    IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
2947 		    tp->t_maxopd > tcp_mss_rec_medium) {
2948 			tp->t_cached_maxopd = tp->t_maxopd;
2949 			tp->t_maxopd = tcp_mss_rec_medium;
2950 			tcpstat.tcps_mss_to_medium++;
2951 		} else if (ifsr->mss_recommended ==
2952 		    IF_CELL_UL_MSS_RECOMMENDED_LOW &&
2953 		    tp->t_maxopd > tcp_mss_rec_low) {
2954 			tp->t_cached_maxopd = tp->t_maxopd;
2955 			tp->t_maxopd = tcp_mss_rec_low;
2956 			tcpstat.tcps_mss_to_low++;
2957 		}
2958 		tp->t_maxseg = tp->t_maxopd - optlen;
2959 
2960 		/*
2961 		 * clear the cached value if it is the same as the current one
2962 		 */
2963 		if (tp->t_maxopd == tp->t_cached_maxopd) {
2964 			tp->t_cached_maxopd = 0;
2965 		}
2966 	}
2967 }
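/*
 * Illustrative walk-through of the clamping above (hypothetical values;
 * the tcp_mss_rec_* thresholds are tunable): with t_maxopd == 1440 and
 * t_maxseg == 1428, optlen == 12 (e.g. timestamps). A RECOMMENDED_MEDIUM
 * hint with tcp_mss_rec_medium == 1080 caches the old 1440, sets
 * t_maxopd = 1080 and t_maxseg = 1068, preserving the option overhead;
 * a later RECOMMENDED_NONE restores the cached 1440.
 */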
2968 
2969 void
2970 tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
2971 {
2972 	struct inpcb *inp = sotoinpcb(so);
2973 	struct tcpcb *tp = intotcpcb(inp);
2974 
2975 	if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) {
2976 		return;
2977 	}
2978 
2979 	if (!IFNET_IS_CELLULAR(ifp)) {
2980 		/*
2981 		 * This optimization is implemented for cellular
2982 		 * networks only
2983 		 */
2984 		return;
2985 	}
2986 	if (tp->t_state <= TCPS_CLOSE_WAIT) {
2987 		/*
2988 		 * If the connection is currently doing or has done PMTU
2989 		 * blackhole detection, do not change the MSS
2990 		 */
2991 		if (tp->t_flags & TF_BLACKHOLE) {
2992 			return;
2993 		}
2994 		if (ifp->if_link_status == NULL) {
2995 			return;
2996 		}
2997 		tcp_update_mss_core(tp, ifp);
2998 	}
2999 }
3000 
3001 void
3002 tcp_itimer(struct inpcbinfo *ipi)
3003 {
3004 	struct inpcb *inp, *nxt;
3005 
3006 	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
3007 		if (tcp_itimer_done == TRUE) {
3008 			tcp_itimer_done = FALSE;
3009 			os_atomic_inc(&ipi->ipi_timer_req.intimer_fast, relaxed);
3010 			return;
3011 		}
3012 		/* Try-lock failed above; block and take the lock exclusively */
3013 		lck_rw_lock_exclusive(&ipi->ipi_lock);
3014 	}
3015 	tcp_itimer_done = TRUE;
3016 
3017 	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
3018 		struct socket *so;
3019 		struct ifnet *ifp;
3020 
3021 		if (inp->inp_ppcb == NULL ||
3022 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
3023 			continue;
3024 		}
3025 		so = inp->inp_socket;
3026 		ifp = inp->inp_last_outifp;
3027 		socket_lock(so, 1);
3028 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3029 			socket_unlock(so, 1);
3030 			continue;
3031 		}
3032 		so_check_extended_bk_idle_time(so);
3033 		if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
3034 			tcp_update_mss_locked(so, NULL);
3035 		}
3036 		socket_unlock(so, 1);
3037 
3038 		/*
3039 		 * Defunct all system-initiated background sockets if the
3040 		 * socket is using the cellular interface and the interface
3041 		 * has its LQM set to abort.
3042 		 */
3043 		if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
3044 		    IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
3045 		    ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
3046 		    (ifp->if_interface_state.valid_bitmask &
3047 		    IF_INTERFACE_STATE_LQM_STATE_VALID) &&
3048 		    ifp->if_interface_state.lqm_state ==
3049 		    IFNET_LQM_THRESH_ABORT) {
3050 			socket_defunct(current_proc(), so,
3051 			    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
3052 		}
3053 	}
3054 
3055 	ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
3056 	lck_rw_done(&ipi->ipi_lock);
3057 }
3058