1 /*
2 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
61 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.16 2001/08/22 00:59:12 silby Exp $
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include "tcp_includes.h"
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/sysctl.h>
76 #include <sys/malloc.h>
77 #include <sys/mbuf.h>
78 #include <sys/proc.h> /* for proc0 declaration */
79 #include <sys/protosw.h>
80 #include <sys/socket.h>
81 #include <sys/socketvar.h>
82 #include <sys/syslog.h>
83 #include <sys/mcache.h>
84 #include <sys/kauth.h>
85 #include <kern/cpu_number.h> /* before tcp_seq.h, for tcp_random18() */
86
87 #include <machine/endian.h>
88
89 #include <net/if.h>
90 #include <net/if_types.h>
91 #include <net/route.h>
92 #include <net/ntstat.h>
93 #include <net/content_filter.h>
94 #include <net/dlil.h>
95 #include <net/multi_layer_pkt_log.h>
96 #include <net/droptap.h>
97
98 #include <netinet/in.h>
99 #include <netinet/in_systm.h>
100 #include <netinet/ip.h>
101 #include <netinet/ip_icmp.h> /* for ICMP_BANDLIM */
102 #include <netinet/in_var.h>
103 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
104 #include <netinet/in_pcb.h>
105 #include <netinet/ip_var.h>
106 #include <mach/sdt.h>
107 #include <netinet/ip6.h>
108 #include <netinet/icmp6.h>
109 #include <netinet6/nd6.h>
110 #include <netinet6/ip6_var.h>
111 #include <netinet6/in6_pcb.h>
112 #include <netinet/tcp_syncookie.h>
113 #include <netinet/tcp.h>
114 #include <netinet/tcp_cache.h>
115 #include <netinet/tcp_fsm.h>
116 #include <netinet/tcp_seq.h>
117 #include <netinet/tcp_timer.h>
118 #include <netinet/tcp_var.h>
119 #include <netinet/tcp_cc.h>
120 #include <dev/random/randomdev.h>
121 #include <kern/zalloc.h>
122 #include <netinet6/tcp6_var.h>
123 #include <netinet/tcpip.h>
124 #include <netinet/tcp_log.h>
125
126 #if IPSEC
127 #include <netinet6/ipsec.h>
128 #include <netinet6/ipsec6.h>
129 #include <netkey/key.h>
130 #endif /*IPSEC*/
131
132 #include <sys/kdebug.h>
133 #if MPTCP
134 #include <netinet/mptcp_var.h>
135 #include <netinet/mptcp.h>
136 #include <netinet/mptcp_opt.h>
137 #endif /* MPTCP */
138
139 #include <corecrypto/ccaes.h>
140 #include <net/sockaddr_utils.h>
141
142 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 0)
143 #define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 2)
144 #define DBG_FNC_TCP_INPUT NETDBG_CODE(DBG_NETTCP, (3 << 8))
145 #define DBG_FNC_TCP_NEWCONN NETDBG_CODE(DBG_NETTCP, (7 << 8))
146
147 #define TCP_RTT_HISTORY_EXPIRE_TIME (60 * TCP_RETRANSHZ)
148 #define TCP_RECV_THROTTLE_WIN (5 * TCP_RETRANSHZ)
149
150 struct tcpstat tcpstat;
151
152 static int log_in_vain = 0;
153 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain,
154 CTLFLAG_RW | CTLFLAG_LOCKED, &log_in_vain, 0,
155 "Log all incoming TCP connections");
156
157 static int blackhole = 0;
158 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole,
159 CTLFLAG_RW | CTLFLAG_LOCKED, &blackhole, 0,
160 "Do not send RST when dropping refused connections");
161
162 SYSCTL_SKMEM_TCP_INT(OID_AUTO, delayed_ack,
163 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_delack_enabled, 3,
164 "Delay ACK to try and piggyback it onto a data packet");
165
166 SYSCTL_SKMEM_TCP_INT(OID_AUTO, recvbg, CTLFLAG_RW | CTLFLAG_LOCKED,
167 int, tcp_recv_bg, 0, "Receive background");
168
169 SYSCTL_SKMEM_TCP_INT(OID_AUTO, drop_synfin,
170 CTLFLAG_RW | CTLFLAG_LOCKED, static int, drop_synfin, 1,
171 "Drop TCP packets with SYN+FIN set");
172
173 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
174 "TCP Segment Reassembly Queue");
175
176 static int tcp_reass_overflows = 0;
177 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows,
178 CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_reass_overflows, 0,
179 "Global number of TCP segment reassembly queue overflows");
180
181 int tcp_reass_total_qlen = 0;
182 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, qlen,
183 CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_reass_total_qlen, 0,
184 "Total number of TCP segments in reassembly queues");
185
186
187 SYSCTL_SKMEM_TCP_INT(OID_AUTO, slowlink_wsize, CTLFLAG_RW | CTLFLAG_LOCKED,
188 __private_extern__ int, slowlink_wsize, 8192,
189 "Maximum advertised window size for slowlink");
190
191 SYSCTL_SKMEM_TCP_INT(OID_AUTO, maxseg_unacked,
192 CTLFLAG_RW | CTLFLAG_LOCKED, int, maxseg_unacked, 8,
193 "Maximum number of outstanding segments left unacked");
194
195 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3465, CTLFLAG_RW | CTLFLAG_LOCKED,
196 int, tcp_do_rfc3465, 1, "");
197
198 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3465_lim2,
199 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_rfc3465_lim2, 1,
200 "Appropriate bytes counting w/ L=2*SMSS");
201
202 int rtt_samples_per_slot = 20;
203
204 int tcp_acc_iaj_high_thresh = ACC_IAJ_HIGH_THRESH;
205 u_int32_t tcp_autorcvbuf_inc_shift = 3;
206 SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_allowed_iaj,
207 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_allowed_iaj, ALLOWED_IAJ,
208 "Allowed inter-packet arrival jiter");
209
210 SYSCTL_SKMEM_TCP_INT(OID_AUTO, doautorcvbuf,
211 CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_do_autorcvbuf, 1,
212 "Enable automatic socket buffer tuning");
213
214 /* ToDo - remove once uTCP stops using it. */
215 SYSCTL_SKMEM_TCP_INT(OID_AUTO, autotunereorder,
216 CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_autotune_reorder, 1,
217 "Enable automatic socket buffer tuning even when reordering is present");
218
219 SYSCTL_SKMEM_TCP_INT(OID_AUTO, autorcvbufmax,
220 CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, u_int32_t, tcp_autorcvbuf_max, 2 * 1024 * 1024,
221 "Maximum receive socket buffer size");
222
223 int tcp_disable_access_to_stats = 1;
224 SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_access_to_stats,
225 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_disable_access_to_stats, 0,
226 "Disable access to tcpstat");
227
228 SYSCTL_SKMEM_TCP_INT(OID_AUTO, challengeack_limit,
229 CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_challengeack_limit, 10,
230 "Maximum number of challenge ACKs per connection per second");
231
232 SYSCTL_SKMEM_TCP_INT(OID_AUTO, use_min_curr_rtt,
233 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_use_min_curr_rtt, 1,
234 "Use a min of k=4 RTT samples for congestion controllers");
235
236 SYSCTL_SKMEM_TCP_INT(OID_AUTO, awdl_rtobase,
237 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_awdl_rtobase, 100,
238 "Initial RTO for AWDL interface");
239
240 int tcp_syncookie = 0;
241 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookie,
242 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_syncookie, 1,
243 "0: disable, 1: Use SYN cookies when backlog is full, 2: Always use SYN cookies");
244
245 extern int tcp_acc_iaj_high;
246 extern int tcp_acc_iaj_react_limit;
247 extern int tcp_fin_timeout;
248
249 uint8_t tcprexmtthresh = 3;
250
251 uint32_t tcp_now;
252
253 struct inpcbhead tcb;
254 #define tcb6 tcb /* for KAME src sync over BSD*'s */
255 struct inpcbinfo tcbinfo;
256
257 static void tcp_dooptions(struct tcpcb *, u_char *cp0 __counted_by(cnt0), int cnt0, struct tcphdr *,
258 struct tcpopt *);
259 static void tcp_finalize_options(struct tcpcb *, struct tcpopt *, unsigned int);
260 static void tcp_pulloutofband(struct socket *,
261 struct tcphdr *, struct mbuf *, int);
262 static void tcp_xmit_timer(struct tcpcb *, int, u_int32_t, tcp_seq);
263 static inline unsigned int tcp_maxmtu(struct rtentry *);
264 static inline void tcp_adaptive_rwtimo_check(struct tcpcb *, int);
265
266 #if TRAFFIC_MGT
267 static inline void compute_iaj(struct tcpcb *tp);
268 static inline void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj);
269 #endif /* TRAFFIC_MGT */
270
271 static inline unsigned int tcp_maxmtu6(struct rtentry *);
272 unsigned int get_maxmtu(struct rtentry *);
273
274 static void tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sb,
275 struct tcpopt *to, uint32_t tlen);
276 void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb);
277 static void tcp_sbsnd_trim(struct sockbuf *sbsnd);
278 static inline void tcp_sbrcv_tstmp_check(struct tcpcb *tp);
279 static inline void tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sb,
280 u_int32_t newsize, u_int32_t idealsize, u_int32_t rcvbuf_max);
281 static void tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th);
282 static void tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to,
283 struct tcphdr *th);
284 static void tcp_compute_rcv_rtt(struct tcpcb *tp, struct tcpopt *to,
285 struct tcphdr *th);
286 static void tcp_early_rexmt_check(struct tcpcb *tp, struct tcphdr *th);
287 static void tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th,
288 struct tcpopt *to);
289 /*
290 * Constants used for resizing receive socket buffer
291 * when timestamps are not supported
292 */
293 #define TCPTV_RCVNOTS_QUANTUM 100
294 #define TCP_RCVNOTS_BYTELEVEL 204800
295
296 /*
297 * Constants used for limiting early retransmits
298 * to 10 per minute.
299 */
300 #define TCP_EARLY_REXMT_WIN (60 * TCP_RETRANSHZ) /* 60 seconds */
301 #define TCP_EARLY_REXMT_LIMIT 10
302
303 #define log_in_vain_log( a ) { log a; }
304
305 /* ToDo - to be removed once uTCP stops using it */
306 #define TCP_RCV_SS_PKTCOUNT 512
307 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rcvsspktcnt, CTLFLAG_RW | CTLFLAG_LOCKED,
308 int, tcp_rcvsspktcnt, TCP_RCV_SS_PKTCOUNT, "packets to be seen before receiver stretches acks");
309
310 #define DELAY_ACK(tp, th) \
311 (CC_ALGO(tp)->delay_ack != NULL && CC_ALGO(tp)->delay_ack(tp, th))
312
313 static int tcp_dropdropablreq(struct socket *head);
314 static void tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th);
315 static void update_base_rtt(struct tcpcb *tp, uint32_t rtt);
316 void tcp_set_background_cc(struct socket *so);
317 void tcp_set_foreground_cc(struct socket *so);
318 static void tcp_set_new_cc(struct socket *so, uint8_t cc_index);
319 static void tcp_bwmeas_check(struct tcpcb *tp);
320
321 #if TRAFFIC_MGT
322 void
reset_acc_iaj(struct tcpcb * tp)323 reset_acc_iaj(struct tcpcb *tp)
324 {
325 tp->acc_iaj = 0;
326 CLEAR_IAJ_STATE(tp);
327 }
328
329 static inline void
update_iaj_state(struct tcpcb * tp,int size,int rst_size)330 update_iaj_state(struct tcpcb *tp, int size, int rst_size)
331 {
332 if (rst_size > 0) {
333 tp->iaj_size = 0;
334 }
335 if (tp->iaj_size == 0 || size >= tp->iaj_size) {
336 tp->iaj_size = size;
337 tp->iaj_rcv_ts = tcp_now;
338 tp->iaj_small_pkt = 0;
339 }
340 }
341
342 /* For every 64-bit unsigned integer(v), this function will find the
343 * largest 32-bit integer n such that (n*n <= v). This takes at most 32 iterations
344 * irrespective of the value of v and does not involve multiplications.
345 */
346 static inline uint32_t
isqrt(uint64_t val)347 isqrt(uint64_t val)
348 {
349 uint32_t sqrt_cache[11] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100};
350 uint64_t temp, g = 0, b = 1 << 31, bshft = 31;
351 if (val <= 100) {
352 for (g = 0; g <= 10; ++g) {
353 if (sqrt_cache[g] > val) {
354 g--;
355 break;
356 } else if (sqrt_cache[g] == val) {
357 break;
358 }
359 }
360 } else {
361 do {
362 temp = (((g << 1) + b) << (bshft--));
363 if (val >= temp) {
364 g += b;
365 val -= temp;
366 }
367 b >>= 1;
368 } while (b > 0 && val > 0);
369 }
370 return (uint32_t)g;
371 }
372
373 static inline void
compute_iaj_meat(struct tcpcb * tp,uint32_t cur_iaj)374 compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj)
375 {
376 /* When accumulated IAJ reaches MAX_ACC_IAJ in milliseconds,
377 * throttle the receive window to a minimum of MIN_IAJ_WIN packets
378 */
379 #define MAX_ACC_IAJ (tcp_acc_iaj_high_thresh + tcp_acc_iaj_react_limit)
380 #define IAJ_DIV_SHIFT 4
381 #define IAJ_ROUNDUP_CONST (1 << (IAJ_DIV_SHIFT - 1))
382
383 uint32_t allowed_iaj, acc_iaj = 0;
384
385 /* Using 64-bit storage for the inter-arrival jitter deviation,
386 * to avoid accidentally rolling over if the inter-arrival time exceeds 62 seconds.
387 */
388 int64_t mean, temp, cur_iaj_dev;
389
390 cur_iaj_dev = (cur_iaj - tp->avg_iaj);
391
392 /* Allow a jitter of "allowed_iaj" milliseconds. Some connections
393 * may have a constant jitter more than that. We detect this by
394 * using standard deviation.
395 */
396 allowed_iaj = tp->avg_iaj + tp->std_dev_iaj;
397 if (allowed_iaj < tcp_allowed_iaj) {
398 allowed_iaj = tcp_allowed_iaj;
399 }
400
401 /* Initially when the connection starts, the senders congestion
402 * window is small. During this period we avoid throttling a
403 * connection because we do not have a good starting point for
404 * allowed_iaj. IAJ_IGNORE_PKTCNT is used to quietly gloss over
405 * the first few packets.
406 */
407 if (tp->iaj_pktcnt > IAJ_IGNORE_PKTCNT) {
408 if (cur_iaj <= allowed_iaj) {
409 if (tp->acc_iaj >= 2) {
410 acc_iaj = tp->acc_iaj - 2;
411 } else {
412 acc_iaj = 0;
413 }
414 } else {
415 acc_iaj = tp->acc_iaj + (cur_iaj - allowed_iaj);
416 }
417
418 if (acc_iaj > MAX_ACC_IAJ) {
419 acc_iaj = MAX_ACC_IAJ;
420 }
421 tp->acc_iaj = acc_iaj;
422 }
423
424 /* Compute weighted average where the history has a weight of
425 * 15 out of 16 and the current value has a weight of 1 out of 16.
426 * This will make the short-term measurements have more weight.
427 *
428 * The addition of 8 will help to round-up the value
429 * instead of round-down
430 */
431 tp->avg_iaj = (((tp->avg_iaj << IAJ_DIV_SHIFT) - tp->avg_iaj)
432 + cur_iaj + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT;
433
434 /* Compute Root-mean-square of deviation where mean is a weighted
435 * average as described above.
436 */
437 temp = tp->std_dev_iaj * tp->std_dev_iaj;
438 mean = (((temp << IAJ_DIV_SHIFT) - temp)
439 + (cur_iaj_dev * cur_iaj_dev)
440 + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT;
441
442 tp->std_dev_iaj = isqrt(mean);
443
444 DTRACE_TCP3(iaj, struct tcpcb *, tp, uint32_t, cur_iaj,
445 uint32_t, allowed_iaj);
446
447 return;
448 }
449
450 static inline void
compute_iaj(struct tcpcb * tp)451 compute_iaj(struct tcpcb *tp)
452 {
453 compute_iaj_meat(tp, (tcp_now - tp->iaj_rcv_ts));
454 }
455 #endif /* TRAFFIC_MGT */
456
457 /*
458 * Perform rate limit check per connection per second
459 * tp->t_challengeack_last is the last_time diff was greater than 1sec
460 * tp->t_challengeack_count is the number of ACKs sent (within 1sec)
461 * Return TRUE if we shouldn't send the ACK due to rate limitation
462 * Return FALSE if it is still ok to send challenge ACK
463 */
464 static boolean_t
tcp_is_ack_ratelimited(struct tcpcb * tp)465 tcp_is_ack_ratelimited(struct tcpcb *tp)
466 {
467 boolean_t ret = TRUE;
468 uint32_t now = tcp_now;
469 int32_t diff = 0;
470
471 diff = timer_diff(now, 0, tp->t_challengeack_last, 0);
472 /* If it is first time or diff > 1000ms,
473 * update the challengeack_last and reset the
474 * current count of ACKs
475 */
476 if (tp->t_challengeack_last == 0 || diff >= 1000) {
477 tp->t_challengeack_last = now;
478 tp->t_challengeack_count = 0;
479 ret = FALSE;
480 } else if (tp->t_challengeack_count < tcp_challengeack_limit) {
481 ret = FALSE;
482 }
483
484 /* Careful about wrap-around */
485 if (ret == FALSE && (tp->t_challengeack_count + 1 > 0)) {
486 tp->t_challengeack_count++;
487 }
488
489 return ret;
490 }
491
492 /* Check if enough amount of data has been acknowledged since
493 * bw measurement was started
494 */
495 static void
tcp_bwmeas_check(struct tcpcb * tp)496 tcp_bwmeas_check(struct tcpcb *tp)
497 {
498 int32_t bw_meas_bytes;
499 uint32_t bw, bytes, elapsed_time;
500
501 if (SEQ_LEQ(tp->snd_una, tp->t_bwmeas->bw_start)) {
502 return;
503 }
504
505 bw_meas_bytes = tp->snd_una - tp->t_bwmeas->bw_start;
506 if ((tp->t_flagsext & TF_BWMEAS_INPROGRESS) &&
507 bw_meas_bytes >= (int32_t)(tp->t_bwmeas->bw_size)) {
508 bytes = bw_meas_bytes;
509 elapsed_time = tcp_now - tp->t_bwmeas->bw_ts;
510 if (elapsed_time > 0) {
511 bw = bytes / elapsed_time;
512 if (bw > 0) {
513 if (tp->t_bwmeas->bw_sndbw > 0) {
514 tp->t_bwmeas->bw_sndbw =
515 (((tp->t_bwmeas->bw_sndbw << 3)
516 - tp->t_bwmeas->bw_sndbw)
517 + bw) >> 3;
518 } else {
519 tp->t_bwmeas->bw_sndbw = bw;
520 }
521
522 /* Store the maximum value */
523 if (tp->t_bwmeas->bw_sndbw_max == 0) {
524 tp->t_bwmeas->bw_sndbw_max =
525 tp->t_bwmeas->bw_sndbw;
526 } else {
527 tp->t_bwmeas->bw_sndbw_max =
528 max(tp->t_bwmeas->bw_sndbw,
529 tp->t_bwmeas->bw_sndbw_max);
530 }
531 }
532 }
533 tp->t_flagsext &= ~(TF_BWMEAS_INPROGRESS);
534 }
535 }
536
537 static int
tcp_reass(struct tcpcb * tp,struct tcphdr * th,int * tlenp,struct mbuf * m,struct ifnet * ifp,int * dowakeup)538 tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m,
539 struct ifnet *ifp, int *dowakeup)
540 {
541 struct tseg_qent *q;
542 struct tseg_qent *p = NULL;
543 struct tseg_qent *nq;
544 struct tseg_qent *te = NULL;
545 struct inpcb *inp = tp->t_inpcb;
546 struct socket *so = inp->inp_socket;
547 int flags = 0;
548 uint32_t qlimit;
549 stats_functional_type ifnet_count_type = IFNET_COUNT_TYPE(ifp);
550 boolean_t dsack_set = FALSE;
551
552 /*
553 * If the reassembly queue already has entries or if we are going
554 * to add a new one, then the connection has reached a loss state.
555 * Reset the force-ACK counter at this point.
556 */
557 tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
558
559 #if TRAFFIC_MGT
560 if (tp->acc_iaj > 0) {
561 reset_acc_iaj(tp);
562 }
563 #endif /* TRAFFIC_MGT */
564
565 if (th->th_seq != tp->rcv_nxt) {
566 struct mbuf *tmp = m;
567
568 if (tcp_memacct_softlimit()) {
569 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_TCP_REASS_MEMORY_PRESSURE, NULL, 0);
570 tcp_reass_overflows++;
571 tcpstat.tcps_rcvmemdrop++;
572 *tlenp = 0;
573 return 0;
574 }
575
576 while (tmp != NULL) {
577 if (mbuf_class_under_pressure(tmp)) {
578 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_TCP_REASS_MEMORY_PRESSURE, NULL, 0);
579 tcp_reass_overflows++;
580 tcpstat.tcps_rcvmemdrop++;
581 *tlenp = 0;
582 return 0;
583 }
584
585 tmp = tmp->m_next;
586 }
587 }
588
589 /*
590 * Limit the number of segments in the reassembly queue to prevent
591 * holding on to too many segments (and thus running out of mbufs).
592 * Make sure to let the missing segment through which caused this
593 * queue. Always keep one global queue entry spare to be able to
594 * process the missing segment.
595 */
596 qlimit = min(max(100, so->so_rcv.sb_hiwat >> 10),
597 (tcp_autorcvbuf_max >> 10));
598 if (th->th_seq != tp->rcv_nxt &&
599 (tp->t_reassqlen + 1) >= qlimit) {
600 tcp_reass_overflows++;
601 tcpstat.tcps_rcvmemdrop++;
602 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_TCP_REASS_OVERFLOW, NULL, 0);
603 *tlenp = 0;
604 return 0;
605 }
606
607 /* Create a new queue entry. If we can't, just drop the pkt. */
608 te = tcp_create_reass_qent(tp, m, th, *tlenp);
609 if (te == NULL) {
610 m_drop_list(m, NULL,
611 DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING,
612 DROP_REASON_TCP_REASSEMBLY_ALLOC, NULL, 0);
613 *tlenp = 0;
614 return 0;
615 }
616
617 /*
618 * Find a segment which begins after this one does.
619 */
620 LIST_FOREACH(q, &tp->t_segq, tqe_q) {
621 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) {
622 break;
623 }
624 p = q;
625 }
626
627 /*
628 * If there is a preceding segment, it may provide some of
629 * our data already. If so, drop the data from the incoming
630 * segment. If it provides all of our data, drop us.
631 */
632 if (p != NULL) {
633 int i;
634 /* conversion to int (in i) handles seq wraparound */
635 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
636 if (i > 0) {
637 if (i > 1) {
638 /*
639 * Note duplicate data sequence numbers
640 * to report in DSACK option
641 */
642 tp->t_dsack_lseq = th->th_seq;
643 tp->t_dsack_rseq = th->th_seq +
644 min(i, *tlenp);
645
646 /*
647 * Report only the first part of partial/
648 * non-contiguous duplicate sequence space
649 */
650 dsack_set = TRUE;
651 }
652 if (i >= *tlenp) {
653 struct mbuf *tmp;
654
655 tcpstat.tcps_rcvduppack++;
656 tcpstat.tcps_rcvdupbyte += *tlenp;
657 if (nstat_collect) {
658 nstat_route_rx(inp->inp_route.ro_rt,
659 1, *tlenp,
660 NSTAT_RX_FLAG_DUPLICATE);
661 INP_ADD_RXSTAT(inp, ifnet_count_type, 1, *tlenp);
662 tp->t_stat.rxduplicatebytes += *tlenp;
663 }
664 tmp = tcp_destroy_reass_qent(tp, te);
665 m_freem(tmp);
666 te = NULL;
667 /*
668 * Try to present any queued data
669 * at the left window edge to the user.
670 * This is needed after the 3-WHS
671 * completes.
672 */
673 goto present;
674 }
675 m_adj(m, i);
676 *tlenp -= i;
677 te->tqe_len -= i;
678 th->th_seq += i;
679 }
680 }
681
682 if (th->th_seq != tp->rcv_nxt) {
683 tp->t_rcvoopack++;
684 tcpstat.tcps_rcvoopack++;
685 tcpstat.tcps_rcvoobyte += *tlenp;
686 if (nstat_collect) {
687 tp->t_stat.rxoutoforderbytes += *tlenp;
688 }
689 }
690
691 if (nstat_collect) {
692 nstat_route_rx(inp->inp_route.ro_rt, 1, *tlenp,
693 NSTAT_RX_FLAG_OUT_OF_ORDER);
694 INP_ADD_RXSTAT(inp, ifnet_count_type, 1, *tlenp);
695 }
696
697 /*
698 * While we overlap succeeding segments trim them or,
699 * if they are completely covered, dequeue them.
700 */
701 while (q) {
702 struct mbuf *tmp;
703
704 int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
705 if (i <= 0) {
706 break;
707 }
708
709 /*
710 * Report only the first part of partial/non-contiguous
711 * duplicate segment in dsack option. The variable
712 * dsack_set will be true if a previous entry has some of
713 * the duplicate sequence space.
714 */
715 if (i > 1 && !dsack_set) {
716 if (tp->t_dsack_lseq == 0) {
717 tp->t_dsack_lseq = q->tqe_th->th_seq;
718 tp->t_dsack_rseq =
719 tp->t_dsack_lseq + min(i, q->tqe_len);
720 } else {
721 /*
722 * this segment overlaps data in multple
723 * entries in the reassembly queue, move
724 * the right sequence number further.
725 */
726 tp->t_dsack_rseq =
727 tp->t_dsack_rseq + min(i, q->tqe_len);
728 }
729 }
730 if (i < q->tqe_len) {
731 q->tqe_th->th_seq += i;
732 q->tqe_len -= i;
733 m_adj(q->tqe_m, i);
734 break;
735 }
736
737 nq = LIST_NEXT(q, tqe_q);
738
739 LIST_REMOVE(q, tqe_q);
740
741 tmp = tcp_destroy_reass_qent(tp, q);
742
743 m_freem(tmp);
744 q = nq;
745 }
746
747 /* Insert the new segment queue entry into place. */
748 if (p == NULL) {
749 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
750 } else {
751 LIST_INSERT_AFTER(p, te, tqe_q);
752 }
753
754 present:
755 /*
756 * Present data to user, advancing rcv_nxt through
757 * completed sequence space.
758 */
759 if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
760 return 0;
761 }
762 q = LIST_FIRST(&tp->t_segq);
763 if (!q || q->tqe_th->th_seq != tp->rcv_nxt) {
764 return 0;
765 }
766
767 /*
768 * If there is already another thread doing reassembly for this
769 * connection, it is better to let it finish the job --
770 * (radar 16316196)
771 */
772 if (tp->t_flagsext & TF_REASS_INPROG) {
773 return 0;
774 }
775
776 tp->t_flagsext |= TF_REASS_INPROG;
777 /* lost packet was recovered, so ooo data can be returned */
778 tcpstat.tcps_recovered_pkts++;
779
780 do {
781 uint8_t psh = q->tqe_th->th_flags & TH_PUSH;
782 struct mbuf *tmp;
783
784 tp->rcv_nxt += q->tqe_len;
785 flags = q->tqe_th->th_flags & TH_FIN;
786
787 LIST_REMOVE(q, tqe_q);
788
789 tmp = tcp_destroy_reass_qent(tp, q);
790
791 if (so->so_state & SS_CANTRCVMORE) {
792 m_freem(tmp);
793 } else {
794 so_recv_data_stat(so, tmp, 0); /* XXXX */
795 if (psh) {
796 tp->t_flagsext |= TF_LAST_IS_PSH;
797 } else {
798 tp->t_flagsext &= ~TF_LAST_IS_PSH;
799 }
800
801 if (sbappendstream_rcvdemux(so, tmp)) {
802 *dowakeup = 1;
803 }
804 }
805 q = LIST_FIRST(&tp->t_segq);
806 } while (q && q->tqe_th->th_seq == tp->rcv_nxt);
807 tp->t_flagsext &= ~TF_REASS_INPROG;
808
809 if ((inp->inp_vflag & INP_IPV6) != 0) {
810 KERNEL_DEBUG(DBG_LAYER_BEG,
811 ((inp->inp_fport << 16) | inp->inp_lport),
812 (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
813 (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
814 0, 0, 0);
815 } else {
816 KERNEL_DEBUG(DBG_LAYER_BEG,
817 ((inp->inp_fport << 16) | inp->inp_lport),
818 (((inp->inp_laddr.s_addr & 0xffff) << 16) |
819 (inp->inp_faddr.s_addr & 0xffff)),
820 0, 0, 0);
821 }
822
823 return flags;
824 }
825
826 /*
827 * Reduce congestion window when local AQM sends
828 * congestion event. We don't enter FAST_RECOVERY here
829 * as there is no packet loss.
830 */
831 void
tcp_local_congestion_notification(struct tcpcb * tp)832 tcp_local_congestion_notification(struct tcpcb *tp)
833 {
834 if (CC_ALGO(tp)->pre_fr != NULL) {
835 CC_ALGO(tp)->pre_fr(tp);
836 }
837
838 tp->snd_cwnd = tp->snd_ssthresh;
839 }
840
841 /*
842 * Enter fast recovery and reduce congestion window,
843 * used when CE is seen or when a tail loss
844 * probe recovers the last packet. Also used by RACK.
845 */
846 void
tcp_enter_fast_recovery(struct tcpcb * tp)847 tcp_enter_fast_recovery(struct tcpcb *tp)
848 {
849 /*
850 * If the current tcp cc module has
851 * defined a hook for tasks to run
852 * before entering FR, call it
853 */
854 if (CC_ALGO(tp)->pre_fr != NULL) {
855 CC_ALGO(tp)->pre_fr(tp);
856 }
857 ENTER_FASTRECOVERY(tp);
858 if (tp->t_flags & TF_SENTFIN) {
859 tp->snd_recover = tp->snd_max - 1;
860 } else {
861 tp->snd_recover = tp->snd_max;
862 }
863
864 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
865
866 tp->t_timer[TCPT_REXMT] = 0;
867 tp->t_timer[TCPT_PTO] = 0;
868 tp->t_rtttime = 0;
869 if (tp->t_flagsext & TF_CWND_NONVALIDATED) {
870 tcp_cc_adjust_nonvalidated_cwnd(tp);
871 } else {
872 /* No need to inflate the congestion window */
873 tp->snd_cwnd = tp->snd_ssthresh;
874 }
875 }
876
877 /*
878 * This function is called upon reception of data on a socket. It's purpose is
879 * to handle the adaptive keepalive timers that monitor whether the connection
880 * is making progress. First the adaptive read-timer, second the TFO probe-timer.
881 *
882 * The application wants to get an event if there is a stall during read.
883 * Set the initial keepalive timeout to be equal to twice RTO.
884 *
885 * If the outgoing interface is in marginal conditions, we need to
886 * enable read probes for that too.
887 */
888 static inline void
tcp_adaptive_rwtimo_check(struct tcpcb * tp,int tlen)889 tcp_adaptive_rwtimo_check(struct tcpcb *tp, int tlen)
890 {
891 struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
892
893 if ((tp->t_adaptive_rtimo > 0 ||
894 (outifp != NULL &&
895 (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)))
896 && tlen > 0 &&
897 tp->t_state == TCPS_ESTABLISHED) {
898 tp->t_timer[TCPT_KEEP] = tcp_offset_from_start(tp,
899 (TCP_REXMTVAL(tp) << 1));
900 tp->t_flagsext |= TF_DETECT_READSTALL;
901 tp->t_rtimo_probes = 0;
902 }
903 }
904
905 inline void
tcp_keepalive_reset(struct tcpcb * tp)906 tcp_keepalive_reset(struct tcpcb *tp)
907 {
908 tp->t_timer[TCPT_KEEP] = tcp_offset_from_start(tp,
909 TCP_CONN_KEEPIDLE(tp));
910 tp->t_flagsext &= ~(TF_DETECT_READSTALL);
911 tp->t_rtimo_probes = 0;
912 }
913
914 void
tcp_set_finwait_timeout(struct tcpcb * tp)915 tcp_set_finwait_timeout(struct tcpcb *tp)
916 {
917 /*
918 * Starting the TCPT_2MSL timer is contrary to the
919 * specification, but if we don't get a FIN
920 * we'll hang forever.
921 */
922 ASSERT(tp->t_state == TCPS_FIN_WAIT_2);
923 ASSERT((tp->t_inpcb->inp_socket->so_state & (SS_CANTRCVMORE)) == SS_CANTRCVMORE);
924
925 if (tcp_fin_timeout > 0 &&
926 tcp_fin_timeout < TCP_CONN_MAXIDLE(tp)) {
927 tp->t_timer[TCPT_2MSL] = tcp_offset_from_start(tp, tcp_fin_timeout);
928 } else {
929 tp->t_timer[TCPT_2MSL] = tcp_offset_from_start(tp, TCP_CONN_MAXIDLE(tp));
930 }
931 }
932
933 /*
934 * TCP input routine, follows pages 65-76 of the
935 * protocol specification dated September, 1981 very closely.
936 */
937 int
tcp6_input(struct mbuf ** mp,int * offp,int proto)938 tcp6_input(struct mbuf **mp, int *offp, int proto)
939 {
940 #pragma unused(proto)
941 struct mbuf *m = *mp;
942 uint32_t ia6_flags;
943 struct ifnet *ifp = m->m_pkthdr.rcvif;
944
945 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), return IPPROTO_DONE);
946
947 /* Expect 32-bit aligned data pointer on strict-align platforms */
948 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
949
950 /*
951 * draft-itojun-ipv6-tcp-to-anycast
952 * better place to put this in?
953 */
954 if (ip6_getdstifaddr_info(m, NULL, &ia6_flags) == 0) {
955 if (ia6_flags & IN6_IFF_ANYCAST) {
956 struct ip6_hdr *ip6;
957
958 ip6 = mtod(m, struct ip6_hdr *);
959 icmp6_error(m, ICMP6_DST_UNREACH,
960 ICMP6_DST_UNREACH_ADDR,
961 (int)((caddr_t)&ip6->ip6_dst - (caddr_t)ip6));
962
963 IF_TCP_STATINC(ifp, icmp6unreach);
964
965 return IPPROTO_DONE;
966 }
967 }
968
969 tcp_input(m, *offp);
970 return IPPROTO_DONE;
971 }
972
973 static void
tcp_sbrcv_reserve(struct tcpcb * tp,struct sockbuf * sbrcv,u_int32_t newsize,u_int32_t idealsize,u_int32_t rcvbuf_max)974 tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv,
975 u_int32_t newsize, u_int32_t idealsize, u_int32_t rcvbuf_max)
976 {
977 /* newsize should not exceed max */
978 newsize = min(newsize, rcvbuf_max);
979
980 /* The receive window scale negotiated at the
981 * beginning of the connection will also set a
982 * limit on the socket buffer size
983 */
984 newsize = min(newsize, TCP_MAXWIN << tp->rcv_scale);
985
986 /* Set new socket buffer size */
987 if (newsize > sbrcv->sb_hiwat &&
988 (sbreserve(sbrcv, newsize) == 1)) {
989 sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize,
990 (idealsize != 0) ? idealsize : newsize), rcvbuf_max);
991
992 /* Again check the limit set by the advertised
993 * window scale
994 */
995 sbrcv->sb_idealsize = min(sbrcv->sb_idealsize,
996 TCP_MAXWIN << tp->rcv_scale);
997 }
998 }
999
1000 /*
1001 * This function is used to grow a receive socket buffer. It
1002 * will take into account system-level memory usage and the
1003 * bandwidth available on the link to make a decision.
1004 */
1005 static void
tcp_sbrcv_grow(struct tcpcb * tp,struct sockbuf * sbrcv,struct tcpopt * to,uint32_t pktlen)1006 tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv,
1007 struct tcpopt *to, uint32_t pktlen)
1008 {
1009 struct socket *so = sbrcv->sb_so;
1010
1011 /*
1012 * Do not grow the receive socket buffer if
1013 * - auto resizing is disabled, globally or on this socket
1014 * - the high water mark already reached the maximum
1015 * - the stream is in background and receive side is being
1016 * throttled
1017 * - we are memory-limited
1018 */
1019 if (tcp_do_autorcvbuf == 0 ||
1020 (sbrcv->sb_flags & SB_AUTOSIZE) == 0 ||
1021 sbrcv->sb_hiwat >= tcp_autorcvbuf_max ||
1022 (tp->t_flagsext & TF_RECV_THROTTLE) ||
1023 (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) ||
1024 (tcp_memacct_limited() && sbrcv->sb_hiwat >= tcp_recvspace)) {
1025 /* Can not resize the socket buffer, just return */
1026 goto out;
1027 }
1028
1029 if (!TSTMP_SUPPORTED(tp)) {
1030 /*
1031 * Timestamp option is not supported on this connection,
1032 * use receiver's RTT. Socket buffer grows based on the
1033 * BDP of the link.
1034 */
1035 if (TSTMP_GEQ(tcp_now,
1036 tp->rfbuf_ts + (tp->rcv_srtt >> TCP_RTT_SHIFT))) {
1037 tp->rfbuf_cnt += pktlen;
1038 if (tp->rfbuf_cnt > tp->rfbuf_space) {
1039 int32_t rcvbuf_inc;
1040 uint32_t idealsize;
1041
1042 /*
1043 * Increase receive-buffer aggressively if we
1044 * received more than 150% of what was received
1045 * in the previous round. Because, that means
1046 * the sender is in TCP slow-start and so
1047 * we need to give it more space to not be
1048 * limiting the sender with a small receive-window.
1049 */
1050 if (tp->rfbuf_cnt > tp->rfbuf_space + (tp->rfbuf_space >> 1)) {
1051 rcvbuf_inc = (tp->rfbuf_cnt << 2) - sbrcv->sb_hiwat;
1052 idealsize = (tp->rfbuf_cnt << 2);
1053 } else {
1054 rcvbuf_inc = (tp->rfbuf_cnt << 1) - sbrcv->sb_hiwat;
1055 idealsize = (tp->rfbuf_cnt << 1);
1056 }
1057
1058 if (rcvbuf_inc > 0) {
1059 rcvbuf_inc =
1060 (rcvbuf_inc / tp->t_maxseg) * tp->t_maxseg;
1061
1062 tcp_sbrcv_reserve(tp, sbrcv,
1063 sbrcv->sb_hiwat + rcvbuf_inc,
1064 idealsize, tcp_autorcvbuf_max);
1065
1066 tp->rfbuf_space = tp->rfbuf_cnt;
1067 }
1068 }
1069 goto out;
1070 } else {
1071 tp->rfbuf_cnt += pktlen;
1072 return;
1073 }
1074 } else if (to->to_tsecr != 0) {
1075 /*
1076 * If the timestamp shows that one RTT has
1077 * completed, we can stop counting the
1078 * bytes. Here we consider increasing
1079 * the socket buffer if the bandwidth measured in
1080 * last rtt, is more than half of sb_hiwat, this will
1081 * help to scale the buffer according to the bandwidth
1082 * on the link.
1083 */
1084 if (TSTMP_GEQ(to->to_tsecr, tp->rfbuf_ts)) {
1085 tp->rfbuf_cnt += pktlen;
1086
1087 if (tp->rfbuf_cnt > tp->rfbuf_space) {
1088 int32_t rcvbuf_inc;
1089 uint32_t idealsize;
1090
1091 if (tp->rfbuf_cnt > tp->rfbuf_space + (tp->rfbuf_space >> 1)) {
1092 rcvbuf_inc = (tp->rfbuf_cnt << 2) - sbrcv->sb_hiwat;
1093 idealsize = (tp->rfbuf_cnt << 2);
1094 } else {
1095 rcvbuf_inc = (tp->rfbuf_cnt << 1) - sbrcv->sb_hiwat;
1096 idealsize = (tp->rfbuf_cnt << 1);
1097 }
1098
1099 tp->rfbuf_space = tp->rfbuf_cnt;
1100
1101 if (rcvbuf_inc > 0) {
1102 rcvbuf_inc =
1103 (rcvbuf_inc / tp->t_maxseg) * tp->t_maxseg;
1104
1105 tcp_sbrcv_reserve(tp, sbrcv,
1106 sbrcv->sb_hiwat + rcvbuf_inc,
1107 idealsize, tcp_autorcvbuf_max);
1108 }
1109 }
1110 /* Measure instantaneous receive bandwidth */
1111 if (tp->t_bwmeas != NULL && tp->rfbuf_cnt > 0 &&
1112 TSTMP_GT(tcp_now, tp->rfbuf_ts)) {
1113 u_int32_t rcv_bw;
1114 rcv_bw = tp->rfbuf_cnt /
1115 (int)(tcp_now - tp->rfbuf_ts);
1116 if (tp->t_bwmeas->bw_rcvbw_max == 0) {
1117 tp->t_bwmeas->bw_rcvbw_max = rcv_bw;
1118 } else {
1119 tp->t_bwmeas->bw_rcvbw_max = max(
1120 tp->t_bwmeas->bw_rcvbw_max, rcv_bw);
1121 }
1122 }
1123 goto out;
1124 } else {
1125 tp->rfbuf_cnt += pktlen;
1126 return;
1127 }
1128 }
1129 out:
1130 /* Restart the measurement */
1131 tp->rfbuf_ts = tcp_now;
1132 tp->rfbuf_cnt = 0;
1133 return;
1134 }
1135
1136 /* This function will trim the excess space added to the socket buffer
1137 * to help a slow-reading app. The ideal-size of a socket buffer depends
1138 * on the link bandwidth or it is set by an application and we aim to
1139 * reach that size.
1140 */
1141 void
tcp_sbrcv_trim(struct tcpcb * tp,struct sockbuf * sbrcv)1142 tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv)
1143 {
1144 if (tcp_do_autorcvbuf == 1 && sbrcv->sb_idealsize > 0 &&
1145 sbrcv->sb_hiwat > sbrcv->sb_idealsize) {
1146 int32_t trim;
1147 /* compute the difference between ideal and current sizes */
1148 u_int32_t diff = sbrcv->sb_hiwat - sbrcv->sb_idealsize;
1149
1150 /* Compute the maximum advertised window for
1151 * this connection.
1152 */
1153 u_int32_t advwin = tp->rcv_adv - tp->rcv_nxt;
1154
1155 /* How much can we trim the receive socket buffer?
1156 * 1. it can not be trimmed beyond the max rcv win advertised
1157 * 2. if possible, leave 1/16 of bandwidth*delay to
1158 * avoid closing the win completely
1159 */
1160 u_int32_t leave = max(advwin, (sbrcv->sb_idealsize >> 4));
1161
1162 /* Sometimes leave can be zero, in that case leave at least
1163 * a few segments worth of space.
1164 */
1165 if (leave == 0) {
1166 leave = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
1167 }
1168
1169 trim = sbrcv->sb_hiwat - (sbrcv->sb_cc + leave);
1170 trim = imin(trim, (int32_t)diff);
1171
1172 if (trim > 0) {
1173 sbreserve(sbrcv, (sbrcv->sb_hiwat - trim));
1174 }
1175 }
1176 }
1177
1178 /* We may need to trim the send socket buffer size for two reasons:
1179 * 1. if the rtt seen on the connection is climbing up, we do not
1180 * want to fill the buffers any more.
1181 * 2. if the congestion win on the socket backed off, there is no need
1182 * to hold more mbufs for that connection than what the cwnd will allow.
1183 */
1184 void
tcp_sbsnd_trim(struct sockbuf * sbsnd)1185 tcp_sbsnd_trim(struct sockbuf *sbsnd)
1186 {
1187 if (((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) ==
1188 (SB_AUTOSIZE | SB_TRIM)) &&
1189 (sbsnd->sb_idealsize > 0) &&
1190 (sbsnd->sb_hiwat > sbsnd->sb_idealsize)) {
1191 u_int32_t trim = 0;
1192 if (sbsnd->sb_cc <= sbsnd->sb_idealsize) {
1193 trim = sbsnd->sb_hiwat - sbsnd->sb_idealsize;
1194 } else {
1195 trim = sbsnd->sb_hiwat - sbsnd->sb_cc;
1196 }
1197 sbreserve(sbsnd, (sbsnd->sb_hiwat - trim));
1198 }
1199 if (sbsnd->sb_hiwat <= sbsnd->sb_idealsize) {
1200 sbsnd->sb_flags &= ~(SB_TRIM);
1201 }
1202 }
1203
1204 /*
1205 * If timestamp option was not negotiated on this connection
1206 * and this connection is on the receiving side of a stream
1207 * then we can not measure the delay on the link accurately.
1208 * Instead of enabling automatic receive socket buffer
1209 * resizing, just give more space to the receive socket buffer.
1210 */
1211 static inline void
tcp_sbrcv_tstmp_check(struct tcpcb * tp)1212 tcp_sbrcv_tstmp_check(struct tcpcb *tp)
1213 {
1214 struct socket *so = tp->t_inpcb->inp_socket;
1215 u_int32_t newsize = 2 * tcp_recvspace;
1216 struct sockbuf *sbrcv = &so->so_rcv;
1217
1218 if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP)) !=
1219 (TF_REQ_TSTMP | TF_RCVD_TSTMP) &&
1220 (sbrcv->sb_flags & SB_AUTOSIZE) != 0) {
1221 tcp_sbrcv_reserve(tp, sbrcv, newsize, 0, newsize);
1222 }
1223 }
1224
1225 /*
1226 * The last packet was a retransmission, check if this ack
1227 * indicates that the retransmission was spurious.
1228 *
1229 * If the connection supports timestamps, we could use it to
1230 * detect if the last retransmit was not needed. Otherwise,
1231 * we check if the ACK arrived within RTT/2 window, then it
1232 * was a mistake to do the retransmit in the first place.
1233 *
1234 * This function will return 1 if it is a spurious retransmit,
1235 * 0 otherwise.
1236 */
1237 int
tcp_detect_bad_rexmt(struct tcpcb * tp,struct tcphdr * th,struct tcpopt * to,u_int32_t rxtime)1238 tcp_detect_bad_rexmt(struct tcpcb *tp, struct tcphdr *th,
1239 struct tcpopt *to, u_int32_t rxtime)
1240 {
1241 int32_t tdiff, bad_rexmt_win;
1242 bad_rexmt_win = (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
1243
1244 /* If the ack has ECN CE bit, then cwnd has to be adjusted */
1245 if ((tp->accurate_ecn_on && tp->t_aecn.t_delta_ce_packets > 0) ||
1246 (TCP_ECN_ENABLED(tp) && (th->th_flags & TH_ECE))) {
1247 return 0;
1248 }
1249 if (TSTMP_SUPPORTED(tp)) {
1250 if (rxtime > 0 && (to->to_flags & TOF_TS) && to->to_tsecr != 0 &&
1251 TSTMP_LT(to->to_tsecr, rxtime)) {
1252 return 1;
1253 }
1254 } else {
1255 if ((tp->t_rxtshift == 1 || tcp_sent_tlp_retrans(tp)) &&
1256 rxtime > 0) {
1257 tdiff = (int32_t)(tcp_now - rxtime);
1258 if (tdiff < bad_rexmt_win) {
1259 return 1;
1260 }
1261 }
1262 }
1263 return 0;
1264 }
1265
1266
1267 /*
1268 * Restore congestion window state if a spurious timeout
1269 * was detected.
1270 */
1271 static void
tcp_bad_rexmt_restore_state(struct tcpcb * tp,struct tcphdr * th)1272 tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th)
1273 {
1274 if (TSTMP_SUPPORTED(tp)) {
1275 u_int32_t fsize, acked;
1276 fsize = tp->snd_max - th->th_ack;
1277 acked = BYTES_ACKED(th, tp);
1278
1279 /*
1280 * Implement bad retransmit recovery as
1281 * described in RFC 4015.
1282 */
1283 tp->snd_ssthresh = tp->snd_ssthresh_prev;
1284
1285 /* Initialize cwnd to the initial window */
1286 if (CC_ALGO(tp)->cwnd_init != NULL) {
1287 CC_ALGO(tp)->cwnd_init(tp);
1288 }
1289
1290 tp->snd_cwnd = fsize + min(acked, tp->snd_cwnd);
1291 } else {
1292 tp->snd_cwnd = tp->snd_cwnd_prev;
1293 tp->snd_ssthresh = tp->snd_ssthresh_prev;
1294 if (tp->t_flags & TF_WASFRECOVERY) {
1295 ENTER_FASTRECOVERY(tp);
1296 }
1297
1298 /* Do not use the loss flight size in this case */
1299 tp->t_lossflightsize = 0;
1300 }
1301 tp->snd_cwnd = max(tp->snd_cwnd, tcp_initial_cwnd(tp));
1302 tp->snd_recover = tp->snd_recover_prev;
1303 tp->snd_nxt = tp->snd_max;
1304
1305 /* Fix send socket buffer to reflect the change in cwnd */
1306 tcp_bad_rexmt_fix_sndbuf(tp);
1307
1308 /* Restore rack related state */
1309 if (TCP_RACK_ENABLED(tp)) {
1310 tcp_rack_bad_rexmt_restore(tp);
1311 }
1312
1313 /*
1314 * This RTT might reflect the extra delay induced
1315 * by the network. Skip using this sample for RTO
1316 * calculation and mark the connection so we can
1317 * recompute RTT when the next eligible sample is
1318 * found.
1319 */
1320 tp->t_flagsext |= TF_RECOMPUTE_RTT;
1321 tp->t_badrexmt_time = tcp_now;
1322 tp->t_rtttime = 0;
1323 }
1324
1325 /*
1326 * If the previous packet was sent in retransmission timer, and it was
1327 * not needed, then restore the congestion window to the state before that
1328 * transmission.
1329 *
1330 * If the last packet was sent as a tail loss probe retransmission, check if that
1331 * recovered the last packet. If so, that will indicate a real loss and
1332 * the congestion window needs to be lowered.
1333 */
1334 static void
tcp_bad_rexmt_check(struct tcpcb * tp,struct tcphdr * th,struct tcpopt * to)1335 tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
1336 {
1337 if (tp->t_rxtshift > 0 &&
1338 tcp_detect_bad_rexmt(tp, th, to, tp->t_rxtstart)) {
1339 ++tcpstat.tcps_sndrexmitbad;
1340 tcp_bad_rexmt_restore_state(tp, th);
1341 tcp_ccdbg_trace(tp, th, TCP_CC_BAD_REXMT_RECOVERY);
1342 } else if (tcp_sent_tlp_retrans(tp) && tp->t_tlphighrxt > 0 &&
1343 SEQ_GEQ(th->th_ack, tp->t_tlphighrxt) &&
1344 !tcp_detect_bad_rexmt(tp, th, to, tp->t_tlpstart)) {
1345 /*
1346 * The tail loss probe recovered the last packet and
1347 * we need to adjust the congestion window to take
1348 * this loss into account.
1349 * No need to update rack.reo_wnd_persist for a TLP recovery
1350 */
1351 ++tcpstat.tcps_tlp_recoverlastpkt;
1352 if (!IN_FASTRECOVERY(tp)) {
1353 tcp_enter_fast_recovery(tp);
1354 EXIT_FASTRECOVERY(tp);
1355 }
1356 tcp_ccdbg_trace(tp, th, TCP_CC_TLP_RECOVER_LASTPACKET);
1357 } else if (tcp_rxtseg_detect_bad_rexmt(tp, th->th_ack)) {
1358 /*
1359 * All of the retransmitted segments were duplicated, this
1360 * can be an indication of bad fast retransmit.
1361 */
1362 tcpstat.tcps_dsack_badrexmt++;
1363 tcp_bad_rexmt_restore_state(tp, th);
1364 tcp_ccdbg_trace(tp, th, TCP_CC_DSACK_BAD_REXMT);
1365 tcp_rxtseg_clean(tp);
1366 }
1367 tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1368 tp->t_tlphighrxt = 0;
1369 tp->t_tlpstart = 0;
1370
1371 /*
1372 * check if the latest ack was for a segment sent during PMTU
1373 * blackhole detection. If the timestamp on the ack is before
1374 * PMTU blackhole detection, then revert the size of the max
1375 * segment to previous size.
1376 */
1377 if (tp->t_rxtshift > 0 && (tp->t_flags & TF_BLACKHOLE) &&
1378 tp->t_pmtud_start_ts > 0 && TSTMP_SUPPORTED(tp)) {
1379 if ((to->to_flags & TOF_TS) && to->to_tsecr != 0
1380 && TSTMP_LT(to->to_tsecr, tp->t_pmtud_start_ts)) {
1381 tcp_pmtud_revert_segment_size(tp);
1382 }
1383 }
1384 if (tp->t_pmtud_start_ts > 0) {
1385 tp->t_pmtud_start_ts = 0;
1386 }
1387
1388 tp->t_pmtud_lastseg_size = 0;
1389 }
1390
1391 /*
1392 * Check if early retransmit can be attempted according to RFC 5827.
1393 *
1394 * If packet reordering is detected on a connection, fast recovery will
1395 * be delayed until it is clear that the packet was lost and not reordered.
1396 * But reordering detection is done only when SACK is enabled.
1397 *
1398 * On connections that do not support SACK, there is a limit on the number
1399 * of early retransmits that can be done per minute. This limit is needed
1400 * to make sure that too many packets are not retransmitted when there is
1401 * packet reordering.
1402 */
1403 static void
tcp_early_rexmt_check(struct tcpcb * tp,struct tcphdr * th)1404 tcp_early_rexmt_check(struct tcpcb *tp, struct tcphdr *th)
1405 {
1406 u_int32_t obytes, snd_off;
1407 int32_t snd_len;
1408 struct socket *so = tp->t_inpcb->inp_socket;
1409
1410 if ((SACK_ENABLED(tp) || tp->t_early_rexmt_count < TCP_EARLY_REXMT_LIMIT) &&
1411 SEQ_GT(tp->snd_max, tp->snd_una) &&
1412 (tp->t_dupacks == 1 || (SACK_ENABLED(tp) && !TAILQ_EMPTY(&tp->snd_holes)))) {
1413 /*
1414 * If there are only a few outstanding
1415 * segments on the connection, we might need
1416 * to lower the retransmit threshold. This
1417 * will allow us to do Early Retransmit as
1418 * described in RFC 5827.
1419 */
1420 if (TCP_RACK_ENABLED(tp)) {
1421 obytes = tcp_flight_size(tp);
1422 } else if (SACK_ENABLED(tp) &&
1423 !TAILQ_EMPTY(&tp->snd_holes)) {
1424 obytes = tcp_flight_size(tp);
1425 } else {
1426 obytes = (tp->snd_max - tp->snd_una);
1427 }
1428
1429 /*
1430 * In order to lower retransmit threshold the
1431 * following two conditions must be met.
1432 * 1. the amount of outstanding data is less
1433 * than 4*SMSS bytes
1434 * 2. there is no unsent data ready for
1435 * transmission or the advertised window
1436 * will limit sending new segments.
1437 */
1438 snd_off = tp->snd_max - tp->snd_una;
1439 snd_len = min(so->so_snd.sb_cc, tp->snd_wnd) - snd_off;
1440 if (obytes < (tp->t_maxseg << 2) &&
1441 snd_len <= 0) {
1442 u_int32_t osegs;
1443
1444 osegs = obytes / tp->t_maxseg;
1445 if ((osegs * tp->t_maxseg) < obytes) {
1446 osegs++;
1447 }
1448
1449 /*
1450 * By checking for early retransmit after
1451 * receiving some duplicate acks when SACK
1452 * is supported, the connection will
1453 * enter fast recovery even if multiple
1454 * segments are lost in the same window.
1455 */
1456 if (osegs < 4) {
1457 tp->t_rexmtthresh =
1458 ((osegs - 1) > 1) ? ((uint8_t)osegs - 1) : 1;
1459 tp->t_rexmtthresh =
1460 MIN(tp->t_rexmtthresh, tcprexmtthresh);
1461 tp->t_rexmtthresh =
1462 MAX(tp->t_rexmtthresh,
1463 tp->t_dupacks > UINT8_MAX ? UINT8_MAX : (uint8_t)tp->t_dupacks);
1464
1465 if (tp->t_early_rexmt_count == 0) {
1466 tp->t_early_rexmt_win = tcp_now;
1467 }
1468
1469 if (tp->t_flagsext & TF_SENT_TLPROBE) {
1470 tcpstat.tcps_tlp_recovery++;
1471 tcp_ccdbg_trace(tp, th,
1472 TCP_CC_TLP_RECOVERY);
1473 } else {
1474 tcpstat.tcps_early_rexmt++;
1475 tp->t_early_rexmt_count++;
1476 tcp_ccdbg_trace(tp, th,
1477 TCP_CC_EARLY_RETRANSMIT);
1478 }
1479 }
1480 }
1481 }
1482
1483 /*
1484 * If we ever sent a TLP probe, the acknowledgement will trigger
1485 * early retransmit because the value of snd_fack will be close
1486 * to snd_max. This will take care of adjustments to the
1487 * congestion window. So we can reset TF_SENT_PROBE flag.
1488 */
1489 tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1490 tp->t_tlphighrxt = 0;
1491 tp->t_tlpstart = 0;
1492 }
1493
1494 static boolean_t
tcp_tfo_syn(struct tcpcb * tp,struct tcpopt * to)1495 tcp_tfo_syn(struct tcpcb *tp, struct tcpopt *to)
1496 {
1497 u_char out[CCAES_BLOCK_SIZE];
1498 unsigned char len;
1499
1500 if (!(to->to_flags & (TOF_TFO | TOF_TFOREQ)) ||
1501 !(tcp_fastopen & TCP_FASTOPEN_SERVER)) {
1502 return FALSE;
1503 }
1504
1505 if ((to->to_flags & TOF_TFOREQ)) {
1506 tp->t_tfo_flags |= TFO_F_OFFER_COOKIE;
1507
1508 tp->t_tfo_stats |= TFO_S_COOKIEREQ_RECV;
1509 tcpstat.tcps_tfo_cookie_req_rcv++;
1510 return FALSE;
1511 }
1512
1513 /* Ok, then it must be an offered cookie. We need to check that ... */
1514 tcp_tfo_gen_cookie(tp->t_inpcb, out, sizeof(out));
1515
1516 len = *to->to_tfo - TCPOLEN_FASTOPEN_REQ;
1517 to->to_tfo++;
1518 to->to_tfo_size--;
1519 if (memcmp(out, to->to_tfo, len)) {
1520 /* Cookies are different! Let's return and offer a new cookie */
1521 tp->t_tfo_flags |= TFO_F_OFFER_COOKIE;
1522
1523 tp->t_tfo_stats |= TFO_S_COOKIE_INVALID;
1524 tcpstat.tcps_tfo_cookie_invalid++;
1525 return FALSE;
1526 }
1527
1528 if (OSIncrementAtomic(&tcp_tfo_halfcnt) >= tcp_tfo_backlog) {
1529 /* Need to decrement again as we just increased it... */
1530 OSDecrementAtomic(&tcp_tfo_halfcnt);
1531 return FALSE;
1532 }
1533
1534 tp->t_tfo_flags |= TFO_F_COOKIE_VALID;
1535
1536 tp->t_tfo_stats |= TFO_S_SYNDATA_RCV;
1537 tcpstat.tcps_tfo_syn_data_rcv++;
1538
1539 return TRUE;
1540 }
1541
1542 static void
tcp_tfo_synack(struct tcpcb * tp,struct tcpopt * to)1543 tcp_tfo_synack(struct tcpcb *tp, struct tcpopt *to)
1544 {
1545 if (to->to_flags & TOF_TFO) {
1546 unsigned char len = *to->to_tfo - TCPOLEN_FASTOPEN_REQ;
1547
1548 /*
1549 * If this happens, things have gone terribly wrong. len should
1550 * have been checked in tcp_dooptions.
1551 */
1552 VERIFY(len <= TFO_COOKIE_LEN_MAX);
1553
1554 to->to_tfo++;
1555 to->to_tfo_size--;
1556
1557 tcp_cache_set_cookie(tp, to->to_tfo, len);
1558 tcp_heuristic_tfo_success(tp);
1559
1560 tp->t_tfo_stats |= TFO_S_COOKIE_RCV;
1561 tcpstat.tcps_tfo_cookie_rcv++;
1562 if (tp->t_tfo_flags & TFO_F_COOKIE_SENT) {
1563 tcpstat.tcps_tfo_cookie_wrong++;
1564 tp->t_tfo_stats |= TFO_S_COOKIE_WRONG;
1565 }
1566 } else {
1567 /*
1568 * Thus, no cookie in the response, but we either asked for one
1569 * or sent SYN+DATA. Now, we need to check whether we had to
1570 * rexmit the SYN. If that's the case, it's better to start
1571 * backing of TFO-cookie requests.
1572 */
1573 if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1574 tp->t_tfo_flags & TFO_F_SYN_LOSS) {
1575 tp->t_tfo_stats |= TFO_S_SYN_LOSS;
1576 tcpstat.tcps_tfo_syn_loss++;
1577
1578 tcp_heuristic_tfo_loss(tp);
1579 } else {
1580 if (tp->t_tfo_flags & TFO_F_COOKIE_REQ) {
1581 tp->t_tfo_stats |= TFO_S_NO_COOKIE_RCV;
1582 tcpstat.tcps_tfo_no_cookie_rcv++;
1583 }
1584
1585 tcp_heuristic_tfo_success(tp);
1586 }
1587 }
1588 }
1589
1590 static void
tcp_tfo_rcv_probe(struct tcpcb * tp,int tlen)1591 tcp_tfo_rcv_probe(struct tcpcb *tp, int tlen)
1592 {
1593 if (tlen != 0) {
1594 return;
1595 }
1596
1597 tp->t_tfo_probe_state = TFO_PROBE_PROBING;
1598
1599 /*
1600 * We send the probe out rather quickly (after one RTO). It does not
1601 * really hurt that much, it's only one additional segment on the wire.
1602 */
1603 tp->t_timer[TCPT_KEEP] = tcp_offset_from_start(tp, (TCP_REXMTVAL(tp)));
1604 }
1605
1606 static void
tcp_tfo_rcv_data(struct tcpcb * tp)1607 tcp_tfo_rcv_data(struct tcpcb *tp)
1608 {
1609 /* Transition from PROBING to NONE as data has been received */
1610 if (tp->t_tfo_probe_state >= TFO_PROBE_PROBING) {
1611 tp->t_tfo_probe_state = TFO_PROBE_NONE;
1612 }
1613 }
1614
1615 static void
tcp_tfo_rcv_ack(struct tcpcb * tp,struct tcphdr * th)1616 tcp_tfo_rcv_ack(struct tcpcb *tp, struct tcphdr *th)
1617 {
1618 if (tp->t_tfo_probe_state == TFO_PROBE_PROBING &&
1619 tp->t_tfo_probes > 0) {
1620 if (th->th_seq == tp->rcv_nxt) {
1621 /* No hole, so stop probing */
1622 tp->t_tfo_probe_state = TFO_PROBE_NONE;
1623 } else if (SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1624 /* There is a hole! Wait a bit for data... */
1625 tp->t_tfo_probe_state = TFO_PROBE_WAIT_DATA;
1626 tp->t_timer[TCPT_KEEP] = tcp_offset_from_start(tp,
1627 TCP_REXMTVAL(tp));
1628 }
1629 }
1630 }
1631
1632 /*
1633 * Update snd_wnd information.
1634 */
1635 static inline bool
tcp_update_window(struct tcpcb * tp,int thflags,struct tcphdr * th,u_int32_t tiwin,int tlen)1636 tcp_update_window(struct tcpcb *tp, int thflags, struct tcphdr * th,
1637 u_int32_t tiwin, int tlen)
1638 {
1639 /* Don't look at the window if there is no ACK flag */
1640 if ((thflags & TH_ACK) &&
1641 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
1642 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
1643 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
1644 /* keep track of pure window updates */
1645 if (tlen == 0 &&
1646 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) {
1647 tcpstat.tcps_rcvwinupd++;
1648 }
1649 tp->snd_wnd = tiwin;
1650 tp->snd_wl1 = th->th_seq;
1651 tp->snd_wl2 = th->th_ack;
1652 if (tp->snd_wnd > tp->max_sndwnd) {
1653 tp->max_sndwnd = tp->snd_wnd;
1654 }
1655
1656 if (tp->t_inpcb->inp_socket->so_flags & SOF_MP_SUBFLOW) {
1657 mptcp_update_window_wakeup(tp);
1658 }
1659 return true;
1660 }
1661 return false;
1662 }
1663
1664 static void
tcp_handle_wakeup(struct socket * so,int read_wakeup,int write_wakeup)1665 tcp_handle_wakeup(struct socket *so, int read_wakeup, int write_wakeup)
1666 {
1667 if (read_wakeup != 0) {
1668 sorwakeup(so);
1669 }
1670 if (write_wakeup != 0) {
1671 sowwakeup(so);
1672 }
1673 }
1674
1675 static void
tcp_update_snd_una(struct tcpcb * tp,uint32_t ack)1676 tcp_update_snd_una(struct tcpcb *tp, uint32_t ack)
1677 {
1678 uint32_t delta = ack - tp->snd_una;
1679
1680 tp->t_stat.bytes_acked += delta;
1681 tp->snd_una = ack;
1682 }
1683
1684 static bool
tcp_syn_data_valid(struct tcpcb * tp,struct tcphdr * tcp_hdr,int tlen)1685 tcp_syn_data_valid(struct tcpcb *tp, struct tcphdr *tcp_hdr, int tlen)
1686 {
1687 /* No data? */
1688 if (tlen <= 0) {
1689 return false;
1690 }
1691
1692 /* Not the right sequence-number? */
1693 if (tcp_hdr->th_seq != tp->irs) {
1694 return false;
1695 }
1696
1697 /* We could have wrapped around, check that */
1698 if (tp->t_inpcb->inp_mstat.ms_total.ts_rxbytes > INT32_MAX) {
1699 return false;
1700 }
1701
1702 return true;
1703 }
1704
1705 /* Process IP-ECN codepoints on received packets and update receive side counters */
1706 static void
tcp_input_ip_ecn(struct tcpcb * tp,struct inpcb * inp,uint32_t tlen,uint32_t segment_count,uint8_t ip_ecn)1707 tcp_input_ip_ecn(struct tcpcb *tp, struct inpcb *inp, uint32_t tlen,
1708 uint32_t segment_count, uint8_t ip_ecn)
1709 {
1710 switch (ip_ecn) {
1711 case IPTOS_ECN_ECT1:
1712 tp->ecn_flags |= TE_ACO_ECT1;
1713 tp->t_aecn.t_rcv_ect1_bytes += tlen;
1714 break;
1715 case IPTOS_ECN_ECT0:
1716 tp->ecn_flags |= TE_ACO_ECT0;
1717 tp->t_aecn.t_rcv_ect0_bytes += tlen;
1718 break;
1719 case IPTOS_ECN_CE:
1720 tp->t_aecn.t_rcv_ce_packets += segment_count;
1721 tp->t_aecn.t_rcv_ce_bytes += tlen;
1722 tp->t_ecn_recv_ce++;
1723 tcpstat.tcps_ecn_recv_ce++;
1724 INP_INC_IFNET_STAT(inp, ecn_recv_ce);
1725 break;
1726 default:
1727 /* No counter for Not-ECT */
1728 break;
1729 }
1730 }
1731
1732 /* Process SYN packet that wishes to negotiate Accurate ECN */
1733 static void
tcp_input_process_accecn_syn(struct tcpcb * tp,int ace_flags,uint8_t ip_ecn)1734 tcp_input_process_accecn_syn(struct tcpcb *tp, int ace_flags, uint8_t ip_ecn)
1735 {
1736 switch (ace_flags) {
1737 case (0 | 0 | 0):
1738 /* No ECN */
1739 tp->t_server_accecn_state = tcp_connection_server_no_ecn_requested;
1740 break;
1741 case (0 | TH_CWR | TH_ECE):
1742 /* Legacy ECN-setup */
1743 tp->ecn_flags |= (TE_SETUPRECEIVED | TE_SENDIPECT);
1744 tp->t_server_accecn_state = tcp_connection_server_classic_ecn_requested;
1745 break;
1746 case (TH_ACE):
1747 /* Accurate ECN */
1748 if (tp->l4s_enabled) {
1749 switch (ip_ecn) {
1750 case IPTOS_ECN_NOTECT:
1751 tp->ecn_flags |= TE_ACE_SETUP_NON_ECT;
1752 break;
1753 case IPTOS_ECN_ECT1:
1754 tp->ecn_flags |= TE_ACE_SETUP_ECT1;
1755 break;
1756 case IPTOS_ECN_ECT0:
1757 tp->ecn_flags |= TE_ACE_SETUP_ECT0;
1758 break;
1759 case IPTOS_ECN_CE:
1760 tp->ecn_flags |= TE_ACE_SETUP_CE;
1761 break;
1762 }
1763 /*
1764 * We set TE_SENDIPECT when handshake is complete
1765 * for Accurate ECN
1766 */
1767 tp->ecn_flags |= (TE_ACE_SETUPRECEIVED);
1768
1769 /* Initialize ECT byte counter to 1 to distinguish zeroing of options */
1770 tp->t_aecn.t_rcv_ect1_bytes = tp->t_aecn.t_rcv_ect0_bytes = 1;
1771 tp->t_aecn.t_snd_ect1_bytes = tp->t_aecn.t_snd_ect0_bytes = 1;
1772 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_requested;
1773 } else {
1774 /*
1775 * If AccECN is not enabled, ignore
1776 * the TH_AE bit and do Legacy ECN-setup
1777 */
1778 tp->ecn_flags |= (TE_SETUPRECEIVED | TE_SENDIPECT);
1779 }
1780 default:
1781 /* Forward Compatibility */
1782 /* Accurate ECN */
1783 if (tp->l4s_enabled) {
1784 switch (ip_ecn) {
1785 case IPTOS_ECN_NOTECT:
1786 tp->ecn_flags |= TE_ACE_SETUP_NON_ECT;
1787 break;
1788 case IPTOS_ECN_ECT1:
1789 tp->ecn_flags |= TE_ACE_SETUP_ECT1;
1790 break;
1791 case IPTOS_ECN_ECT0:
1792 tp->ecn_flags |= TE_ACE_SETUP_ECT0;
1793 break;
1794 case IPTOS_ECN_CE:
1795 tp->ecn_flags |= TE_ACE_SETUP_CE;
1796 break;
1797 }
1798 /*
1799 * We are not yet committing to send IP ECT packets when
1800 * Accurate ECN is enabled
1801 */
1802 tp->ecn_flags |= (TE_ACE_SETUPRECEIVED);
1803
1804 /* Initialize ECT byte counter to 1 to distinguish zeroing of options */
1805 tp->t_aecn.t_rcv_ect1_bytes = tp->t_aecn.t_rcv_ect0_bytes = 1;
1806 tp->t_aecn.t_snd_ect1_bytes = tp->t_aecn.t_snd_ect0_bytes = 1;
1807 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_requested;
1808 }
1809 break;
1810 }
1811 }
1812
1813 /* Process SYN/ACK packet that wishes to negotiate Accurate ECN */
1814 static void
tcp_input_process_accecn_synack(struct tcpcb * tp,struct inpcb * inp,struct tcpopt * to,int thflags,int ace_flags,uint8_t ip_ecn,uint32_t tlen,uint32_t segment_count)1815 tcp_input_process_accecn_synack(struct tcpcb *tp, struct inpcb *inp, struct tcpopt *to,
1816 int thflags, int ace_flags, uint8_t ip_ecn, uint32_t tlen, uint32_t segment_count)
1817 {
1818 if ((thflags & (TH_ECE | TH_CWR)) == (TH_ECE)) {
1819 /* Receiving Any|0|1 is classic ECN-setup SYN-ACK */
1820 tp->ecn_flags |= TE_SETUPRECEIVED;
1821 if (TCP_ECN_ENABLED(tp)) {
1822 tcp_heuristic_ecn_success(tp);
1823 tcpstat.tcps_ecn_client_success++;
1824 }
1825
1826 if (tp->ecn_flags & TE_ACE_SETUPSENT) {
1827 /*
1828 * Sent AccECN SYN but received classic ECN SYN-ACK
1829 * Set classic ECN related flags
1830 */
1831 tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT);
1832 tp->ecn_flags &= ~TE_ACE_SETUPSENT;
1833 if (tp->t_client_accecn_state == tcp_connection_client_accurate_ecn_feature_enabled) {
1834 tp->t_client_accecn_state = tcp_connection_client_classic_ecn_available;
1835 }
1836 }
1837 } else if (tp->l4s_enabled && ace_flags != 0 &&
1838 ace_flags != TH_ACE) {
1839 /* Initialize sender side packet & byte counters */
1840 tp->t_aecn.t_snd_ce_packets = 5;
1841 tp->t_aecn.t_snd_ect1_bytes = tp->t_aecn.t_snd_ect0_bytes = 1;
1842 tp->t_aecn.t_snd_ce_bytes = 0;
1843 tp->ecn_flags |= TE_ACE_FINAL_ACK_3WHS;
1844 /*
1845 * Client received AccECN SYN-ACK that reflects the state (ECN)
1846 * in which SYN packet was delivered. This helps to detect if
1847 * there was mangling of the SYN packet on the path. Currently, we
1848 * only send Not-ECT on SYN packets. So, we should set Not-ECT in
1849 * all packets if we receive any encoding other than 0|TH_CWR|0.
1850 * If 0|0|0 and 1|1|1 were received, fail Accurate ECN negotiation
1851 * by not setting TE_ACE_SETUPRECEIVED.
1852 */
1853 uint32_t ecn_flags = TE_ACE_SETUPRECEIVED;
1854 if (tp->l4s_enabled) {
1855 ecn_flags |= TE_SENDIPECT;
1856 }
1857 switch (ace_flags) {
1858 case (0 | TH_CWR | 0):
1859 /* Non-ECT SYN was delivered */
1860 tp->ecn_flags |= ecn_flags;
1861 tcpstat.tcps_ecn_ace_syn_not_ect++;
1862 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_success;
1863 break;
1864 case (0 | TH_CWR | TH_ECE):
1865 /* ECT1 SYN was delivered */
1866 tp->ecn_flags |= ecn_flags;
1867 /* Mangling detected, set Non-ECT on outgoing packets */
1868 tp->ecn_flags &= ~TE_SENDIPECT;
1869 tcpstat.tcps_ecn_ace_syn_ect1++;
1870 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_success_ect_mangling_detected;
1871 break;
1872 case (TH_AE | 0 | 0):
1873 /* ECT0 SYN was delivered */
1874 tp->ecn_flags |= ecn_flags;
1875 /* Mangling detected, set Non-ECT on outgoing packets */
1876 tp->ecn_flags &= ~TE_SENDIPECT;
1877 tcpstat.tcps_ecn_ace_syn_ect0++;
1878 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_success_ect_mangling_detected;
1879 break;
1880 case (TH_AE | TH_CWR | 0):
1881 /* CE SYN was delivered */
1882 tp->ecn_flags |= ecn_flags;
1883 /* Mangling detected, set Non-ECT on outgoing packets */
1884 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_success_ect_mangling_detected;
1885 tp->ecn_flags &= ~TE_SENDIPECT;
1886 /*
1887 * Although we don't send ECT SYN yet, it is possible that
1888 * a network element changed Not-ECT to ECT and later there
1889 * was congestion at another network element that set it to CE.
1890 * To keep it simple, we will consider this as a congestion event
1891 * for the congestion controller.
1892 * If a TCP client in AccECN mode receives CE feedback in the TCP
1893 * flags of a SYN/ACK, it MUST NOT increment s.cep.
1894 */
1895 tp->snd_cwnd = 2 * tp->t_maxseg;
1896 tcpstat.tcps_ecn_ace_syn_ce++;
1897 break;
1898 default:
1899 break;
1900 }
1901 /* Set Accurate ECN state for client */
1902 tcp_set_accurate_ecn(tp);
1903
1904 if (TCP_ECN_ENABLED(tp)) {
1905 tcp_heuristic_ecn_success(tp);
1906 tcpstat.tcps_ecn_client_success++;
1907 }
1908 /*
1909 * A TCP client in AccECN mode MUST feed back which of the 4
1910 * possible values of the IP-ECN field that was received in the
1911 * SYN/ACK. Set the setup flag for final ACK accordingly.
1912 * We will initialize r.cep, r.e1b, r.e0b first and then increment
1913 * if CE was set on the IP-ECN field of the SYN-ACK.
1914 */
1915 tp->t_aecn.t_rcv_ce_packets = 5;
1916 tp->t_aecn.t_rcv_ect0_bytes = tp->t_aecn.t_rcv_ect1_bytes = 1;
1917 tp->t_aecn.t_rcv_ce_bytes = 0;
1918
1919 /* Increment packet & byte counters based on IP-ECN */
1920 tcp_input_ip_ecn(tp, inp, (uint32_t)tlen, (uint32_t)segment_count, ip_ecn);
1921 switch (ip_ecn) {
1922 case IPTOS_ECN_NOTECT:
1923 /* Not-ECT SYN-ACK was received */
1924 tp->ecn_flags |= TE_ACE_SETUP_NON_ECT;
1925 break;
1926 case IPTOS_ECN_ECT1:
1927 /* ECT1 SYN-ACK was received */
1928 tp->ecn_flags |= TE_ACE_SETUP_ECT1;
1929 break;
1930 case IPTOS_ECN_ECT0:
1931 /* ECT0 SYN-ACK was received */
1932 tp->ecn_flags |= TE_ACE_SETUP_ECT0;
1933 break;
1934 case IPTOS_ECN_CE:
1935 tp->ecn_flags |= TE_ACE_SETUP_CE;
1936 break;
1937 }
1938 /* Update the time for this newly SYN-ACK packet */
1939 if ((to->to_flags & TOF_TS) != 0 && (to->to_tsecr != 0) &&
1940 (tp->t_last_ack_tsecr == 0 || TSTMP_GEQ(to->to_tsecr, tp->t_last_ack_tsecr))) {
1941 tp->t_last_ack_tsecr = to->to_tsecr;
1942 }
1943 } else {
1944 if ((tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT)) &&
1945 tp->t_rxtshift == 0) {
1946 tcp_heuristic_ecn_success(tp);
1947 tcpstat.tcps_ecn_not_supported++;
1948 }
1949 if (((tp->ecn_flags & TE_SETUPSENT) != 0 && tp->t_rxtshift == 1) ||
1950 ((tp->ecn_flags & TE_ACE_SETUPSENT) != 0 && tp->t_rxtshift == 2)) {
1951 /*
1952 * We keep heuristics for when SYN ECN was likely dropped at the network by
1953 * checking that we received an ACK for the subsequent retransmission without ECN
1954 */
1955 tcp_heuristic_ecn_loss(tp);
1956 }
1957
1958 /* non-ECN-setup SYN-ACK */
1959 tp->ecn_flags &= ~TE_SENDIPECT;
1960 /*
1961 * If Accurate ECN SYN was retransmitted twice and non-ECN SYN-ACK
1962 * was received, then we consider it as Accurate ECN blackholing
1963 */
1964 if ((tp->ecn_flags & TE_LOST_SYN) && tp->t_rxtshift <= 2 &&
1965 tp->t_client_accecn_state == tcp_connection_client_accurate_ecn_feature_enabled) {
1966 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_blackholed;
1967 }
1968 /*
1969 * If SYN wasn't retransmitted twice yet, the server supports neither classic nor
1970 * accurate ECN SYN-ACK. Accurate ECN should already be disabled for both half connections
1971 * as TE_ACE_SETUPRECEIVED flag is not set.
1972 */
1973 if (tp->t_client_accecn_state == tcp_connection_client_accurate_ecn_feature_enabled) {
1974 tp->t_client_accecn_state = tcp_connection_client_ecn_not_available;
1975 }
1976 }
1977 }
1978
1979 static void
tcp_input_process_accecn_last_ack(struct tcpcb * tp,struct tcpopt * to,uint32_t tlen,uint16_t ace_flags,bool syn_cookie_processed)1980 tcp_input_process_accecn_last_ack(struct tcpcb *tp, struct tcpopt *to,
1981 uint32_t tlen, uint16_t ace_flags, bool syn_cookie_processed)
1982 {
1983 if (syn_cookie_processed) {
1984 /* Set AccECN and L4S flags as if these were negotiated successfully. */
1985 if (tp->l4s_enabled) {
1986 tp->ecn_flags |= (TE_ACC_ECN_ON | TE_SENDIPECT);
1987 tcp_set_accurate_ecn(tp);
1988 }
1989 tp->t_aecn.t_rcv_ce_packets = 5;
1990 tp->t_aecn.t_snd_ce_packets = 5;
1991 /* Initialize CE byte counter to 0 */
1992 tp->t_aecn.t_rcv_ce_bytes = tp->t_aecn.t_snd_ce_bytes = 0;
1993 /* Initialize ECT byte counter to 1 to distinguish zeroing of options */
1994 tp->t_aecn.t_rcv_ect1_bytes = tp->t_aecn.t_rcv_ect0_bytes = 1;
1995 tp->t_aecn.t_snd_ect1_bytes = tp->t_aecn.t_snd_ect0_bytes = 1;
1996 }
1997 if (tlen == 0 && to->to_nsacks == 0) {
1998 /*
1999 * ACK for SYN-ACK reflects the state (ECN) in which SYN-ACK packet
2000 * was delivered. Use Table 4 of Accurate ECN draft to decode only
2001 * when a pure ACK with no SACK block is received.
2002 * 0|0|0 will fail Accurate ECN negotiation and disable ECN.
2003 */
2004 switch (ace_flags) {
2005 case (0 | TH_CWR | 0):
2006 /* Non-ECT SYN-ACK was delivered */
2007 tp->t_aecn.t_snd_ce_packets = 5;
2008 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested || syn_cookie_processed) {
2009 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_success;
2010 }
2011 break;
2012 case (0 | TH_CWR | TH_ECE):
2013 /* ECT1 SYN-ACK was delivered, mangling detected */
2014 OS_FALLTHROUGH;
2015 case (TH_AE | 0 | 0):
2016 /* ECT0 SYN-ACK was delivered, mangling detected */
2017 tp->t_aecn.t_snd_ce_packets = 5;
2018 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested || syn_cookie_processed) {
2019 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_success_ect_mangling_detected;
2020 }
2021 break;
2022 case (TH_AE | TH_CWR | 0):
2023 /*
2024 * CE SYN-ACK was delivered, even though mangling happened,
2025 * CE could indicate congestion at a node after mangling occured.
2026 * Set cwnd to 2 segments
2027 */
2028 tp->t_aecn.t_snd_ce_packets = 6;
2029 tp->snd_cwnd = 2 * tp->t_maxseg;
2030 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested || syn_cookie_processed) {
2031 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_success_ect_mangling_detected;
2032 }
2033 break;
2034 case (0 | 0 | 0):
2035 /* Disable ECN, as ACE fields were zeroed */
2036 tp->ecn_flags &= ~(TE_SETUPRECEIVED | TE_SENDIPECT |
2037 TE_SENDCWR | TE_ACE_SETUPRECEIVED);
2038 tcp_set_accurate_ecn(tp);
2039 /*
2040 * Since last ACK has no ECN flag set and TE_LOST_SYNACK is set, this is in response
2041 * to the second (non-ECN setup) SYN-ACK retransmission. In such a case, we assume
2042 * that AccECN SYN-ACK was blackholed.
2043 */
2044 if ((tp->ecn_flags & TE_LOST_SYNACK) && tp->t_rxtshift <= 2 &&
2045 (tp->t_server_accecn_state == tcp_connection_server_classic_ecn_requested ||
2046 tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested ||
2047 syn_cookie_processed)) {
2048 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_blackholed;
2049 }
2050 /*
2051 * SYN-ACK hasn't been retransmitted twice yet, so this could likely mean bleaching of ACE
2052 * on the path from client to server on last ACK.
2053 */
2054 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested) {
2055 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_ace_bleaching_detected;
2056 }
2057 break;
2058 default:
2059 /* Unused values for forward compatibility */
2060 tp->t_aecn.t_snd_ce_packets = 5;
2061 break;
2062 }
2063 /* Update the time for this newly received last ACK */
2064 if ((to->to_flags & TOF_TS) != 0 && (to->to_tsecr != 0) &&
2065 (tp->t_last_ack_tsecr == 0 || TSTMP_GEQ(to->to_tsecr, tp->t_last_ack_tsecr))) {
2066 tp->t_last_ack_tsecr = to->to_tsecr;
2067 }
2068 } else if (to->to_nsacks == 0) {
2069 /*
2070 * If 3rd ACK is lost, we won't receive the last ACK
2071 * encoding. We will move the server to AccECN mode
2072 * regardless.
2073 */
2074 tp->t_aecn.t_snd_ce_packets = 5;
2075 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested || syn_cookie_processed) {
2076 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_success;
2077 }
2078 }
2079 }
2080
2081 static uint32_t
tcp_process_ace_field(struct tcpcb * tp,uint32_t pkts_acked,uint64_t old_sceb,uint8_t ace)2082 tcp_process_ace_field(struct tcpcb *tp, uint32_t pkts_acked, uint64_t old_sceb, uint8_t ace)
2083 {
2084 /* Congestion was experienced if delta_cep > 0 */
2085 uint32_t delta = 0, safe_delta = 0;
2086 delta = (ace + TCP_ACE_DIV -
2087 (tp->t_aecn.t_snd_ce_packets & TCP_ACE_MASK)) & TCP_ACE_MASK;
2088 if (pkts_acked <= TCP_ACE_MASK) {
2089 return delta;
2090 }
2091
2092 uint64_t d_ceb = tp->t_aecn.t_snd_ce_bytes - old_sceb;
2093 safe_delta = pkts_acked - ((pkts_acked - delta) & TCP_ACE_MASK);
2094
2095 if (d_ceb == 0 || d_ceb < safe_delta * tp->t_maxseg >> 1) {
2096 return delta;
2097 }
2098
2099 return safe_delta;
2100 }
2101
2102 /* Returns the number of CE marked bytes */
2103 static uint32_t
tcp_process_accecn_options(struct tcpcb * tp,struct tcpopt * to)2104 tcp_process_accecn_options(struct tcpcb *tp, struct tcpopt *to)
2105 {
2106 int delta = 0;
2107 uint32_t ce_bytes = 0;
2108
2109 if (to->to_num_accecn >= 1) {
2110 delta = ntoh24(to->to_accecn + 0);
2111 if (to->to_accecn_order == 0) {
2112 delta = (delta + TCP_ACO_DIV -
2113 (tp->t_aecn.t_snd_ect0_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
2114 if (delta < 0) {
2115 os_log_error(OS_LOG_DEFAULT, "delta for AccECN0 options (ECT0 bytes) can't be zero");
2116 }
2117 tp->t_aecn.t_snd_ect0_bytes += delta;
2118 } else {
2119 delta = (delta + TCP_ACO_DIV -
2120 (tp->t_aecn.t_snd_ect1_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
2121 if (delta < 0) {
2122 os_log_error(OS_LOG_DEFAULT, "delta for AccECN1 options (ECT1 bytes) can't be zero");
2123 }
2124 tp->t_aecn.t_snd_ect1_bytes += delta;
2125 }
2126 }
2127 if (to->to_num_accecn >= 2) {
2128 delta = ntoh24(to->to_accecn + 1 * TCPOLEN_ACCECN_COUNTER);
2129 delta = (delta + TCP_ACO_DIV -
2130 (tp->t_aecn.t_snd_ce_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
2131 if (delta < 0) {
2132 os_log_error(OS_LOG_DEFAULT, "delta for AccECN options (CE bytes) can't be zero");
2133 }
2134 tp->t_aecn.t_snd_ce_bytes += delta;
2135 ce_bytes = delta;
2136 }
2137 if (to->to_num_accecn >= 3) {
2138 delta = ntoh24(to->to_accecn + 2 * TCPOLEN_ACCECN_COUNTER);
2139 if (to->to_accecn_order == 0) {
2140 delta = (delta + TCP_ACO_DIV -
2141 (tp->t_aecn.t_snd_ect1_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
2142 if (delta < 0) {
2143 os_log_error(OS_LOG_DEFAULT, "delta for AccECN0 options (ECT1 bytes) can't be zero");
2144 }
2145 tp->t_aecn.t_snd_ect1_bytes += delta;
2146 } else {
2147 delta = (delta + TCP_ACO_DIV -
2148 (tp->t_aecn.t_snd_ect0_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
2149 if (delta < 0) {
2150 os_log_error(OS_LOG_DEFAULT, "delta for AccECN1 options (ECT0 bytes) can't be zero");
2151 }
2152 tp->t_aecn.t_snd_ect0_bytes += delta;
2153 }
2154 }
2155
2156 return ce_bytes;
2157 }
2158
2159 static void
tcp_process_accecn(struct tcpcb * tp,struct tcpopt * to,struct tcphdr * th,uint32_t pkts_acked,uint8_t ace)2160 tcp_process_accecn(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
2161 uint32_t pkts_acked, uint8_t ace)
2162 {
2163 if (tp->t_aecn.accecn_processed) {
2164 os_log(OS_LOG_DEFAULT, "already processed AccECN field/options for this ACK");
2165 return;
2166 }
2167
2168 uint64_t old_sceb = tp->t_aecn.t_snd_ce_bytes;
2169 uint32_t new_ce_bytes = tcp_process_accecn_options(tp, to);
2170 uint32_t delta = tcp_process_ace_field(tp, pkts_acked, old_sceb, ace);
2171 tp->t_aecn.t_snd_ce_packets += delta;
2172 tp->t_aecn.t_delta_ce_packets = delta;
2173
2174 /* Update the time for this newly acked data or control packet */
2175 if ((to->to_flags & TOF_TS) != 0 && (to->to_tsecr != 0) &&
2176 TSTMP_GEQ(to->to_tsecr, tp->t_last_ack_tsecr)) {
2177 tp->t_last_ack_tsecr = to->to_tsecr;
2178 }
2179
2180 if (delta > 0) {
2181 tp->ecn_flags |= (TE_INRECOVERY);
2182 tp->total_ect_packets_marked += delta;
2183
2184 /* update the stats */
2185 tcpstat.tcps_ecn_ace_recv_ce += tp->t_aecn.t_delta_ce_packets;
2186 /* CE packets counter start at 5 */
2187 tp->t_ecn_capable_packets_marked = tp->t_aecn.t_snd_ce_packets - 5;
2188 tcp_ccdbg_trace(tp, th, TCP_CC_ECN_RCVD);
2189 }
2190
2191 if (CC_ALGO(tp)->process_ecn != NULL) {
2192 CC_ALGO(tp)->process_ecn(tp, th, new_ce_bytes, tp->total_ect_packets_marked,
2193 tp->total_ect_packets_acked);
2194 }
2195
2196 tp->t_aecn.accecn_processed = 1;
2197 }
2198
2199 static void
tcp_ece_aggressive_heur(struct tcpcb * tp,uint32_t pkts_acked)2200 tcp_ece_aggressive_heur(struct tcpcb *tp, uint32_t pkts_acked)
2201 {
2202 if (tp->ecn_flags & TE_ECEHEURI_SET) {
2203 /* ECN heuristic already determined */
2204 return;
2205 }
2206
2207 tp->t_ecn_recv_ece_pkt += pkts_acked;
2208
2209 if (tp->t_ecn_capable_packets_acked < ECN_MIN_CE_PROBES) {
2210 /* Still in learning phase - insufficient probe data */
2211 return;
2212 }
2213
2214 if (tp->t_ecn_recv_ece_pkt > ECN_MAX_CE_RATIO) {
2215 /* Excessive congestion detected - disable ECN */
2216 tcp_heuristic_ecn_aggressive(tp);
2217 tp->ecn_flags |= TE_ECEHEURI_SET;
2218 tp->ecn_flags &= ~TE_SENDIPECT; /* Disable ECT for future packets */
2219 } else {
2220 /* Path is suitable for ECN */
2221 tp->ecn_flags |= TE_ECEHEURI_SET;
2222 }
2223 }
2224 /*
2225 * Process SYN from clients and create a new connecting socket
2226 * from the listener socket. If the listen queue exceeds a certain
2227 * threshold, then generate a SYN cookie instead.
2228 *
2229 * When SYN cookie is used, this function is also called when
2230 * we receive last ACK from the client to create a new connecting
2231 * socket.
2232 */
2233
2234 bool
tcp_create_server_socket(struct tcp_inp * tpi,struct socket ** so2,bool * syn_cookie_sent,int * dropsocket)2235 tcp_create_server_socket(struct tcp_inp *tpi, struct socket **so2,
2236 bool *syn_cookie_sent, int *dropsocket)
2237 {
2238 #define TCP_LOG_HDR (tpi->isipv6 ? (void *)tpi->ip6 : (void *)tpi->ip)
2239
2240 struct socket *so = tpi->so;
2241 struct tcpcb *otp = *tpi->tp;
2242 struct inpcb *oinp = sotoinpcb(so);
2243 struct tcphdr *th = tpi->th;
2244 struct sockaddr_storage from;
2245 struct sockaddr_storage to2;
2246 struct tcpcb *tp;
2247 struct inpcb *inp;
2248 struct ifnet *head_ifscope;
2249 bool head_nocell, head_recvanyif,
2250 head_noexpensive, head_awdl_unrestricted,
2251 head_intcoproc_allowed, head_external_port,
2252 head_noconstrained, head_management_allowed,
2253 head_ultra_constrained_allowed;
2254 boolean_t check_cfil = cfil_filter_present();
2255
2256 /* Get listener's bound-to-interface, if any */
2257 // TODO check that oinp is same as inp set in tcp_input
2258 head_ifscope = (oinp->inp_flags & INP_BOUND_IF) ?
2259 oinp->inp_boundifp : NULL;
2260 /* Get listener's no-cellular information, if any */
2261 head_nocell = INP_NO_CELLULAR(oinp);
2262 /* Get listener's recv-any-interface, if any */
2263 head_recvanyif = (oinp->inp_flags & INP_RECV_ANYIF);
2264 /* Get listener's no-expensive information, if any */
2265 head_noexpensive = INP_NO_EXPENSIVE(oinp);
2266 head_noconstrained = INP_NO_CONSTRAINED(oinp);
2267 head_awdl_unrestricted = INP_AWDL_UNRESTRICTED(oinp);
2268 head_intcoproc_allowed = INP_INTCOPROC_ALLOWED(oinp);
2269 head_external_port = (oinp->inp_flags2 & INP2_EXTERNAL_PORT);
2270 head_management_allowed = INP_MANAGEMENT_ALLOWED(oinp);
2271 head_ultra_constrained_allowed = INP_ULTRA_CONSTRAINED_ALLOWED(oinp);
2272
2273 if (so->so_filt || check_cfil || TCP_SYN_COOKIE_ENABLED(otp)) {
2274 if (tpi->isipv6) {
2275 struct sockaddr_in6 *sin6 = SIN6(&from);
2276
2277 sin6->sin6_len = sizeof(*sin6);
2278 sin6->sin6_family = AF_INET6;
2279 sin6->sin6_port = th->th_sport;
2280 sin6->sin6_flowinfo = 0;
2281 sin6->sin6_addr = tpi->ip6->ip6_src;
2282 sin6->sin6_scope_id = 0;
2283
2284 sin6 = SIN6(&to2);
2285
2286 sin6->sin6_len = sizeof(struct sockaddr_in6);
2287 sin6->sin6_family = AF_INET6;
2288 sin6->sin6_port = th->th_dport;
2289 sin6->sin6_flowinfo = 0;
2290 sin6->sin6_addr = tpi->ip6->ip6_dst;
2291 sin6->sin6_scope_id = 0;
2292 } else {
2293 struct sockaddr_in *sin = SIN(&from);
2294
2295 sin->sin_len = sizeof(*sin);
2296 sin->sin_family = AF_INET;
2297 sin->sin_port = th->th_sport;
2298 sin->sin_addr = tpi->ip->ip_src;
2299
2300 sin = SIN(&to2);
2301
2302 sin->sin_len = sizeof(struct sockaddr_in);
2303 sin->sin_family = AF_INET;
2304 sin->sin_port = th->th_dport;
2305 sin->sin_addr = tpi->ip->ip_dst;
2306 }
2307 }
2308
2309 if (so->so_filt) {
2310 *so2 = sonewconn(so, 0, SA(&from));
2311 } else {
2312 if (tcp_can_send_syncookie(so, otp, th->th_flags)) {
2313 ASSERT(tpi->to != NULL);
2314
2315 tcp_dooptions(otp, tpi->optp, tpi->optlen, th, tpi->to);
2316 tcp_syncookie_syn(tpi, SA(&to2), SA(&from));
2317 if (syn_cookie_sent) {
2318 *syn_cookie_sent = true;
2319 }
2320 /* Release reference and unlock listener socket */
2321 socket_unlock(so, 1);
2322 /*
2323 * In case of SYN cookies, we don't allocate connected
2324 * socket yet, return success.
2325 */
2326 return true;
2327 } else {
2328 *so2 = sonewconn(so, 0, NULL);
2329 }
2330 }
2331 if (*so2 == 0) {
2332 tcpstat.tcps_listendrop++;
2333 if (tcp_dropdropablreq(so)) {
2334 if (so->so_filt) {
2335 *so2 = sonewconn(so, 0, SA(&from));
2336 } else {
2337 if (tcp_can_send_syncookie(so, otp, th->th_flags)) {
2338 ASSERT(tpi->to != NULL);
2339 tcp_dooptions(otp, tpi->optp, tpi->optlen, th, tpi->to);
2340 tcp_syncookie_syn(tpi, SA(&to2), SA(&from));
2341 if (syn_cookie_sent) {
2342 *syn_cookie_sent = true;
2343 }
2344 /* Release reference and unlock listener socket */
2345 socket_unlock(so, 1);
2346
2347 return true;
2348 } else {
2349 *so2 = sonewconn(so, 0, NULL);
2350 }
2351 }
2352 }
2353 if (*so2 == 0) {
2354 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, otp, false, " listen drop");
2355 goto drop;
2356 }
2357 }
2358
2359 /* Point "inp" and "tp" in tandem to new socket */
2360 *(tpi->inp) = inp = (struct inpcb *)(*so2)->so_pcb;
2361 *(tpi->tp) = tp = intotcpcb(inp);
2362
2363 socket_unlock(so, 0); /* Unlock but keep a reference on listener for now */
2364
2365 socket_lock(*so2, 1);
2366
2367 /*
2368 * Mark socket as temporary until we're
2369 * committed to keeping it. The code at
2370 * ``drop'' and ``dropwithreset'' check the
2371 * flag dropsocket to see if the temporary
2372 * socket created here should be discarded.
2373 * We mark the socket as discardable until
2374 * we're committed to it below in TCPS_LISTEN.
2375 * There are some error conditions in which we
2376 * have to drop the temporary socket.
2377 */
2378 (*dropsocket)++;
2379
2380 /*
2381 * Inherit INP_BOUND_IF from listener; testing if
2382 * head_ifscope is non-NULL is sufficient, since it
2383 * can only be set to a non-zero value earlier if
2384 * the listener has such a flag set.
2385 */
2386 if (head_ifscope != NULL) {
2387 inp->inp_flags |= INP_BOUND_IF;
2388 inp->inp_boundifp = head_ifscope;
2389 } else {
2390 inp->inp_flags &= ~INP_BOUND_IF;
2391 }
2392 /*
2393 * Inherit restrictions from listener.
2394 */
2395 if (head_nocell) {
2396 inp_set_nocellular(inp);
2397 }
2398 if (head_noexpensive) {
2399 inp_set_noexpensive(inp);
2400 }
2401 if (head_noconstrained) {
2402 inp_set_noconstrained(inp);
2403 }
2404 if (head_awdl_unrestricted) {
2405 inp_set_awdl_unrestricted(inp);
2406 }
2407 if (head_intcoproc_allowed) {
2408 inp_set_intcoproc_allowed(inp);
2409 }
2410 if (head_management_allowed) {
2411 inp_set_management_allowed(inp);
2412 }
2413 if (head_ultra_constrained_allowed) {
2414 inp_set_ultra_constrained_allowed(inp);
2415 }
2416 /*
2417 * Inherit {IN,IN6}_RECV_ANYIF from listener.
2418 */
2419 if (head_recvanyif) {
2420 inp->inp_flags |= INP_RECV_ANYIF;
2421 } else {
2422 inp->inp_flags &= ~INP_RECV_ANYIF;
2423 }
2424
2425 if (head_external_port) {
2426 inp->inp_flags2 |= INP2_EXTERNAL_PORT;
2427 }
2428 if (tpi->isipv6) {
2429 inp->in6p_laddr = tpi->ip6->ip6_dst;
2430 inp->inp_lifscope = in6_addr2scopeid(tpi->ifp, &inp->in6p_laddr);
2431 in6_verify_ifscope(&tpi->ip6->ip6_dst, inp->inp_lifscope);
2432 } else {
2433 inp->inp_vflag &= ~INP_IPV6;
2434 inp->inp_vflag |= INP_IPV4;
2435 inp->inp_laddr = tpi->ip->ip_dst;
2436 }
2437 inp->inp_lport = th->th_dport;
2438 if (in_pcbinshash(inp, SA(&from), 0) != 0) {
2439 /*
2440 * Undo the assignments above if we failed to
2441 * put the PCB on the hash lists.
2442 */
2443 if (tpi->isipv6) {
2444 inp->in6p_laddr = in6addr_any;
2445 inp->inp_lifscope = IFSCOPE_NONE;
2446 } else {
2447 inp->inp_laddr.s_addr = INADDR_ANY;
2448 }
2449 #if SKYWALK
2450 netns_release(&inp->inp_netns_token);
2451 #endif /* SKYWALK */
2452 inp->inp_lport = 0;
2453 socket_lock(so, 0); /* release ref on parent */
2454 socket_unlock(so, 1);
2455 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " in_pcbinshash failed");
2456 goto drop;
2457 }
2458 socket_lock(so, 0);
2459 if (tpi->isipv6) {
2460 /*
2461 * Inherit socket options from the listening
2462 * socket.
2463 * Note that in6p_inputopts are not (even
2464 * should not be) copied, since it stores
2465 * previously received options and is used to
2466 * detect if each new option is different than
2467 * the previous one and hence should be passed
2468 * to a user.
2469 * If we copied in6p_inputopts, a user would
2470 * not be able to receive options just after
2471 * calling the accept system call.
2472 */
2473 inp->inp_flags |=
2474 oinp->inp_flags & INP_CONTROLOPTS;
2475 if (oinp->in6p_outputopts) {
2476 inp->in6p_outputopts =
2477 ip6_copypktopts(oinp->in6p_outputopts, Z_NOWAIT);
2478 }
2479 } else {
2480 inp->inp_options = ip_srcroute();
2481 inp->inp_ip_tos = oinp->inp_ip_tos;
2482 }
2483 #if IPSEC
2484 /* copy old policy into new socket's */
2485 if (sotoinpcb(so)->inp_sp) {
2486 int error = 0;
2487 /* Is it a security hole here to silently fail to copy the policy? */
2488 if (inp->inp_sp == NULL) {
2489 error = ipsec_init_policy(*so2, &inp->inp_sp);
2490 }
2491 if (error != 0 || ipsec_copy_policy(sotoinpcb(so)->inp_sp, inp->inp_sp)) {
2492 printf("tcp_input: could not copy policy\n");
2493 }
2494 }
2495 #endif
2496 /* inherit states from the listener */
2497 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2498 struct tcpcb *, tp, int32_t, TCPS_LISTEN);
2499 TCP_LOG_STATE(tp, TCPS_LISTEN);
2500 tp->t_state = TCPS_LISTEN;
2501 tp->t_flags |= otp->t_flags & (TF_NOPUSH | TF_NOOPT | TF_NODELAY);
2502 tp->t_flagsext |= (otp->t_flagsext & (TF_RXTFINDROP | TF_NOTIMEWAIT |
2503 TF_FASTOPEN | TF_L4S_ENABLED | TF_L4S_DISABLED));
2504 tp->t_keepinit = otp->t_keepinit;
2505 tp->t_keepcnt = otp->t_keepcnt;
2506 tp->t_keepintvl = otp->t_keepintvl;
2507 tp->t_adaptive_wtimo = otp->t_adaptive_wtimo;
2508 tp->t_adaptive_rtimo = otp->t_adaptive_rtimo;
2509 tp->t_inpcb->inp_ip_ttl = otp->t_inpcb->inp_ip_ttl;
2510 if (((*so2)->so_flags & SOF_NOTSENT_LOWAT) != 0) {
2511 tp->t_notsent_lowat = otp->t_notsent_lowat;
2512 }
2513 if (tp->t_flagsext & (TF_L4S_ENABLED | TF_L4S_DISABLED)) {
2514 tcp_set_foreground_cc(*so2);
2515 }
2516 tp->t_inpcb->inp_flags2 |=
2517 otp->t_inpcb->inp_flags2 & INP2_KEEPALIVE_OFFLOAD;
2518
2519 /* now drop the reference on the listener */
2520 socket_unlock(so, 1);
2521
2522 tp->request_r_scale = tcp_get_max_rwinscale(tp, *so2);
2523
2524 #if CONTENT_FILTER
2525 if (check_cfil) {
2526 int error = cfil_sock_attach(*so2, SA(&to2), SA(&from), CFS_CONNECTION_DIR_IN);
2527 if (error != 0) {
2528 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " cfil_sock_attach failed");
2529 goto drop;
2530 }
2531 }
2532 #endif /* CONTENT_FILTER */
2533
2534 KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_END, 0, 0, 0, 0, 0);
2535
2536 return true;
2537 drop:
2538 return false;
2539 #undef TCP_LOG_HDR
2540 }
2541
2542 /*
2543 * This function is used to setup TCP server socket in either of below cases,
2544 * 1. SYN cookie is disabled and SYN is received.
2545 * 2. SYN cookie is enabled and SYN cookie is received with last ACK
2546 * Socket MUST already be created before this function is called.
2547 * It returns true for success and false for failure.
2548 */
2549 bool
tcp_setup_server_socket(struct tcp_inp * tpi,struct socket * so,bool syn_cookie_used)2550 tcp_setup_server_socket(struct tcp_inp *tpi, struct socket *so, bool syn_cookie_used)
2551 {
2552 #define TCP_LOG_HDR (tpi->isipv6 ? (void *)tpi->ip6 : (void *)tpi->ip)
2553
2554 struct inpcb *inp = *tpi->inp;
2555 struct tcpcb *tp = *tpi->tp;
2556 struct sockaddr_in *sin;
2557 struct sockaddr_in6 *sin6;
2558 int error = 0;
2559 struct in_addr laddr;
2560 struct in6_addr laddr6;
2561
2562 socket_lock_assert_owned(so);
2563
2564 /* Clear the logging flags inherited from the listening socket */
2565 inp->inp_log_flags = 0;
2566 inp->inp_flags2 &= ~INP2_LOGGING_ENABLED;
2567
2568 if (__improbable(inp->inp_flags2 & INP2_BIND_IN_PROGRESS)) {
2569 TCP_LOG_DROP_PCB(TCP_LOG_HDR, tpi->th, tp, false, "LISTEN bind in progress");
2570
2571 return false;
2572 }
2573 inp_enter_bind_in_progress(so);
2574
2575 if (tpi->isipv6) {
2576 sin6 = kalloc_type(struct sockaddr_in6, Z_NOWAIT | Z_ZERO);
2577 if (sin6 == NULL) {
2578 error = ENOMEM;
2579 TCP_LOG_DROP_PCB(TCP_LOG_HDR, tpi->th, tp, false, "LISTEN kalloc_type failed");
2580 goto pcbconnect_done;
2581 }
2582 sin6->sin6_family = AF_INET6;
2583 sin6->sin6_len = sizeof(*sin6);
2584 sin6->sin6_addr = tpi->ip6->ip6_src;
2585 sin6->sin6_port = tpi->th->th_sport;
2586 if (!in6_embedded_scope && IN6_IS_SCOPE_EMBED(&tpi->ip6->ip6_src)) {
2587 sin6->sin6_scope_id = ip6_input_getsrcifscope(tpi->m);
2588 }
2589 laddr6 = inp->in6p_laddr;
2590 uint32_t lifscope = inp->inp_lifscope;
2591 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
2592 inp->in6p_laddr = tpi->ip6->ip6_dst;
2593 inp->inp_lifscope = in6_addr2scopeid(tpi->ifp, &inp->in6p_laddr);
2594 in6_verify_ifscope(&inp->in6p_laddr, inp->inp_lifscope);
2595 }
2596 if ((error = in6_pcbconnect(inp, SA(sin6), tpi->kernel_proc)) != 0) {
2597 inp->in6p_laddr = laddr6;
2598 kfree_type(struct sockaddr_in6, sin6);
2599 inp->inp_lifscope = lifscope;
2600 in6_verify_ifscope(&inp->in6p_laddr, inp->inp_lifscope);
2601 TCP_LOG_DROP_PCB(TCP_LOG_HDR, tpi->th, tp, false, " LISTEN in6_pcbconnect failed");
2602 goto pcbconnect_done;
2603 }
2604 kfree_type(struct sockaddr_in6, sin6);
2605 } else {
2606 socket_lock_assert_owned(so);
2607 sin = kalloc_type(struct sockaddr_in, Z_NOWAIT);
2608 if (sin == NULL) {
2609 error = ENOMEM;
2610 TCP_LOG_DROP_PCB(TCP_LOG_HDR, tpi->th, tp, false, "LISTEN kalloc_type failed");
2611 goto pcbconnect_done;
2612 }
2613 sin->sin_family = AF_INET;
2614 sin->sin_len = sizeof(*sin);
2615 sin->sin_addr = tpi->ip->ip_src;
2616 sin->sin_port = tpi->th->th_sport;
2617 bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));
2618 laddr = inp->inp_laddr;
2619 if (inp->inp_laddr.s_addr == INADDR_ANY) {
2620 inp->inp_laddr = tpi->ip->ip_dst;
2621 }
2622 if ((error = in_pcbconnect(inp, SA(sin), tpi->kernel_proc, IFSCOPE_NONE, NULL)) != 0) {
2623 inp->inp_laddr = laddr;
2624 kfree_type(struct sockaddr_in, sin);
2625 TCP_LOG_DROP_PCB(TCP_LOG_HDR, tpi->th, tp, false, " LISTEN in_pcbconnect failed");
2626 goto pcbconnect_done;
2627 }
2628 kfree_type(struct sockaddr_in, sin);
2629 }
2630 pcbconnect_done:
2631 inp_exit_bind_in_progress(so);
2632 if (error != 0) {
2633 return false;
2634 }
2635 /*
2636 * We already processed the options just before calling
2637 * tcp_syncookie_ack. If timestamp option is present in
2638 * last ACK, then we assume that it was already negotiated
2639 * during SYN/ACK. For other options, we derive the state
2640 * from the cookie.
2641 */
2642 if (syn_cookie_used) {
2643 tpi->to->to_flags |= TOF_SCALE;
2644 tpi->to->to_wscale = MIN(tpi->peer_wscale, TCP_MAX_WINSHIFT);
2645 tpi->to->to_mss = tpi->peer_mss;
2646 tpi->to->to_flags |= TOF_MSS;
2647
2648 if (tpi->sackok == 1) {
2649 tpi->to->to_flags |= TOF_SACKPERM;
2650 }
2651 if (tpi->ecnok == 1) {
2652 tp->ecn_flags |= (TE_ECN_ON | TE_SENDIPECT);
2653 }
2654 }
2655 /* Get timestamp and other options that are in either:
2656 * SYN, when SYN cookies are disabled
2657 * OR last ACK, when SYN cookies are enabled
2658 */
2659 if (tpi->optp != NULL) {
2660 tcp_dooptions(tp, tpi->optp, tpi->optlen, tpi->th, tpi->to);
2661 }
2662 tcp_finalize_options(tp, tpi->to, tpi->ifscope);
2663
2664 if (tpi->iss) {
2665 tp->iss = tpi->iss;
2666 } else {
2667 tp->iss = tcp_new_isn(tp);
2668 }
2669 if (syn_cookie_used) {
2670 tp->irs = tpi->irs;
2671 } else {
2672 tp->irs = tpi->th->th_seq;
2673 }
2674 if (tpi->ts_offset) {
2675 tp->t_ts_offset = tpi->ts_offset;
2676 /* Adjust received tsecr when SYN cookie is used */
2677 tpi->to->to_tsecr -= tpi->ts_offset;
2678 }
2679 tcp_sendseqinit(tp);
2680 tcp_rcvseqinit(tp);
2681 tp->snd_recover = tp->snd_una;
2682 /*
2683 * Initialization of the tcpcb for transaction;
2684 * set SND.WND = SEG.WND,
2685 * initialize CCsend and CCrecv.
2686 */
2687 tp->snd_wnd = tpi->tiwin; /* initial send-window */
2688 tp->max_sndwnd = tp->snd_wnd;
2689 tp->t_flags |= TF_ACKNOW;
2690 tp->t_unacksegs = 0;
2691 tp->t_unacksegs_ce = 0;
2692 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2693 struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
2694 TCP_LOG_STATE(tp, TCPS_SYN_RECEIVED);
2695
2696 tp->t_state = TCPS_SYN_RECEIVED;
2697 tp->t_timer[TCPT_KEEP] = tcp_offset_from_start(tp,
2698 TCP_CONN_KEEPINIT(tp));
2699 tp->t_connect_time = tcp_now;
2700
2701 if (inp->inp_flowhash == 0) {
2702 inp_calc_flowhash(inp);
2703 ASSERT(inp->inp_flowhash != 0);
2704 }
2705 /* update flowinfo - RFC 6437 */
2706 if (inp->inp_flow == 0 &&
2707 inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
2708 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
2709 inp->inp_flow |=
2710 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
2711 }
2712
2713 /* reset the incomp processing flag */
2714 so->so_flags &= ~(SOF_INCOMP_INPROGRESS);
2715 tcpstat.tcps_accepts++;
2716
2717 if (!syn_cookie_used) {
2718 int ace_flags = ((tpi->th->th_x2 << 8) | tpi->th->th_flags) & TH_ACE;
2719 tcp_input_process_accecn_syn(tp, ace_flags, tpi->ip_ecn);
2720 }
2721 /*
2722 * The address and connection state are finalized
2723 */
2724 TCP_LOG_CONNECT(tp, false, 0);
2725
2726 tcp_add_fsw_flow(tp, tpi->ifp);
2727
2728 return true;
2729 #undef TCP_LOG_HDR
2730 }
2731
2732 static void
tcp_input_process_wake_packet(__unused struct mbuf * m,__unused protocol_family_t protocol_family,struct inpcb * inp)2733 tcp_input_process_wake_packet(__unused struct mbuf *m, __unused protocol_family_t protocol_family, struct inpcb *inp)
2734 {
2735 struct ifnet *ifp = m->m_pkthdr.rcvif;
2736
2737 /*
2738 * Note: we will stay in LPW if the TCP packet is invalid or have not found a PCB
2739 */
2740 if (__improbable(if_is_lpw_enabled(ifp))) {
2741 if (inp->inp_flags2 & INP2_CONNECTION_IDLE) {
2742 struct tcpcb *tp = intotcpcb(inp);
2743 TCP_LOG(tp, "LPW drop TCP connection idle");
2744 tcp_drop(tp, 0);
2745 } else {
2746 if_exit_lpw(ifp, "TCP connection not idle ");
2747 }
2748 }
2749 }
2750
2751 void
tcp_input(struct mbuf * m,int off0)2752 tcp_input(struct mbuf *m, int off0)
2753 {
2754 int exiting_fr = 0;
2755 struct tcphdr *th;
2756 struct ip *ip = NULL;
2757 struct inpcb *__single inp;
2758 u_char *optp = NULL;
2759 int optlen = 0;
2760 int tlen, off;
2761 int drop_hdrlen;
2762 struct tcpcb *__single tp = 0;
2763 int thflags;
2764 struct socket *so = 0;
2765 int todrop, acked = 0, ourfinisacked, needoutput = 0;
2766 int read_wakeup = 0;
2767 int write_wakeup = 0;
2768 int dropsocket = 0;
2769 int iss = 0, nosock = 0;
2770 uint32_t tiwin, sack_bytes_acked = 0;
2771 uint32_t highest_sacked_seq = 0;
2772 struct tcpopt to; /* options in this segment */
2773 u_char ip_ecn = IPTOS_ECN_NOTECT;
2774 unsigned int ifscope;
2775 uint8_t isconnected, isdisconnected;
2776 struct ifnet *ifp = m->m_pkthdr.rcvif;
2777 int segment_count = m->m_pkthdr.rx_seg_cnt ? : 1;
2778 int win;
2779 u_int16_t pf_tag = 0;
2780 #if MPTCP
2781 struct mptcb *mp_tp = NULL;
2782 #endif /* MPTCP */
2783 stats_functional_type ifnet_count_type = IFNET_COUNT_TYPE(ifp);
2784 boolean_t recvd_dsack = FALSE;
2785 boolean_t dsack_tlp = false;
2786 struct tcp_respond_args tra;
2787 int prev_t_state;
2788 bool findpcb_iterated = false;
2789 bool rack_loss_detected = false;
2790 bool is_th_swapped = false;
2791 bool syn_cookie_processed = false;
2792 bool ret = false;
2793 /*
2794 * The mbuf may be freed after it has been added to the receive socket
2795 * buffer or the reassembly queue, so we reinitialize th to point to a
2796 * safe copy of the TCP header
2797 */
2798 struct tcphdr saved_tcphdr = {};
2799 /*
2800 * Save copy of the IPv4/IPv6 header.
2801 * Note: use array of uint32_t to silence compiler warning when casting
2802 * to a struct ip6_hdr pointer.
2803 */
2804 #define MAX_IPWORDS ((sizeof(struct ip) + MAX_IPOPTLEN) / sizeof(uint32_t))
2805 uint32_t saved_hdr[MAX_IPWORDS];
2806
2807 #define TCP_INC_VAR(stat, npkts) do { \
2808 stat += npkts; \
2809 } while (0)
2810 drop_reason_t drop_reason = DROP_REASON_UNSPECIFIED;
2811
2812 TCP_INC_VAR(tcpstat.tcps_rcvtotal, segment_count);
2813
2814 struct ip6_hdr *ip6 = NULL;
2815 int isipv6;
2816 struct proc *kernel_proc = current_proc();
2817
2818 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
2819
2820 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
2821 bzero((char *)&to, sizeof(to));
2822
2823 m_add_crumb(m, PKT_CRUMB_TCP_INPUT);
2824
2825 if (m->m_flags & M_PKTHDR) {
2826 pf_tag = m_pftag(m)->pftag_tag;
2827 }
2828
2829 if (isipv6) {
2830 /*
2831 * Expect 32-bit aligned data pointer on
2832 * strict-align platforms
2833 */
2834 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2835
2836 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
2837 ip6 = mtod(m, struct ip6_hdr *);
2838 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
2839 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
2840
2841 if (tcp_input_checksum(AF_INET6, m, th, off0, tlen)) {
2842 TCP_LOG_DROP_PKT(ip6, th, ifp, "IPv6 bad tcp checksum");
2843 drop_reason = DROP_REASON_TCP_CHECKSUM_INCORRECT;
2844 goto dropnosock;
2845 }
2846
2847 KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
2848 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
2849 th->th_seq, th->th_ack, th->th_win);
2850 /*
2851 * Be proactive about unspecified IPv6 address in source.
2852 * As we use all-zero to indicate unbounded/unconnected pcb,
2853 * unspecified IPv6 address can be used to confuse us.
2854 *
2855 * Note that packets with unspecified IPv6 destination is
2856 * already dropped in ip6_input.
2857 */
2858 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
2859 /* XXX stat */
2860 IF_TCP_STATINC(ifp, unspecv6);
2861 TCP_LOG_DROP_PKT(ip6, th, ifp, "src IPv6 address unspecified");
2862 drop_reason = DROP_REASON_TCP_SRC_ADDR_UNSPECIFIED;
2863 goto dropnosock;
2864 }
2865 DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
2866 struct ip6_hdr *, ip6, struct tcpcb *, NULL,
2867 struct tcphdr *, th);
2868
2869 ip_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
2870 } else {
2871 /*
2872 * Get IP and TCP header together in first mbuf.
2873 * Note: IP leaves IP header in first mbuf.
2874 */
2875 if (off0 > sizeof(struct ip)) {
2876 ip_stripoptions(m);
2877 off0 = sizeof(struct ip);
2878 }
2879 if (m->m_len < sizeof(struct tcpiphdr)) {
2880 if ((m = m_pullup(m, sizeof(struct tcpiphdr))) == 0) {
2881 tcpstat.tcps_rcvshort++;
2882 return;
2883 }
2884 }
2885
2886 /* Expect 32-bit aligned data pointer on strict-align platforms */
2887 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2888
2889 ip = mtod(m, struct ip *);
2890 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
2891 tlen = ip->ip_len;
2892
2893 if (tcp_input_checksum(AF_INET, m, th, off0, tlen)) {
2894 TCP_LOG_DROP_PKT(ip, th, ifp, "IPv4 bad tcp checksum");
2895 drop_reason = DROP_REASON_TCP_CHECKSUM_INCORRECT;
2896 goto dropnosock;
2897 }
2898
2899 /* Re-initialization for later version check */
2900 ip->ip_v = IPVERSION;
2901 ip_ecn = (ip->ip_tos & IPTOS_ECN_MASK);
2902
2903 DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
2904 struct ip *, ip, struct tcpcb *, NULL, struct tcphdr *, th);
2905
2906 KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
2907 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
2908 th->th_seq, th->th_ack, th->th_win);
2909 }
2910
2911 #define TCP_LOG_HDR (isipv6 ? (void *)ip6 : (void *)ip)
2912
2913 /*
2914 * Check that TCP offset makes sense,
2915 * pull out TCP options and adjust length.
2916 */
2917 off = th->th_off << 2;
2918 if (off < sizeof(struct tcphdr) || off > tlen) {
2919 tcpstat.tcps_rcvbadoff++;
2920 IF_TCP_STATINC(ifp, badformat);
2921 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "bad tcp offset");
2922 drop_reason = DROP_REASON_TCP_OFFSET_INCORRECT;
2923 goto dropnosock;
2924 }
2925 tlen -= off; /* tlen is used instead of ti->ti_len */
2926 if (off > sizeof(struct tcphdr)) {
2927 if (isipv6) {
2928 IP6_EXTHDR_CHECK(m, off0, off, return );
2929 ip6 = mtod(m, struct ip6_hdr *);
2930 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
2931 } else {
2932 if (m->m_len < sizeof(struct ip) + off) {
2933 if ((m = m_pullup(m, sizeof(struct ip) + off)) == 0) {
2934 tcpstat.tcps_rcvshort++;
2935 return;
2936 }
2937 ip = mtod(m, struct ip *);
2938 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
2939 }
2940 }
2941 optlen = off - sizeof(struct tcphdr);
2942 optp = (u_char *)(th + 1);
2943 /*
2944 * Do quick retrieval of timestamp options ("options
2945 * prediction?"). If timestamp is the only option and it's
2946 * formatted as recommended in RFC 1323 appendix A, we
2947 * quickly get the values now and not bother calling
2948 * tcp_dooptions(), etc.
2949 */
2950 if ((optlen == TCPOLEN_TSTAMP_APPA ||
2951 (optlen > TCPOLEN_TSTAMP_APPA &&
2952 optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
2953 *(u_int32_t *)(void *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
2954 (th->th_flags & TH_SYN) == 0) {
2955 to.to_flags |= TOF_TS;
2956 to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4));
2957 to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8));
2958 optp = NULL; /* we've parsed the options */
2959 optlen = 0;
2960 }
2961 }
2962 thflags = th->th_flags;
2963
2964 /*
2965 * Drop all packets with both the SYN and FIN bits set.
2966 * This prevents e.g. nmap from identifying the TCP/IP stack.
2967 *
2968 * This is a violation of the TCP specification.
2969 */
2970 if ((thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) {
2971 IF_TCP_STATINC(ifp, synfin);
2972 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "drop SYN FIN");
2973 drop_reason = DROP_REASON_TCP_SYN_FIN;
2974 goto dropnosock;
2975 }
2976
2977 /*
2978 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
2979 * until after ip6_savecontrol() is called and before other functions
2980 * which don't want those proto headers.
2981 * Because ip6_savecontrol() is going to parse the mbuf to
2982 * search for data to be passed up to user-land, it wants mbuf
2983 * parameters to be unchanged.
2984 */
2985 drop_hdrlen = off0 + off;
2986
2987 /* Since this is an entry point for input processing of tcp packets, we
2988 * can update the tcp clock here.
2989 */
2990 calculate_tcp_clock();
2991
2992 /*
2993 * Record the interface where this segment arrived on; this does not
2994 * affect normal data output (for non-detached TCP) as it provides a
2995 * hint about which route and interface to use for sending in the
2996 * absence of a PCB, when scoped routing (and thus source interface
2997 * selection) are enabled.
2998 */
2999 if ((m->m_pkthdr.pkt_flags & PKTF_LOOP) || m->m_pkthdr.rcvif == NULL) {
3000 ifscope = IFSCOPE_NONE;
3001 } else {
3002 ifscope = m->m_pkthdr.rcvif->if_index;
3003 }
3004
3005 /*
3006 * Convert TCP protocol specific fields to host format.
3007 */
3008
3009 #if BYTE_ORDER != BIG_ENDIAN
3010 NTOHL(th->th_seq);
3011 NTOHL(th->th_ack);
3012 NTOHS(th->th_win);
3013 NTOHS(th->th_urp);
3014 is_th_swapped = true;
3015 #endif
3016
3017 /*
3018 * Locate pcb for segment.
3019 */
3020 findpcb:
3021
3022 isconnected = FALSE;
3023 isdisconnected = FALSE;
3024
3025 if (isipv6) {
3026 inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport, ip6_input_getsrcifscope(m),
3027 &ip6->ip6_dst, th->th_dport, ip6_input_getdstifscope(m), 1,
3028 m->m_pkthdr.rcvif);
3029 } else {
3030 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
3031 ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif);
3032 }
3033
3034 /*
3035 * Use the interface scope information from the PCB for outbound
3036 * segments. If the PCB isn't present and if scoped routing is
3037 * enabled, tcp_respond will use the scope of the interface where
3038 * the segment arrived on.
3039 */
3040 if (inp != NULL && (inp->inp_flags & INP_BOUND_IF)) {
3041 ifscope = inp->inp_boundifp->if_index;
3042 }
3043
3044 /*
3045 * If the state is CLOSED (i.e., TCB does not exist) then
3046 * all data in the incoming segment is discarded.
3047 * If the TCB exists but is in CLOSED state, it is embryonic,
3048 * but should either do a listen or a connect soon.
3049 */
3050 if (inp == NULL) {
3051 if (log_in_vain) {
3052 char dbuf[MAX_IPv6_STR_LEN], sbuf[MAX_IPv6_STR_LEN];
3053
3054 if (isipv6) {
3055 inet_ntop(AF_INET6, &ip6->ip6_dst, dbuf, sizeof(dbuf));
3056 inet_ntop(AF_INET6, &ip6->ip6_src, sbuf, sizeof(sbuf));
3057 } else {
3058 inet_ntop(AF_INET, &ip->ip_dst, dbuf, sizeof(dbuf));
3059 inet_ntop(AF_INET, &ip->ip_src, sbuf, sizeof(sbuf));
3060 }
3061 switch (log_in_vain) {
3062 case 1:
3063 if (thflags & TH_SYN) {
3064 log(LOG_INFO,
3065 "Connection attempt to TCP %s:%d from %s:%d\n",
3066 dbuf, ntohs(th->th_dport),
3067 sbuf,
3068 ntohs(th->th_sport));
3069 }
3070 break;
3071 case 2:
3072 log(LOG_INFO,
3073 "Connection attempt to TCP %s:%d from %s:%d flags:0x%x\n",
3074 dbuf, ntohs(th->th_dport), sbuf,
3075 ntohs(th->th_sport), thflags);
3076 break;
3077 case 3:
3078 case 4:
3079 if ((thflags & TH_SYN) && !(thflags & TH_ACK) &&
3080 !(m->m_flags & (M_BCAST | M_MCAST)) &&
3081 ((isipv6 && !in6_are_addr_equal_scoped(&ip6->ip6_dst, &ip6->ip6_src, ip6_input_getdstifscope(m), ip6_input_getsrcifscope(m))) ||
3082 (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr))) {
3083 log_in_vain_log((LOG_INFO,
3084 "Stealth Mode connection attempt to TCP %s:%d from %s:%d\n",
3085 dbuf, ntohs(th->th_dport),
3086 sbuf,
3087 ntohs(th->th_sport)));
3088 }
3089 break;
3090 default:
3091 break;
3092 }
3093 }
3094 if (blackhole) {
3095 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP) {
3096 switch (blackhole) {
3097 case 1:
3098 if (thflags & TH_SYN) {
3099 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "blackhole 1 syn for closed port");
3100 goto dropnosock;
3101 }
3102 break;
3103 case 2:
3104 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "blackhole 2 closed port");
3105 goto dropnosock;
3106 default:
3107 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "blackhole closed port");
3108 goto dropnosock;
3109 }
3110 }
3111 }
3112 if ((tcp_link_heuristics_flags & TCP_LINK_HEUR_STEALTH) != 0 &&
3113 if_link_heuristics_enabled(ifp)) {
3114 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "link heuristics");
3115 IF_TCP_STATINC(ifp, linkheur_stealthdrop);
3116 goto dropnosock;
3117 }
3118 IF_TCP_STATINC(ifp, noconnnolist);
3119 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "closed port");
3120 goto dropwithresetnosock;
3121 }
3122 so = inp->inp_socket;
3123 if (so == NULL) {
3124 /* This case shouldn't happen as the socket shouldn't be null
3125 * if inp_state isn't set to INPCB_STATE_DEAD
3126 * But just in case, we pretend we didn't find the socket if we hit this case
3127 * as this isn't cause for a panic (the socket might be leaked however)...
3128 */
3129 inp = NULL;
3130 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "inp_socket NULL");
3131 drop_reason = DROP_REASON_TCP_NO_SOCK;
3132 goto dropnosock;
3133 }
3134
3135 socket_lock(so, 1);
3136 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3137 socket_unlock(so, 1);
3138 inp = NULL; // pretend we didn't find it
3139 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "inp state WNT_STOPUSING");
3140 drop_reason = DROP_REASON_TCP_NO_SOCK;
3141 goto dropnosock;
3142 }
3143
3144 if (!isipv6 && inp->inp_faddr.s_addr != INADDR_ANY) {
3145 if (inp->inp_faddr.s_addr != ip->ip_src.s_addr ||
3146 inp->inp_laddr.s_addr != ip->ip_dst.s_addr ||
3147 inp->inp_fport != th->th_sport ||
3148 inp->inp_lport != th->th_dport) {
3149 os_log_error(OS_LOG_DEFAULT, "%s 5-tuple does not match: %u:%u %u:%u\n",
3150 __func__,
3151 ntohs(inp->inp_fport), ntohs(th->th_sport),
3152 ntohs(inp->inp_lport), ntohs(th->th_dport));
3153 if (findpcb_iterated) {
3154 drop_reason = DROP_REASON_TCP_PCB_MISMATCH;
3155 goto drop;
3156 }
3157 findpcb_iterated = true;
3158 socket_unlock(so, 1);
3159 inp = NULL;
3160 goto findpcb;
3161 }
3162 } else if (isipv6 && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
3163 if (!in6_are_addr_equal_scoped(&inp->in6p_faddr, &ip6->ip6_src, inp->inp_fifscope, ip6_input_getsrcifscope(m)) ||
3164 !in6_are_addr_equal_scoped(&inp->in6p_laddr, &ip6->ip6_dst, inp->inp_lifscope, ip6_input_getdstifscope(m)) ||
3165 inp->inp_fport != th->th_sport ||
3166 inp->inp_lport != th->th_dport) {
3167 os_log_error(OS_LOG_DEFAULT, "%s 5-tuple does not match: %u:%u %u:%u\n",
3168 __func__,
3169 ntohs(inp->inp_fport), ntohs(th->th_sport),
3170 ntohs(inp->inp_lport), ntohs(th->th_dport));
3171 if (findpcb_iterated) {
3172 drop_reason = DROP_REASON_TCP_PCB_MISMATCH;
3173 goto drop;
3174 }
3175 findpcb_iterated = true;
3176 socket_unlock(so, 1);
3177 inp = NULL;
3178 goto findpcb;
3179 }
3180 }
3181
3182 tp = intotcpcb(inp);
3183 if (tp == NULL) {
3184 IF_TCP_STATINC(ifp, noconnlist);
3185 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "tp is NULL");
3186 drop_reason = DROP_REASON_TCP_NO_PCB;
3187 goto dropwithreset;
3188 }
3189
3190 /* Now that we found the tcpcb, we can adjust the TCP timestamp */
3191 if (to.to_flags & TOF_TS) {
3192 to.to_tsecr -= tp->t_ts_offset;
3193 }
3194
3195 if (tp->t_state == TCPS_CLOSED) {
3196 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "tp state TCPS_CLOSED");
3197 drop_reason = DROP_REASON_TCP_CLOSED;
3198 goto drop;
3199 }
3200
3201 /*
3202 * Note: we will stay in LPW if the TCP packet is invalid or have not found a PCB
3203 */
3204 if (__improbable((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) != 0)) {
3205 tcp_input_process_wake_packet(m, isipv6 ? PF_INET6 : PF_INET, inp);
3206 }
3207
3208 #if NECP
3209 if (so->so_state & SS_ISCONNECTED) {
3210 // Connected TCP sockets have a fully-bound local and remote,
3211 // so the policy check doesn't need to override addresses
3212 if (!necp_socket_is_allowed_to_send_recv(inp, ifp, pf_tag, NULL, NULL, NULL, NULL)) {
3213 TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false);
3214 IF_TCP_STATINC(ifp, badformat);
3215 drop_reason = DROP_REASON_TCP_NECP;
3216 goto drop;
3217 }
3218 } else {
3219 /*
3220 * If the proc_uuid_policy table has been updated since the last use
3221 * of the listening socket (i.e., the proc_uuid_policy_table_gencount
3222 * has been updated), the flags in the socket may be out of date.
3223 * If INP2_WANT_APP_POLICY is stale, inbound packets may
3224 * be dropped by NECP if the socket should now match a per-app
3225 * exception policy.
3226 * In order to avoid this refresh the proc_uuid_policy state to
3227 * potentially recalculate the socket's flags before checking
3228 * with NECP.
3229 */
3230 (void) inp_update_policy(inp);
3231
3232 if (isipv6) {
3233 if (!necp_socket_is_allowed_to_send_recv_v6(inp,
3234 th->th_dport, th->th_sport, &ip6->ip6_dst,
3235 &ip6->ip6_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) {
3236 TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false);
3237 IF_TCP_STATINC(ifp, badformat);
3238 drop_reason = DROP_REASON_TCP_NECP;
3239 goto drop;
3240 }
3241 } else {
3242 if (!necp_socket_is_allowed_to_send_recv_v4(inp,
3243 th->th_dport, th->th_sport, &ip->ip_dst, &ip->ip_src,
3244 ifp, pf_tag, NULL, NULL, NULL, NULL)) {
3245 TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false);
3246 IF_TCP_STATINC(ifp, badformat);
3247 drop_reason = DROP_REASON_TCP_NECP;
3248 goto drop;
3249 }
3250 }
3251 }
3252 #endif /* NECP */
3253
3254 prev_t_state = tp->t_state;
3255
3256 /* If none of the FIN|SYN|RST|ACK flag is set, drop */
3257 if ((thflags & TH_ACCEPT) == 0) {
3258 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 TH_ACCEPT == 0");
3259 drop_reason = DROP_REASON_TCP_FLAGS_INCORRECT;
3260 goto drop;
3261 }
3262
3263 /* Initialize highest sacked seq to avoid using 0 as initial value */
3264 highest_sacked_seq = th->th_ack;
3265
3266 /* Unscale the window into a 32-bit value. */
3267 if ((thflags & TH_SYN) == 0) {
3268 tiwin = th->th_win << tp->snd_scale;
3269 } else {
3270 tiwin = th->th_win;
3271 }
3272
3273 /* Avoid processing packets while closing a listen socket */
3274 if (tp->t_state == TCPS_LISTEN &&
3275 (so->so_options & SO_ACCEPTCONN) == 0) {
3276 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "closing a listening socket");
3277 drop_reason = DROP_REASON_TCP_LISTENER_CLOSING;
3278 goto drop;
3279 }
3280
3281 if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
3282 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_WAKE_PKT);
3283 }
3284
3285 if (so->so_options & SO_ACCEPTCONN) {
3286 struct socket *__single so2;
3287 /*
3288 * Initialize with fields common to both case:
3289 * 1. SYN is received
3290 * 2. Last ACK is received for listening socket (when SYN cookie is enabled)
3291 */
3292 struct tcp_inp tpi = {.so = so, .inp = &inp, .tp = &tp, .m = m, .th = th,
3293 .to = &to, .optp = optp, .optlen = optlen, .ip6 = ip6, .ip = ip,
3294 .isipv6 = isipv6, .ifp = ifp, .ifscope = ifscope, .kernel_proc = kernel_proc};
3295 /*
3296 * When SYN cookie is enabled, check for an existing connection
3297 * attempt if the flag is only ACK. A successful lookup creates a new
3298 * socket appended to the listen queue in SYN_RECEIVED state.
3299 */
3300 if (TCP_SYN_COOKIE_ENABLED(tp) && (thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) {
3301 /*
3302 * Pull initial sequence numbers out of last ACK and
3303 * revert sequence number advances. Populate other fields
3304 * needed to create and setup the server socket.
3305 */
3306 tpi.iss = th->th_ack - 1;
3307 tpi.irs = th->th_seq - 1;
3308 tpi.tiwin = tiwin;
3309 tpi.ip_ecn = ip_ecn;
3310 ret = tcp_syncookie_ack(&tpi, &so2, &dropsocket);
3311 if (so2 == NULL) {
3312 /* Either ACK was sent to listener after connection was closed or cookie validation failed or we could not allocate a socket */
3313 tcpstat.tcps_listendrop++;
3314 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " listener dropped ACK while SYN cookies were enabled");
3315 drop_reason = DROP_REASON_TCP_LISTENER_DROP;
3316 goto dropwithreset;
3317 }
3318 /* Set so to newly connected socket */
3319 so = so2;
3320 if (ret == false) {
3321 /*
3322 * There are multiple reasons for tcp_syncookie_ack() to return
3323 * failure even if server socket was created successfully
3324 * 1. During server socket creation, if we failed to put the
3325 * PCB on the hash lists or cfil_sock_attach failed.
3326 * 2. During server socket setup, if in_pcbconnect failed.
3327 * Need to check th behavior when ACK was not for our
3328 * SYN/ACK. Do our protection against double ACK. If peer
3329 * sent us 2 ACKs, then for the first one tcp_syncookie_ack()
3330 * successfully creates a connected socket, while we were
3331 * waiting on the inpcb lock.
3332 */
3333 drop_reason = DROP_REASON_TCP_LISTENER_DROP;
3334 goto drop;
3335 }
3336
3337 /* Point "inp" and "tp" in tandem to new socket */
3338 inp = (struct inpcb *)so->so_pcb;
3339 tp = intotcpcb(inp);
3340 syn_cookie_processed = true;
3341 /*
3342 * New connection inpcb is already locked by
3343 * tcp_syncookie_ack() when it calls tcp_create_server_socket.
3344 */
3345 ASSERT(tp->t_state == TCPS_SYN_RECEIVED);
3346 /*
3347 * Process the segment and the data it
3348 * contains.
3349 */
3350 goto syn_cookie_valid;
3351 }
3352
3353 /*
3354 * If the state is LISTEN then ignore segment if it contains an RST.
3355 * If the segment contains an ACK then it is bad and send a RST.
3356 * If it does not contain a SYN then it is not interesting; drop it.
3357 * If it is from this socket, drop it, it must be forged.
3358 */
3359 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) {
3360 IF_TCP_STATINC(ifp, listbadsyn);
3361
3362 if (thflags & TH_RST) {
3363 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false,
3364 thflags & TH_SYN ? "ignore SYN with RST" : "ignore RST");
3365 drop_reason = DROP_REASON_TCP_SYN_RST;
3366 goto drop;
3367 }
3368 if (thflags & TH_ACK) {
3369 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false,
3370 thflags & TH_SYN ? "bad SYN with ACK" : "bad ACK");
3371 tp = NULL;
3372 tcpstat.tcps_badsyn++;
3373 drop_reason = DROP_REASON_TCP_SYN_ACK_LISTENER;
3374 goto dropwithreset;
3375 }
3376
3377 /* We come here if there is no SYN set */
3378 tcpstat.tcps_badsyn++;
3379 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad SYN");
3380 drop_reason = DROP_REASON_TCP_LISTENER_NO_SYN;
3381 goto drop;
3382 }
3383 KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START, 0, 0, 0, 0, 0);
3384 if (th->th_dport == th->th_sport) {
3385 if (isipv6) {
3386 if (in6_are_addr_equal_scoped(&ip6->ip6_dst, &ip6->ip6_src, ip6_input_getdstifscope(m), ip6_input_getsrcifscope(m))) {
3387 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad tuple same port");
3388 drop_reason = DROP_REASON_TCP_SAME_PORT;
3389 goto drop;
3390 }
3391 } else if (ip->ip_dst.s_addr == ip->ip_src.s_addr) {
3392 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad tuple same IPv4 address");
3393 drop_reason = DROP_REASON_TCP_SAME_PORT;
3394 goto drop;
3395 }
3396 }
3397 /*
3398 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
3399 * in_broadcast() should never return true on a received
3400 * packet with M_BCAST not set.
3401 *
3402 * Packets with a multicast source address should also
3403 * be discarded.
3404 */
3405 if (m->m_flags & (M_BCAST | M_MCAST)) {
3406 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "mbuf M_BCAST | M_MCAST");
3407 drop_reason = DROP_REASON_TCP_BCAST_MCAST;
3408 goto drop;
3409 }
3410 if (isipv6) {
3411 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3412 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
3413 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "IN6_IS_ADDR_MULTICAST");
3414 drop_reason = DROP_REASON_TCP_BCAST_MCAST;
3415 goto drop;
3416 }
3417 } else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3418 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3419 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3420 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
3421 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "multicast or broadcast address");
3422 drop_reason = DROP_REASON_TCP_BCAST_MCAST;
3423 goto drop;
3424 }
3425
3426 /*
3427 * If deprecated address is forbidden,
3428 * we do not accept SYN to deprecated interface
3429 * address to prevent any new inbound connection from
3430 * getting established.
3431 * When we do not accept SYN, we send a TCP RST,
3432 * with deprecated source address (instead of dropping
3433 * it). We compromise it as it is much better for peer
3434 * to send a RST, and RST will be the final packet
3435 * for the exchange.
3436 *
3437 * If we do not forbid deprecated addresses, we accept
3438 * the SYN packet. RFC 4862 forbids dropping SYN in
3439 * this case.
3440 */
3441 if (isipv6 && !ip6_use_deprecated) {
3442 uint32_t ia6_flags;
3443
3444 if (ip6_getdstifaddr_info(m, NULL,
3445 &ia6_flags) == 0) {
3446 if (ia6_flags & IN6_IFF_DEPRECATED) {
3447 tp = NULL;
3448 IF_TCP_STATINC(ifp, deprecate6);
3449 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "deprecated IPv6 address");
3450 drop_reason = DROP_REASON_TCP_DEPRECATED_ADDR;
3451 goto dropwithreset;
3452 }
3453 }
3454 }
3455
3456 bool syn_cookie_sent = false;
3457 ret = tcp_create_server_socket(&tpi, &so2, &syn_cookie_sent, &dropsocket);
3458
3459 if (syn_cookie_sent) {
3460 /*
3461 * SYN cookie sent and mbuf consumed.
3462 * Only the listen socket is unlocked by tcp_syncookie_syn().
3463 */
3464 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3465 return;
3466 }
3467 if (!so2) {
3468 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " listen drop");
3469 drop_reason = DROP_REASON_TCP_LISTENER_DROP;
3470 goto drop;
3471 }
3472 /* Set so to newly connected socket */
3473 so = so2;
3474
3475 if (ret == false) {
3476 drop_reason = DROP_REASON_TCP_CREATE_SERVER_SOCKET;
3477 goto drop;
3478 }
3479 }
3480 syn_cookie_valid:
3481 socket_lock_assert_owned(so);
3482 /*
3483 * Packet accounting should not be done on listening socket
3484 */
3485 if (th->th_flags & TH_SYN) {
3486 (void) os_add_overflow(1, tp->t_syn_rcvd, &tp->t_syn_rcvd);
3487 }
3488 if (th->th_flags & TH_FIN) {
3489 (void) os_add_overflow(1, tp->t_fin_rcvd, &tp->t_fin_rcvd);
3490 }
3491 if (th->th_flags & TH_RST) {
3492 (void) os_add_overflow(1, tp->t_rst_rcvd, &tp->t_rst_rcvd);
3493 }
3494 TCP_LOG_TH_FLAGS(TCP_LOG_HDR, th, tp, false, ifp);
3495
3496 if (net_mpklog_enabled && (m->m_pkthdr.rcvif->if_xflags & IFXF_MPK_LOG)) {
3497 MPKL_TCP_INPUT(tcp_mpkl_log_object,
3498 ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
3499 th->th_seq, th->th_ack, tlen, thflags,
3500 so->last_pid, so->so_log_seqn++);
3501 }
3502
3503 if (tp->accurate_ecn_on) {
3504 /* Reset the state used for AccECN processing */
3505 tp->t_aecn.accecn_processed = 0;
3506 }
3507
3508 if (tp->t_state == TCPS_ESTABLISHED && BYTES_ACKED(th, tp) > 0) {
3509 if (CC_ALGO(tp)->set_bytes_acked != NULL) {
3510 CC_ALGO(tp)->set_bytes_acked(tp, BYTES_ACKED(th, tp));
3511 }
3512 if (tp->ecn_flags & TE_SENDIPECT) {
3513 /*
3514 * Data sent with ECT has been acknowledged, calculate
3515 * packets approx. by dividing by MSS. This is done to
3516 * count MSS sized packets in case packets are aggregated
3517 * by GRO/LRO.
3518 */
3519 uint32_t bytes_acked = tcp_round_to(BYTES_ACKED(th, tp), tp->t_maxseg);
3520 tp->t_ecn_capable_packets_acked += max(1, (bytes_acked / tp->t_maxseg));
3521 }
3522 }
3523
3524 /* Accurate ECN has different semantics for TH_CWR. */
3525 if (!tp->accurate_ecn_on) {
3526 /*
3527 * Clear TE_SENDECE if TH_CWR is set. This is harmless, so we don't
3528 * bother doing extensive checks for state and whatnot.
3529 */
3530 if (thflags & TH_CWR) {
3531 tp->ecn_flags &= ~TE_SENDECE;
3532 tp->t_ecn_recv_cwr++;
3533 }
3534 }
3535
3536 /*
3537 * Accurate ECN feedback for Data Receiver,
3538 * Process IP ECN bits and update r.cep for CE marked pure ACKs
3539 * or valid data packets
3540 */
3541 uint8_t ace = tcp_get_ace(th);
3542 if (tp->accurate_ecn_on && tp->t_state == TCPS_ESTABLISHED) {
3543 /* Update receive side counters */
3544 if (tlen == 0 || (tlen > 0 &&
3545 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
3546 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd))) {
3547 tcp_input_ip_ecn(tp, inp, (uint32_t)tlen, (uint32_t)segment_count, ip_ecn);
3548 }
3549
3550 /* Test for ACE bleaching, initial value of ace should be non-zero */
3551 if (th->th_seq == tp->iss + 1 && ace == 0) {
3552 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_ace_bleaching_detected;
3553 }
3554 } else {
3555 /*
3556 * Explicit Congestion Notification - Flag that we need to send ECE if
3557 * + The IP Congestion experienced flag was set.
3558 * + Socket is in established state
3559 * + We negotiated ECN in the TCP setup
3560 * + This isn't a pure ack (tlen > 0)
3561 * + The data is in the valid window
3562 *
3563 * TE_SENDECE will be cleared when we receive a packet with TH_CWR set.
3564 */
3565 if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED &&
3566 TCP_ECN_ENABLED(tp) && tlen > 0 &&
3567 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
3568 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
3569 tp->t_ecn_recv_ce++;
3570 tcpstat.tcps_ecn_recv_ce++;
3571 INP_INC_IFNET_STAT(inp, ecn_recv_ce);
3572 /* Mark this connection as it received CE from network */
3573 tp->ecn_flags |= TE_RECV_ECN_CE;
3574 tp->ecn_flags |= TE_SENDECE;
3575 }
3576 }
3577
3578 /*
3579 * If we received an explicit notification of congestion in
3580 * ip tos ecn bits or by the CWR bit in TCP header flags, reset
3581 * the force-ACK counter. We need to handle ECN notification if
3582 * an ECN setup SYN was sent even once.
3583 */
3584 if (tp->t_state == TCPS_ESTABLISHED &&
3585 (tp->ecn_flags & TE_SETUPSENT) &&
3586 (ip_ecn == IPTOS_ECN_CE || (thflags & TH_CWR))) {
3587 tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
3588 CLEAR_IAJ_STATE(tp);
3589 }
3590
3591
3592 if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED) {
3593 /* Received CE on a non-ECN enabled connection */
3594 if (!TCP_ECN_ENABLED(tp)) {
3595 tcpstat.tcps_ecn_fallback_ce++;
3596 INP_INC_IFNET_STAT(inp, ecn_fallback_ce);
3597 } else if (!(tp->ecn_flags & TE_ECEHEURI_SET)) {
3598 if (inp->inp_mstat.ms_total.ts_rxpackets < ECN_MIN_CE_PROBES) {
3599 tp->t_ecn_recv_ce_pkt++;
3600 } else if (tp->t_ecn_recv_ce_pkt > ECN_MAX_CE_RATIO) {
3601 tcp_heuristic_ecn_aggressive(tp);
3602 tp->ecn_flags |= TE_ECEHEURI_SET;
3603 } else {
3604 /* We tracked the first ECN_MIN_CE_PROBES segments, we
3605 * now know that the path is good.
3606 */
3607 tp->ecn_flags |= TE_ECEHEURI_SET;
3608 }
3609 }
3610 }
3611
3612 /* Update rcvtime as a new segment was received on the connection */
3613 tp->t_rcvtime = tcp_now;
3614
3615 /*
3616 * Segment received on connection.
3617 * Reset idle time and keep-alive timer.
3618 */
3619 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3620 tcp_keepalive_reset(tp);
3621
3622 if (tp->t_mpsub) {
3623 mptcp_reset_keepalive(tp);
3624 }
3625 }
3626
3627 /*
3628 * Process options if not in LISTEN state,
3629 * else do it below (after getting remote address).
3630 */
3631 if (tp->t_state != TCPS_LISTEN && optp) {
3632 tcp_dooptions(tp, optp, optlen, th, &to);
3633 }
3634 #if MPTCP
3635 if (tp->t_state != TCPS_LISTEN && (so->so_flags & SOF_MP_SUBFLOW)) {
3636 mptcp_insert_rmap(tp, m, th);
3637 }
3638 #endif /* MPTCP */
3639 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
3640 if (!(thflags & TH_ACK) ||
3641 (SEQ_GT(th->th_ack, tp->iss) &&
3642 SEQ_LEQ(th->th_ack, tp->snd_max))) {
3643 tcp_finalize_options(tp, &to, ifscope);
3644 }
3645 }
3646
3647 #if TRAFFIC_MGT
3648 /*
3649 * Compute inter-packet arrival jitter. According to RFC 3550,
3650 * inter-packet arrival jitter is defined as the difference in
3651 * packet spacing at the receiver compared to the sender for a
3652 * pair of packets. When two packets of maximum segment size come
3653 * one after the other with consecutive sequence numbers, we
3654 * consider them as packets sent together at the sender and use
3655 * them as a pair to compute inter-packet arrival jitter. This
3656 * metric indicates the delay induced by the network components due
3657 * to queuing in edge/access routers.
3658 */
3659 if (tp->t_state == TCPS_ESTABLISHED &&
3660 (thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK | TH_ECE | TH_PUSH)) == TH_ACK &&
3661 ((tp->t_flags & TF_NEEDFIN) == 0) &&
3662 ((to.to_flags & TOF_TS) == 0 ||
3663 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
3664 th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) {
3665 int seg_size = tlen;
3666 if (tp->iaj_pktcnt <= IAJ_IGNORE_PKTCNT) {
3667 TCP_INC_VAR(tp->iaj_pktcnt, segment_count);
3668 }
3669
3670 if (tp->iaj_size == 0 || seg_size > tp->iaj_size ||
3671 (seg_size == tp->iaj_size && tp->iaj_rcv_ts == 0)) {
3672 /*
3673 * State related to inter-arrival jitter is
3674 * uninitialized or we are trying to find a good
3675 * first packet to start computing the metric
3676 */
3677 update_iaj_state(tp, seg_size, 0);
3678 } else {
3679 if (seg_size == tp->iaj_size) {
3680 /*
3681 * Compute inter-arrival jitter taking
3682 * this packet as the second packet
3683 */
3684 compute_iaj(tp);
3685 }
3686 if (seg_size < tp->iaj_size) {
3687 /*
3688 * There is a smaller packet in the stream.
3689 * Some times the maximum size supported
3690 * on a path can change if there is a new
3691 * link with smaller MTU. The receiver will
3692 * not know about this change. If there
3693 * are too many packets smaller than
3694 * iaj_size, we try to learn the iaj_size
3695 * again.
3696 */
3697 TCP_INC_VAR(tp->iaj_small_pkt, segment_count);
3698 if (tp->iaj_small_pkt > RESET_IAJ_SIZE_THRESH) {
3699 update_iaj_state(tp, seg_size, 1);
3700 } else {
3701 CLEAR_IAJ_STATE(tp);
3702 }
3703 } else {
3704 update_iaj_state(tp, seg_size, 0);
3705 }
3706 }
3707 } else {
3708 CLEAR_IAJ_STATE(tp);
3709 }
3710 #endif /* TRAFFIC_MGT */
3711
3712 /*
3713 * Header prediction: check for the two common cases
3714 * of a uni-directional data xfer. If the packet has
3715 * no control flags, is in-sequence, the window didn't
3716 * change and we're not retransmitting, it's a
3717 * candidate. If the length is zero and the ack moved
3718 * forward, we're the sender side of the xfer. Just
3719 * free the data acked & wake any higher level process
3720 * that was blocked waiting for space. If the length
3721 * is non-zero and the ack didn't move, we're the
3722 * receiver side. If we're getting packets in-order
3723 * (the reassembly queue is empty), add the data to
3724 * the socket buffer and note that we need a delayed ack.
3725 * Make sure that the hidden state-flags are also off.
3726 * Since we check for TCPS_ESTABLISHED above, it can only
3727 * be TH_NEEDSYN.
3728 */
3729 if (tp->t_state == TCPS_ESTABLISHED &&
3730 !(so->so_state & SS_CANTRCVMORE) &&
3731 (thflags & TH_FLAGS) == TH_ACK &&
3732 ((tp->t_flags & TF_NEEDFIN) == 0) &&
3733 ((to.to_flags & TOF_TS) == 0 ||
3734 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
3735 th->th_seq == tp->rcv_nxt &&
3736 tiwin && tiwin == tp->snd_wnd &&
3737 tp->snd_nxt == tp->snd_max) {
3738 /*
3739 * If last ACK falls within this segment's sequence numbers,
3740 * record the timestamp.
3741 * NOTE that the test is modified according to the latest
3742 * proposal of the [email protected] list (Braden 1993/04/26).
3743 */
3744 if ((to.to_flags & TOF_TS) != 0 &&
3745 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
3746 tp->ts_recent_age = tcp_now;
3747 tp->ts_recent = to.to_tsval;
3748 }
3749
3750 /*
3751 * We increment t_unacksegs_ce for both data segments
3752 * and pure ACKs for Accurate ECN
3753 */
3754 if (tp->accurate_ecn_on && ip_ecn == IPTOS_ECN_CE) {
3755 TCP_INC_VAR(tp->t_unacksegs_ce, segment_count);
3756 }
3757
3758 if (tlen == 0) {
3759 if (SEQ_GT(th->th_ack, tp->snd_una) &&
3760 SEQ_LEQ(th->th_ack, tp->snd_max) &&
3761 tp->snd_cwnd >= tp->snd_ssthresh &&
3762 (!IN_FASTRECOVERY(tp) &&
3763 ((!(SACK_ENABLED(tp)) &&
3764 tp->t_dupacks < tp->t_rexmtthresh) ||
3765 (SACK_ENABLED(tp) && to.to_nsacks == 0 &&
3766 TAILQ_EMPTY(&tp->snd_holes))))) {
3767 /*
3768 * this is a pure ack for outstanding data.
3769 */
3770 ++tcpstat.tcps_predack;
3771
3772 tcp_bad_rexmt_check(tp, th, &to);
3773
3774 /* Recalculate the RTT */
3775 tcp_compute_rtt(tp, &to, th);
3776
3777 VERIFY(SEQ_GEQ(th->th_ack, tp->snd_una));
3778 acked = BYTES_ACKED(th, tp);
3779 tcpstat.tcps_rcvackpack++;
3780 tcpstat.tcps_rcvackbyte += acked;
3781
3782 /* TE_SENDIPECT is only set when L4S sysctl is enabled */
3783 if (tp->accurate_ecn_on && (tp->ecn_flags & TE_SENDIPECT)) {
3784 uint32_t pkts_acked = tcp_packets_this_ack(tp, acked);
3785 tp->total_ect_packets_acked += pkts_acked;
3786
3787 bool newly_acked_time = false;
3788 if (acked == 0 && (to.to_flags & TOF_TS) != 0 && to.to_tsecr != 0 &&
3789 TSTMP_GT(to.to_tsecr, tp->t_last_ack_tsecr)) {
3790 newly_acked_time = true;
3791 }
3792 if (acked > 0 || newly_acked_time) {
3793 tcp_process_accecn(tp, &to, th, pkts_acked, ace);
3794 }
3795 }
3796
3797 /*
3798 * Process sent segments used for RACK, called after RTT is computed
3799 * RACK reordering window doesn't need to be updated until we process
3800 * DSACK.
3801 */
3802 if (TCP_RACK_ENABLED(tp)) {
3803 tcp_segs_doack(tp, th->th_ack, &to);
3804 if (SEQ_LT(tp->snd_fack, th->th_ack)) {
3805 /*
3806 * We update snd_fack here for RACK only as it is updated
3807 * and used differently for SACK. This should be done after
3808 * ACK processing of segments which checks for reordering.
3809 * Also, we don't compare with highest_sacked_seq here as this
3810 * is the fast path with no SACK blocks.
3811 */
3812 tp->snd_fack = th->th_ack;
3813 }
3814 }
3815
3816 /*
3817 * Handle an ack that is in sequence during
3818 * congestion avoidance phase. The
3819 * calculations in this function
3820 * assume that snd_una is not updated yet.
3821 */
3822 if (CC_ALGO(tp)->congestion_avd != NULL) {
3823 CC_ALGO(tp)->congestion_avd(tp, th);
3824 }
3825 tcp_ccdbg_trace(tp, th, TCP_CC_INSEQ_ACK_RCVD);
3826 sbdrop(&so->so_snd, acked);
3827 tcp_sbsnd_trim(&so->so_snd);
3828
3829 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
3830 SEQ_LEQ(th->th_ack, tp->snd_recover)) {
3831 tp->snd_recover = th->th_ack - 1;
3832 }
3833
3834 tcp_update_snd_una(tp, th->th_ack);
3835
3836 TCP_RESET_REXMT_STATE(tp);
3837
3838 /*
3839 * pull snd_wl2 up to prevent seq wrap relative
3840 * to th_ack.
3841 */
3842 tp->snd_wl2 = th->th_ack;
3843
3844 if (tp->t_dupacks > 0) {
3845 tp->t_dupacks = 0;
3846 tp->t_rexmtthresh = tcprexmtthresh;
3847 }
3848
3849 tp->sackhint.sack_bytes_acked = 0;
3850
3851 /*
3852 * If all outstanding data are acked, stop
3853 * retransmit timer, otherwise restart timer
3854 * using current (possibly backed-off) value.
3855 * If process is waiting for space,
3856 * wakeup/selwakeup/signal. If data
3857 * are ready to send, let tcp_output
3858 * decide between more output or persist.
3859 */
3860 if (tp->snd_una == tp->snd_max) {
3861 tp->t_timer[TCPT_REXMT] = 0;
3862 tp->t_timer[TCPT_PTO] = 0;
3863 tp->t_timer[TCPT_REORDER] = 0;
3864 tcp_rack_reset_segs_retransmitted(tp);
3865 } else if (tp->t_timer[TCPT_PERSIST] == 0) {
3866 tcp_set_rto(tp);
3867 }
3868 if (!SLIST_EMPTY(&tp->t_rxt_segments) &&
3869 !TCP_DSACK_SEQ_IN_WINDOW(tp,
3870 tp->t_dsack_lastuna, tp->snd_una)) {
3871 tcp_rxtseg_clean(tp);
3872 }
3873
3874 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
3875 tp->t_bwmeas != NULL) {
3876 tcp_bwmeas_check(tp);
3877 }
3878
3879 write_wakeup = 1;
3880 if (!SLIST_EMPTY(&tp->t_notify_ack)) {
3881 tcp_notify_acknowledgement(tp, so);
3882 }
3883
3884 if ((so->so_snd.sb_cc) || (tp->t_flags & TF_ACKNOW)) {
3885 (void) tcp_output(tp);
3886 }
3887
3888 tcp_tfo_rcv_ack(tp, th);
3889
3890 m_freem(m);
3891
3892 tcp_check_timer_state(tp);
3893
3894 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
3895
3896 socket_unlock(so, 1);
3897 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3898 return;
3899 }
3900 } else if (th->th_ack == tp->snd_una && LIST_EMPTY(&tp->t_segq) &&
3901 tlen <= tcp_sbspace(tp)) {
3902 int mem = tcp_memacct_limited();
3903 if (mem == MEMACCT_HARDLIMIT ||
3904 (mem == MEMACCT_SOFTLIMIT && so->so_rcv.sb_cc > 0)) {
3905 /*
3906 * If we are at the hard limit, just drop.
3907 * If we are at the softlimit, only accept one
3908 * packet into the receive-queue.
3909 */
3910 drop_reason = DROP_REASON_TCP_INSEQ_MEMORY_PRESSURE;
3911 tcpstat.tcps_rcvmemdrop++;
3912 goto drop;
3913 }
3914 /*
3915 * this is a pure, in-sequence data packet
3916 * with nothing on the reassembly queue and
3917 * we have enough buffer space to take it.
3918 */
3919
3920 /* Clean receiver SACK report if present */
3921 if (SACK_ENABLED(tp) && tp->rcv_numsacks) {
3922 tcp_clean_sackreport(tp);
3923 }
3924 ++tcpstat.tcps_preddat;
3925 tp->rcv_nxt += tlen;
3926 /* Update highest received sequence and its timestamp */
3927 if (SEQ_LT(tp->rcv_high, tp->rcv_nxt)) {
3928 tp->rcv_high = tp->rcv_nxt;
3929 if (to.to_flags & TOF_TS) {
3930 tp->tsv_high = to.to_tsval;
3931 }
3932 }
3933
3934 /*
3935 * Pull snd_wl1 up to prevent seq wrap relative to
3936 * th_seq.
3937 */
3938 tp->snd_wl1 = th->th_seq;
3939 /*
3940 * Pull rcv_up up to prevent seq wrap relative to
3941 * rcv_nxt.
3942 */
3943 tp->rcv_up = tp->rcv_nxt;
3944 TCP_INC_VAR(tcpstat.tcps_rcvpack, segment_count);
3945 tcpstat.tcps_rcvbyte += tlen;
3946 if (nstat_collect) {
3947 INP_ADD_RXSTAT(inp, ifnet_count_type, 1, tlen);
3948 }
3949
3950 /* Calculate the RTT on the receiver */
3951 tcp_compute_rcv_rtt(tp, &to, th);
3952
3953 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
3954 if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.data_rcvd != NULL) {
3955 tcp_cc_rledbat.data_rcvd(tp, th, &to, tlen);
3956 }
3957
3958 /*
3959 * Add data to socket buffer.
3960 */
3961 so_recv_data_stat(so, m, 0);
3962 m_adj(m, drop_hdrlen); /* delayed header drop */
3963
3964 if (isipv6) {
3965 memcpy(&saved_hdr, ip6, sizeof(struct ip6_hdr));
3966 ip6 = (struct ip6_hdr *)&saved_hdr[0];
3967 } else {
3968 memcpy(&saved_hdr, ip, ip->ip_hl << 2);
3969 ip = (struct ip *)&saved_hdr[0];
3970 }
3971 memcpy(&saved_tcphdr, th, sizeof(struct tcphdr));
3972
3973 if (th->th_flags & TH_PUSH) {
3974 tp->t_flagsext |= TF_LAST_IS_PSH;
3975 } else {
3976 tp->t_flagsext &= ~TF_LAST_IS_PSH;
3977 }
3978
3979 if (sbappendstream_rcvdemux(so, m)) {
3980 mptcp_handle_input(so);
3981 read_wakeup = 1;
3982 }
3983 th = &saved_tcphdr;
3984
3985 if (isipv6) {
3986 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
3987 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
3988 th->th_seq, th->th_ack, th->th_win);
3989 } else {
3990 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
3991 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
3992 th->th_seq, th->th_ack, th->th_win);
3993 }
3994 TCP_INC_VAR(tp->t_unacksegs, segment_count);
3995 if (DELAY_ACK(tp, th)) {
3996 if ((tp->t_flags & TF_DELACK) == 0) {
3997 tp->t_flags |= TF_DELACK;
3998 tp->t_timer[TCPT_DELACK] = tcp_offset_from_start(tp, tcp_delack);
3999 }
4000 } else {
4001 tp->t_flags |= TF_ACKNOW;
4002 tcp_output(tp);
4003 }
4004
4005 tcp_adaptive_rwtimo_check(tp, tlen);
4006
4007 if (tlen > 0) {
4008 tcp_tfo_rcv_data(tp);
4009 }
4010
4011 tcp_check_timer_state(tp);
4012
4013 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
4014
4015 socket_unlock(so, 1);
4016 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
4017 return;
4018 }
4019 }
4020
4021 /*
4022 * Calculate amount of space in receive window,
4023 * and then do TCP input processing.
4024 * Receive window is amount of space in rcv queue,
4025 * but not less than advertised window.
4026 */
4027 socket_lock_assert_owned(so);
4028 win = tcp_sbspace(tp);
4029 if (win < 0) {
4030 win = 0;
4031 } else { /* clip rcv window to 4K for modems */
4032 if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) {
4033 win = min(win, slowlink_wsize);
4034 }
4035 }
4036 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
4037 #if MPTCP
4038 /*
4039 * Ensure that the subflow receive window isn't greater
4040 * than the connection level receive window.
4041 */
4042 if ((tp->t_mpflags & TMPF_MPTCP_TRUE) && (mp_tp = tptomptp(tp))) {
4043 socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte));
4044 int64_t recwin_conn = (int64_t)(mp_tp->mpt_rcvadv - mp_tp->mpt_rcvnxt);
4045
4046 VERIFY(recwin_conn < INT32_MAX && recwin_conn > INT32_MIN);
4047 if (recwin_conn > 0 && tp->rcv_wnd > (uint32_t)recwin_conn) {
4048 tp->rcv_wnd = (uint32_t)recwin_conn;
4049 tcpstat.tcps_mp_reducedwin++;
4050 }
4051 }
4052 #endif /* MPTCP */
4053
4054 switch (tp->t_state) {
4055 /*
4056 * Initialize tp->rcv_nxt, and tp->irs, select an initial
4057 * tp->iss, and send a segment:
4058 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
4059 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
4060 * Fill in remote peer address fields if not previously specified.
4061 * Enter SYN_RECEIVED state, and process any other fields of this
4062 * segment in this state.
4063 */
4064 case TCPS_LISTEN: {
4065 struct tcp_inp tpi = {.inp = &inp, .tp = &tp, .m = m, .th = th,
4066 .iss = iss, .tiwin = tiwin, .to = &to, .optp = optp, .optlen = optlen,
4067 .ip6 = ip6, .ip = ip, .ip_ecn = ip_ecn, .isipv6 = isipv6, .ifp = ifp,
4068 .ifscope = ifscope, .kernel_proc = kernel_proc};
4069 ret = tcp_setup_server_socket(&tpi, so, false);
4070
4071 if (ret == false) {
4072 drop_reason = DROP_REASON_TCP_CREATE_SERVER_SOCKET;
4073 goto drop;
4074 }
4075 if (TFO_ENABLED(tp) && tcp_tfo_syn(tp, &to)) {
4076 isconnected = TRUE;
4077 }
4078 dropsocket = 0; /* committed to socket */
4079
4080 goto trimthenstep6;
4081 }
4082
4083 /*
4084 * If the state is SYN_RECEIVED and the seg contains an ACK,
4085 * but not for our SYN/ACK, send a RST.
4086 */
4087 case TCPS_SYN_RECEIVED:
4088 if ((thflags & TH_ACK) &&
4089 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
4090 SEQ_GT(th->th_ack, tp->snd_max))) {
4091 IF_TCP_STATINC(ifp, ooopacket);
4092 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_RECEIVED bad ACK");
4093 drop_reason = DROP_REASON_TCP_SYN_RECEIVED_BAD_ACK;
4094 goto dropwithreset;
4095 }
4096
4097 /*
4098 * In SYN_RECEIVED state, if we recv some SYNS with
4099 * window scale and others without, window scaling should
4100 * be disabled. Otherwise the window advertised will be
4101 * lower if we assume scaling and the other end does not.
4102 */
4103 if ((thflags & TH_SYN) &&
4104 (tp->irs == th->th_seq) &&
4105 !(to.to_flags & TOF_SCALE)) {
4106 tp->t_flags &= ~TF_RCVD_SCALE;
4107 }
4108 break;
4109
4110 /*
4111 * If the state is SYN_SENT:
4112 * if seg contains an ACK, but not for our SYN, drop the input.
4113 * if seg contains a RST, then drop the connection.
4114 * if seg does not contain SYN, then drop it.
4115 * Otherwise this is an acceptable SYN segment
4116 * initialize tp->rcv_nxt and tp->irs
4117 * if seg contains ack then advance tp->snd_una
4118 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
4119 * arrange for segment to be acked (eventually)
4120 * continue processing rest of data/controls, beginning with URG
4121 */
4122 case TCPS_SYN_SENT:
4123 if ((thflags & TH_ACK) &&
4124 (SEQ_LEQ(th->th_ack, tp->iss) ||
4125 SEQ_GT(th->th_ack, tp->snd_max))) {
4126 IF_TCP_STATINC(ifp, ooopacket);
4127 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_SENT bad ACK");
4128 drop_reason = DROP_REASON_TCP_SYN_SENT_BAD_ACK;
4129 goto dropwithreset;
4130 }
4131 if (thflags & TH_RST) {
4132 if ((thflags & TH_ACK) != 0) {
4133 if (TFO_ENABLED(tp) &&
4134 !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) {
4135 tcp_heuristic_tfo_rst(tp);
4136 }
4137 if ((tp->ecn_flags & (TE_SETUPSENT | TE_RCVD_SYN_RST)) == TE_SETUPSENT ||
4138 (tp->ecn_flags & (TE_ACE_SETUPSENT | TE_RCVD_SYN_RST)) == TE_ACE_SETUPSENT) {
4139 /*
4140 * On local connections, send
4141 * non-ECN syn one time before
4142 * dropping the connection
4143 */
4144 if (tp->t_flags & TF_LOCAL) {
4145 tp->ecn_flags |= TE_RCVD_SYN_RST;
4146 drop_reason = DROP_REASON_TCP_RST;
4147 goto drop;
4148 } else {
4149 tcp_heuristic_ecn_synrst(tp);
4150 }
4151 }
4152 soevent(so,
4153 (SO_FILT_HINT_LOCKED |
4154 SO_FILT_HINT_CONNRESET));
4155 tp = tcp_drop(tp, ECONNREFUSED);
4156 }
4157 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_SENT got RST");
4158 drop_reason = DROP_REASON_TCP_RST;
4159 goto drop;
4160 }
4161 if ((thflags & TH_SYN) == 0) {
4162 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_SENT no SYN");
4163 drop_reason = DROP_REASON_TCP_SYN_SENT_NO_SYN;
4164 goto drop;
4165 }
4166 tp->snd_wnd = th->th_win; /* initial send window */
4167 tp->max_sndwnd = tp->snd_wnd;
4168
4169 tp->irs = th->th_seq;
4170 tcp_rcvseqinit(tp);
4171 if (thflags & TH_ACK) {
4172 /* Client processes SYN-ACK */
4173 tcpstat.tcps_connects++;
4174
4175 const uint32_t ace_flags = ((th->th_x2 << 8) | thflags) & TH_ACE;
4176 tcp_input_process_accecn_synack(tp, inp, &to, thflags, ace_flags, ip_ecn,
4177 (uint32_t)tlen, (uint32_t)segment_count);
4178
4179 /* Do window scaling on this connection? */
4180 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
4181 tp->snd_scale = tp->requested_s_scale;
4182 tp->rcv_scale = tp->request_r_scale;
4183 }
4184
4185 uint32_t recwin = min(tp->rcv_wnd, TCP_MAXWIN << tp->rcv_scale);
4186 if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.get_rlwin != NULL) {
4187 /* For a LBE receiver, also use rledbat_win */
4188 uint32_t rledbat_win = tcp_cc_rledbat.get_rlwin(tp);
4189 if (rledbat_win > 0) {
4190 recwin = min(recwin, rledbat_win);
4191 }
4192 }
4193 tp->rcv_adv += recwin;
4194
4195 tp->snd_una++; /* SYN is acked */
4196 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
4197 tp->snd_nxt = tp->snd_una;
4198 }
4199
4200 /*
4201 * We have sent more in the SYN than what is being
4202 * acked. (e.g., TFO)
4203 * We should restart the sending from what the receiver
4204 * has acknowledged immediately.
4205 */
4206 if (SEQ_GT(tp->snd_nxt, th->th_ack)) {
4207 /*
4208 * rdar://problem/33214601
4209 * There is a middlebox that acks all but one
4210 * byte and still drops the data.
4211 */
4212 if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
4213 (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
4214 tp->snd_max == th->th_ack + 1 &&
4215 tp->snd_max > tp->snd_una + 1) {
4216 tcp_heuristic_tfo_middlebox(tp);
4217
4218 so->so_error = ENODATA;
4219 soevent(so,
4220 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
4221
4222 tp->t_tfo_stats |= TFO_S_ONE_BYTE_PROXY;
4223 }
4224
4225 tp->snd_max = tp->snd_nxt = th->th_ack;
4226 }
4227
4228 /*
4229 * If there's data, delay ACK; if there's also a FIN
4230 * ACKNOW will be turned on later.
4231 */
4232 TCP_INC_VAR(tp->t_unacksegs, segment_count);
4233 if (tp->accurate_ecn_on && ip_ecn == IPTOS_ECN_CE) {
4234 TCP_INC_VAR(tp->t_unacksegs_ce, segment_count);
4235 }
4236 if (DELAY_ACK(tp, th) && tlen != 0) {
4237 if ((tp->t_flags & TF_DELACK) == 0) {
4238 tp->t_flags |= TF_DELACK;
4239 tp->t_timer[TCPT_DELACK] = tcp_offset_from_start(tp, tcp_delack);
4240 }
4241 } else {
4242 tp->t_flags |= TF_ACKNOW;
4243 }
4244 /*
4245 * Received <SYN,ACK> in SYN_SENT[*] state.
4246 * Transitions:
4247 * SYN_SENT --> ESTABLISHED
4248 * SYN_SENT* --> FIN_WAIT_1
4249 */
4250 tp->t_starttime = tcp_now;
4251 tcp_sbrcv_tstmp_check(tp);
4252 if (tp->t_flags & TF_NEEDFIN) {
4253 DTRACE_TCP4(state__change, void, NULL,
4254 struct inpcb *, inp,
4255 struct tcpcb *, tp, int32_t,
4256 TCPS_FIN_WAIT_1);
4257 TCP_LOG_STATE(tp, TCPS_FIN_WAIT_1);
4258 tp->t_state = TCPS_FIN_WAIT_1;
4259 tp->t_flags &= ~TF_NEEDFIN;
4260 thflags &= ~TH_SYN;
4261
4262 TCP_LOG_CONNECTION_SUMMARY(tp);
4263 } else {
4264 DTRACE_TCP4(state__change, void, NULL,
4265 struct inpcb *, inp, struct tcpcb *,
4266 tp, int32_t, TCPS_ESTABLISHED);
4267 TCP_LOG_STATE(tp, TCPS_ESTABLISHED);
4268 tp->t_state = TCPS_ESTABLISHED;
4269 tp->t_timer[TCPT_KEEP] =
4270 tcp_offset_from_start(tp,
4271 TCP_CONN_KEEPIDLE(tp));
4272 if (nstat_collect) {
4273 nstat_route_connect_success(
4274 inp->inp_route.ro_rt);
4275 }
4276 TCP_LOG_CONNECTED(tp, 0);
4277 /*
4278 * The SYN is acknowledged but una is not
4279 * updated yet. So pass the value of
4280 * ack to compute sndbytes correctly
4281 */
4282 inp_count_sndbytes(inp, th->th_ack);
4283 }
4284 tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
4285 #if MPTCP
4286 /*
4287 * Do not send the connect notification for additional
4288 * subflows until ACK for 3-way handshake arrives.
4289 */
4290 if ((!(tp->t_mpflags & TMPF_MPTCP_TRUE)) &&
4291 (tp->t_mpflags & TMPF_SENT_JOIN)) {
4292 isconnected = FALSE;
4293 } else
4294 #endif /* MPTCP */
4295 isconnected = TRUE;
4296
4297 if ((tp->t_tfo_flags & (TFO_F_COOKIE_REQ | TFO_F_COOKIE_SENT)) ||
4298 (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT)) {
4299 tcp_tfo_synack(tp, &to);
4300
4301 if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
4302 SEQ_LT(tp->snd_una, th->th_ack)) {
4303 tp->t_tfo_stats |= TFO_S_SYN_DATA_ACKED;
4304 tcpstat.tcps_tfo_syn_data_acked++;
4305 #if MPTCP
4306 if (so->so_flags & SOF_MP_SUBFLOW) {
4307 so->so_flags1 |= SOF1_TFO_REWIND;
4308 }
4309 #endif
4310 tcp_tfo_rcv_probe(tp, tlen);
4311 }
4312 }
4313 } else {
4314 /*
4315 * Received initial SYN in SYN-SENT[*] state => simul-
4316 * taneous open.
4317 * Do 3-way handshake:
4318 * SYN-SENT -> SYN-RECEIVED
4319 * SYN-SENT* -> SYN-RECEIVED*
4320 */
4321 tp->t_flags |= TF_ACKNOW;
4322 tp->t_timer[TCPT_REXMT] = 0;
4323 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
4324 struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
4325 TCP_LOG_STATE(tp, TCPS_SYN_RECEIVED);
4326 tp->t_state = TCPS_SYN_RECEIVED;
4327
4328 /*
4329 * During simultaneous open, TFO should not be used.
4330 * So, we disable it here, to prevent that data gets
4331 * sent on the SYN/ACK.
4332 */
4333 tcp_disable_tfo(tp);
4334 }
4335
4336 trimthenstep6:
4337 /*
4338 * Advance th->th_seq to correspond to first data byte.
4339 * If data, trim to stay within window,
4340 * dropping FIN if necessary.
4341 */
4342 th->th_seq++;
4343 if (tlen > tp->rcv_wnd) {
4344 todrop = tlen - tp->rcv_wnd;
4345 m_adj(m, -todrop);
4346 tlen = tp->rcv_wnd;
4347 thflags &= ~TH_FIN;
4348 tcpstat.tcps_rcvpackafterwin++;
4349 tcpstat.tcps_rcvbyteafterwin += todrop;
4350 }
4351 tp->snd_wl1 = th->th_seq - 1;
4352 tp->rcv_up = th->th_seq;
4353 /*
4354 * Client side of transaction: already sent SYN and data.
4355 * If the remote host used T/TCP to validate the SYN,
4356 * our data will be ACK'd; if so, enter normal data segment
4357 * processing in the middle of step 5, ack processing.
4358 * Otherwise, goto step 6.
4359 */
4360 if (thflags & TH_ACK) {
4361 goto process_ACK;
4362 }
4363 goto step6;
4364 /*
4365 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
4366 * do normal processing.
4367 *
4368 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
4369 */
4370 case TCPS_LAST_ACK:
4371 case TCPS_CLOSING:
4372 case TCPS_TIME_WAIT:
4373 break; /* continue normal processing */
4374
4375 /* Received a SYN while connection is already established.
4376 * This is a "half open connection and other anomalies" described
4377 * in RFC793 page 34, send an ACK so the remote reset the connection
4378 * or recovers by adjusting its sequence numbering. Sending an ACK is
4379 * in accordance with RFC 5961 Section 4.2
4380 *
4381 * For Accurate ECN, if we receive a packet with SYN in ESTABLISHED
4382 * state, we don't send the handshake encoding.
4383 */
4384 case TCPS_ESTABLISHED:
4385 if (thflags & TH_SYN && tlen <= 0) {
4386 /* Drop the packet silently if we have reached the limit */
4387 if (tcp_is_ack_ratelimited(tp)) {
4388 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN in ESTABLISHED state");
4389 goto drop;
4390 } else {
4391 /* Send challenge ACK */
4392 tcpstat.tcps_synchallenge++;
4393 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN in ESTABLISHED state");
4394 goto dropafterack;
4395 }
4396 }
4397 break;
4398 }
4399
4400 /*
4401 * States other than LISTEN or SYN_SENT.
4402 * First check the RST flag and sequence number since reset segments
4403 * are exempt from the timestamp and connection count tests. This
4404 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
4405 * below which allowed reset segments in half the sequence space
4406 * to fall though and be processed (which gives forged reset
4407 * segments with a random sequence number a 50 percent chance of
4408 * killing a connection).
4409 * Then check timestamp, if present.
4410 * Then check the connection count, if present.
4411 * Then check that at least some bytes of segment are within
4412 * receive window. If segment begins before rcv_nxt,
4413 * drop leading data (and SYN); if nothing left, just ack.
4414 *
4415 *
4416 * If the RST bit is set, check the sequence number to see
4417 * if this is a valid reset segment.
4418 * RFC 793 page 37:
4419 * In all states except SYN-SENT, all reset (RST) segments
4420 * are validated by checking their SEQ-fields. A reset is
4421 * valid if its sequence number is in the window.
4422 * Note: this does not take into account delayed ACKs, so
4423 * we should test against last_ack_sent instead of rcv_nxt.
4424 * The sequence number in the reset segment is normally an
4425 * echo of our outgoing acknowlegement numbers, but some hosts
4426 * send a reset with the sequence number at the rightmost edge
4427 * of our receive window, and we have to handle this case.
4428 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
4429 * that brute force RST attacks are possible. To combat this,
4430 * we use a much stricter check while in the ESTABLISHED state,
4431 * only accepting RSTs where the sequence number is equal to
4432 * last_ack_sent. In all other states (the states in which a
4433 * RST is more likely), the more permissive check is used.
4434 * RFC 5961 Section 3.2: if the RST bit is set, sequence # is
4435 * within the receive window and last_ack_sent == seq,
4436 * then reset the connection. Otherwise if the seq doesn't
4437 * match last_ack_sent, TCP must send challenge ACK. Perform
4438 * rate limitation when sending the challenge ACK.
4439 * If we have multiple segments in flight, the intial reset
4440 * segment sequence numbers will be to the left of last_ack_sent,
4441 * but they will eventually catch up.
4442 * In any case, it never made sense to trim reset segments to
4443 * fit the receive window since RFC 1122 says:
4444 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
4445 *
4446 * A TCP SHOULD allow a received RST segment to include data.
4447 *
4448 * DISCUSSION
4449 * It has been suggested that a RST segment could contain
4450 * ASCII text that encoded and explained the cause of the
4451 * RST. No standard has yet been established for such
4452 * data.
4453 *
4454 * If the reset segment passes the sequence number test examine
4455 * the state:
4456 * SYN_RECEIVED STATE:
4457 * If passive open, return to LISTEN state.
4458 * If active open, inform user that connection was refused.
4459 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
4460 * Inform user that connection was reset, and close tcb.
4461 * CLOSING, LAST_ACK STATES:
4462 * Close the tcb.
4463 * TIME_WAIT STATE:
4464 * Drop the segment - see Stevens, vol. 2, p. 964 and
4465 * RFC 1337.
4466 *
4467 * Radar 4803931: Allows for the case where we ACKed the FIN but
4468 * there is already a RST in flight from the peer.
4469 * In that case, accept the RST for non-established
4470 * state if it's one off from last_ack_sent.
4471 *
4472 * Also be lenient in closing states to allow last_ack_sent and also
4473 * last_ack_sent - 1 in case there is a lot of delay upstream
4474 * and it is an older segment that is triggering the RST
4475 */
4476 if (thflags & TH_RST) {
4477 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
4478 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
4479 ((tp->rcv_wnd == 0 || tp->t_state >= TCPS_CLOSE_WAIT) &&
4480 ((tp->last_ack_sent == th->th_seq) ||
4481 (tp->last_ack_sent - 1 == th->th_seq)))) {
4482 if (tp->last_ack_sent == th->th_seq || tp->last_ack_sent - 1 == th->th_seq) {
4483 switch (tp->t_state) {
4484 case TCPS_SYN_RECEIVED:
4485 IF_TCP_STATINC(ifp, rstinsynrcv);
4486 so->so_error = ECONNREFUSED;
4487 goto close;
4488
4489 case TCPS_ESTABLISHED:
4490 if ((TCP_ECN_ENABLED(tp) || tp->accurate_ecn_on) &&
4491 tp->snd_una == tp->iss + 1 &&
4492 SEQ_GT(tp->snd_max, tp->snd_una)) {
4493 /*
4494 * If the first data packet on an
4495 * ECN connection receives a RST
4496 * increment the heuristic
4497 */
4498 tcp_heuristic_ecn_droprst(tp);
4499 }
4500 OS_FALLTHROUGH;
4501 case TCPS_FIN_WAIT_1:
4502 case TCPS_CLOSE_WAIT:
4503 case TCPS_FIN_WAIT_2:
4504 so->so_error = ECONNRESET;
4505 close:
4506 soevent(so,
4507 (SO_FILT_HINT_LOCKED |
4508 SO_FILT_HINT_CONNRESET));
4509
4510 tcpstat.tcps_drops++;
4511 tp = tcp_close(tp);
4512 break;
4513
4514 case TCPS_CLOSING:
4515 case TCPS_LAST_ACK:
4516 tp = tcp_close(tp);
4517 break;
4518
4519 case TCPS_TIME_WAIT:
4520 break;
4521 }
4522 } else {
4523 tcpstat.tcps_badrst++;
4524 /* Drop if we have reached the ACK limit */
4525 if (tcp_is_ack_ratelimited(tp)) {
4526 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad RST in ESTABLISHED state");
4527 goto drop;
4528 } else {
4529 /* Send challenge ACK */
4530 tcpstat.tcps_rstchallenge++;
4531 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad RST in ESTABLISHED state");
4532 goto dropafterack;
4533 }
4534 }
4535 }
4536 drop_reason = DROP_REASON_TCP_BAD_RST;
4537 goto drop;
4538 }
4539
4540 /*
4541 * RFC 1323 PAWS: If we have a timestamp reply on this segment
4542 * and it's less than ts_recent, drop it.
4543 */
4544 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
4545 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
4546 /* Check to see if ts_recent is over 24 days old. */
4547 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
4548 /*
4549 * Invalidate ts_recent. If this segment updates
4550 * ts_recent, the age will be reset later and ts_recent
4551 * will get a valid value. If it does not, setting
4552 * ts_recent to zero will at least satisfy the
4553 * requirement that zero be placed in the timestamp
4554 * echo reply when ts_recent isn't valid. The
4555 * age isn't reset until we get a valid ts_recent
4556 * because we don't want out-of-order segments to be
4557 * dropped when ts_recent is old.
4558 */
4559 tp->ts_recent = 0;
4560 } else {
4561 tcpstat.tcps_rcvduppack++;
4562 tcpstat.tcps_rcvdupbyte += tlen;
4563 tp->t_pawsdrop++;
4564 tcpstat.tcps_pawsdrop++;
4565
4566 if (nstat_collect) {
4567 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt,
4568 1, tlen, NSTAT_RX_FLAG_DUPLICATE);
4569 INP_ADD_RXSTAT(inp, ifnet_count_type, 1, tlen);
4570 tp->t_stat.rxduplicatebytes += tlen;
4571 }
4572 if (tlen > 0) {
4573 goto dropafterack;
4574 }
4575 drop_reason = DROP_REASON_TCP_PAWS;
4576 goto drop;
4577 }
4578 }
4579
4580 /*
4581 * In the SYN-RECEIVED state, validate that the packet belongs to
4582 * this connection before trimming the data to fit the receive
4583 * window. Check the sequence number versus IRS since we know
4584 * the sequence numbers haven't wrapped. This is a partial fix
4585 * for the "LAND" DoS attack.
4586 */
4587 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
4588 IF_TCP_STATINC(ifp, dospacket);
4589 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_RECEIVED bad SEQ");
4590 drop_reason = DROP_REASON_TCP_SYN_RECEIVED_BAD_SEQ;
4591 goto dropwithreset;
4592 }
4593
4594 /*
4595 * For SYN received in TIME_WAIT state:
4596 * A valid SYN with the intention to create a new connection
4597 * should have a higher timestamp than seen for the current
4598 * connection, if timestamp is supported. OR if timestamp
4599 * is either equal or not supported, sequence number of the
4600 * incoming SYN should be greater than the last sequence
4601 * number seen on the current connection.
4602 */
4603 if (tp->t_state == TCPS_TIME_WAIT && tlen == 0 &&
4604 (thflags & (TH_SYN | TH_ACK | TH_RST)) == TH_SYN) {
4605 bool higher_seq = SEQ_GT(th->th_seq, tp->rcv_nxt);
4606 bool newer_time = TSTMP_GT(to.to_tsval, tp->ts_recent) ||
4607 (to.to_tsval == tp->ts_recent && higher_seq);
4608 bool tstmp_received = to.to_flags & TOF_TS;
4609
4610 if ((tstmp_received && newer_time) || (!tstmp_received && higher_seq)) {
4611 iss = tcp_new_isn(tp);
4612 tp = tcp_close(tp);
4613 socket_unlock(so, 1);
4614 goto findpcb;
4615 }
4616 }
4617
4618 /*
4619 * Check if there is old data at the beginning of the window
4620 * i.e. the sequence number is before rcv_nxt
4621 */
4622 todrop = tp->rcv_nxt - th->th_seq;
4623 if (todrop > 0) {
4624 boolean_t is_syn_set = FALSE;
4625
4626 if (thflags & TH_SYN) {
4627 is_syn_set = TRUE;
4628 thflags &= ~TH_SYN;
4629 th->th_seq++;
4630 if (th->th_urp > 1) {
4631 th->th_urp--;
4632 } else {
4633 thflags &= ~TH_URG;
4634 }
4635 todrop--;
4636 }
4637 /*
4638 * Following if statement from Stevens, vol. 2, p. 960.
4639 * The amount of duplicate data is greater than or equal
4640 * to the size of the segment - entire segment is duplicate
4641 */
4642 if (todrop > tlen
4643 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
4644 /*
4645 * Any valid FIN must be to the left of the window.
4646 * At this point the FIN must be a duplicate or out
4647 * of sequence; drop it.
4648 */
4649 thflags &= ~TH_FIN;
4650
4651 /*
4652 * Send an ACK to resynchronize and drop any data.
4653 * But keep on processing for RST or ACK.
4654 *
4655 * If the SYN bit was originally set, then only send
4656 * an ACK if we are not rate-limiting this connection.
4657 */
4658 if (is_syn_set) {
4659 if (!tcp_is_ack_ratelimited(tp)) {
4660 tcpstat.tcps_synchallenge++;
4661 tp->t_flags |= TF_ACKNOW;
4662 }
4663 } else {
4664 tp->t_flags |= TF_ACKNOW;
4665 }
4666
4667 if (todrop == 1) {
4668 /* This could be a keepalive */
4669 soevent(so, SO_FILT_HINT_LOCKED |
4670 SO_FILT_HINT_KEEPALIVE);
4671 }
4672 todrop = tlen;
4673 tcpstat.tcps_rcvduppack++;
4674 tcpstat.tcps_rcvdupbyte += todrop;
4675 } else {
4676 tcpstat.tcps_rcvpartduppack++;
4677 tcpstat.tcps_rcvpartdupbyte += todrop;
4678 }
4679
4680 if (todrop > 1) {
4681 /*
4682 * Note the duplicate data sequence space so that
4683 * it can be reported in DSACK option.
4684 */
4685 tp->t_dsack_lseq = th->th_seq;
4686 tp->t_dsack_rseq = th->th_seq + todrop;
4687 tp->t_flags |= TF_ACKNOW;
4688 }
4689 if (nstat_collect) {
4690 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1,
4691 todrop, NSTAT_RX_FLAG_DUPLICATE);
4692 INP_ADD_RXSTAT(inp, ifnet_count_type, 1, todrop);
4693 tp->t_stat.rxduplicatebytes += todrop;
4694 }
4695 drop_hdrlen += todrop; /* drop from the top afterwards */
4696 th->th_seq += todrop;
4697 tlen -= todrop;
4698 if (th->th_urp > todrop) {
4699 th->th_urp -= todrop;
4700 } else {
4701 thflags &= ~TH_URG;
4702 th->th_urp = 0;
4703 }
4704 }
4705
4706 /*
4707 * If new data are received on a connection after the user
4708 * processes are gone, then RST the other end.
4709 * Send also a RST when we received a data segment after we've
4710 * sent our FIN when the socket is defunct.
4711 * Note that an MPTCP subflow socket would have SS_NOFDREF set
4712 * by default. So, if it's an MPTCP-subflow we rather check the
4713 * MPTCP-level's socket state for SS_NOFDREF.
4714 */
4715 if (tlen) {
4716 boolean_t close_it = FALSE;
4717
4718 if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF) &&
4719 tp->t_state > TCPS_CLOSE_WAIT) {
4720 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SS_NOFDREF");
4721 close_it = TRUE;
4722 }
4723
4724 if ((so->so_flags & SOF_MP_SUBFLOW) && (mptetoso(tptomptp(tp)->mpt_mpte)->so_state & SS_NOFDREF) &&
4725 tp->t_state > TCPS_CLOSE_WAIT) {
4726 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SOF_MP_SUBFLOW SS_NOFDREF");
4727 close_it = TRUE;
4728 }
4729
4730 if ((so->so_flags & SOF_DEFUNCT) && tp->t_state > TCPS_FIN_WAIT_1) {
4731 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SOF_DEFUNCT");
4732 close_it = TRUE;
4733 }
4734
4735 if (so->so_state & SS_CANTRCVMORE) {
4736 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SS_CANTRCVMORE");
4737 close_it = TRUE;
4738 }
4739
4740 if (close_it) {
4741 tp = tcp_close(tp);
4742 tcpstat.tcps_rcvafterclose++;
4743 IF_TCP_STATINC(ifp, cleanup);
4744 drop_reason = DROP_REASON_TCP_RECV_AFTER_CLOSE;
4745 goto dropwithreset;
4746 }
4747 }
4748
4749 /*
4750 * If segment ends after window, drop trailing data
4751 * (and PUSH and FIN); if nothing left, just ACK.
4752 */
4753 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
4754 if (todrop > 0) {
4755 tcpstat.tcps_rcvpackafterwin++;
4756 if (todrop >= tlen) {
4757 tcpstat.tcps_rcvbyteafterwin += tlen;
4758 /*
4759 * If window is closed can only take segments at
4760 * window edge, and have to drop data and PUSH from
4761 * incoming segments. Continue processing, but
4762 * remember to ack. Otherwise, drop segment
4763 * and ack.
4764 */
4765 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
4766 tp->t_flags |= TF_ACKNOW;
4767 tcpstat.tcps_rcvwinprobe++;
4768 } else {
4769 goto dropafterack;
4770 }
4771 } else {
4772 tcpstat.tcps_rcvbyteafterwin += todrop;
4773 }
4774 m_adj(m, -todrop);
4775 tlen -= todrop;
4776 thflags &= ~(TH_PUSH | TH_FIN);
4777 }
4778
4779 /*
4780 * If last ACK falls within this segment's sequence numbers,
4781 * record its timestamp.
4782 * NOTE:
4783 * 1) That the test incorporates suggestions from the latest
4784 * proposal of the [email protected] list (Braden 1993/04/26).
4785 * 2) That updating only on newer timestamps interferes with
4786 * our earlier PAWS tests, so this check should be solely
4787 * predicated on the sequence space of this segment.
4788 * 3) That we modify the segment boundary check to be
4789 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
4790 * instead of RFC1323's
4791 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
4792 * This modified check allows us to overcome RFC1323's
4793 * limitations as described in Stevens TCP/IP Illustrated
4794 * Vol. 2 p.869. In such cases, we can still calculate the
4795 * RTT correctly when RCV.NXT == Last.ACK.Sent.
4796 */
4797 if ((to.to_flags & TOF_TS) != 0 &&
4798 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
4799 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
4800 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
4801 tp->ts_recent_age = tcp_now;
4802 tp->ts_recent = to.to_tsval;
4803 }
4804
4805 /*
4806 * Stevens: If a SYN is in the window, then this is an
4807 * error and we send an RST and drop the connection.
4808 *
4809 * RFC 5961 Section 4.2
4810 * Send challenge ACK for any SYN in synchronized state
4811 * Perform rate limitation in doing so.
4812 */
4813 if (thflags & TH_SYN) {
4814 if (!tcp_syn_data_valid(tp, th, tlen)) {
4815 tcpstat.tcps_badsyn++;
4816 /* Drop if we have reached ACK limit */
4817 if (tcp_is_ack_ratelimited(tp)) {
4818 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN data invalid");
4819 drop_reason = DROP_REASON_TCP_SYN_DATA_INVALID;
4820 goto drop;
4821 } else {
4822 /* Send challenge ACK */
4823 tcpstat.tcps_synchallenge++;
4824 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN data invalid");
4825 drop_reason = DROP_REASON_TCP_SYN_DATA_INVALID;
4826 goto dropafterack;
4827 }
4828 } else {
4829 /*
4830 * Received SYN (/ACK) with data.
4831 * Move sequence number along to process the data.
4832 */
4833 th->th_seq++;
4834 thflags &= ~TH_SYN;
4835 }
4836 }
4837
4838 /*
4839 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
4840 * flag is on (half-synchronized state), then queue data for
4841 * later processing; else drop segment and return.
4842 */
4843 if ((thflags & TH_ACK) == 0) {
4844 if (tp->t_state == TCPS_SYN_RECEIVED) {
4845 if ((TFO_ENABLED(tp))) {
4846 /*
4847 * So, we received a valid segment while in
4848 * SYN-RECEIVED.
4849 * As this cannot be an RST (see that if a bit
4850 * higher), and it does not have the ACK-flag
4851 * set, we want to retransmit the SYN/ACK.
4852 * Thus, we have to reset snd_nxt to snd_una to
4853 * trigger the going back to sending of the
4854 * SYN/ACK. This is more consistent with the
4855 * behavior of tcp_output(), which expects
4856 * to send the segment that is pointed to by
4857 * snd_nxt.
4858 */
4859 tp->snd_nxt = tp->snd_una;
4860
4861 /*
4862 * We need to make absolutely sure that we are
4863 * going to reply upon a duplicate SYN-segment.
4864 */
4865 if (th->th_flags & TH_SYN) {
4866 needoutput = 1;
4867 }
4868 }
4869 /* Process this same as newly received Accurate ECN SYN */
4870 int ace_flags = ((th->th_x2 << 8) | thflags) & TH_ACE;
4871 tcp_input_process_accecn_syn(tp, ace_flags, ip_ecn);
4872
4873 goto step6;
4874 } else if (tp->t_flags & TF_ACKNOW) {
4875 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad ACK");
4876 drop_reason = DROP_REASON_TCP_BAD_ACK;
4877 goto dropafterack;
4878 } else {
4879 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad ACK");
4880 drop_reason = DROP_REASON_TCP_BAD_ACK;
4881 goto drop;
4882 }
4883 }
4884
4885 /*
4886 * Ack processing.
4887 */
4888
4889 switch (tp->t_state) {
4890 /*
4891 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
4892 * ESTABLISHED state and continue processing.
4893 * The ACK was checked above.
4894 */
4895 case TCPS_SYN_RECEIVED:
4896
4897 tcpstat.tcps_connects++;
4898
4899 /* Do window scaling? */
4900 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
4901 tp->snd_scale = tp->requested_s_scale;
4902 tp->rcv_scale = tp->request_r_scale;
4903 tp->snd_wnd = th->th_win << tp->snd_scale;
4904 tp->max_sndwnd = tp->snd_wnd;
4905 tiwin = tp->snd_wnd;
4906 }
4907 /*
4908 * Make transitions:
4909 * SYN-RECEIVED -> ESTABLISHED
4910 * SYN-RECEIVED* -> FIN-WAIT-1
4911 */
4912 tp->t_starttime = tcp_now;
4913 tcp_sbrcv_tstmp_check(tp);
4914 if (tp->t_flags & TF_NEEDFIN) {
4915 DTRACE_TCP4(state__change, void, NULL,
4916 struct inpcb *, inp,
4917 struct tcpcb *, tp, int32_t, TCPS_FIN_WAIT_1);
4918 TCP_LOG_STATE(tp, TCPS_FIN_WAIT_1);
4919 tp->t_state = TCPS_FIN_WAIT_1;
4920 tp->t_flags &= ~TF_NEEDFIN;
4921
4922 TCP_LOG_CONNECTION_SUMMARY(tp);
4923 } else {
4924 DTRACE_TCP4(state__change, void, NULL,
4925 struct inpcb *, inp,
4926 struct tcpcb *, tp, int32_t, TCPS_ESTABLISHED);
4927 TCP_LOG_STATE(tp, TCPS_ESTABLISHED);
4928 tp->t_state = TCPS_ESTABLISHED;
4929 tp->t_timer[TCPT_KEEP] = tcp_offset_from_start(tp,
4930 TCP_CONN_KEEPIDLE(tp));
4931 if (nstat_collect) {
4932 nstat_route_connect_success(
4933 tp->t_inpcb->inp_route.ro_rt);
4934 }
4935 TCP_LOG_CONNECTED(tp, 0);
4936 /*
4937 * The SYN is acknowledged but una is not updated
4938 * yet. So pass the value of ack to compute
4939 * sndbytes correctly
4940 */
4941 inp_count_sndbytes(inp, th->th_ack);
4942 }
4943 tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
4944
4945 VERIFY(LIST_EMPTY(&tp->t_segq));
4946 tp->snd_wl1 = th->th_seq - 1;
4947
4948 /*
4949 * AccECN server in SYN-RCVD state received an ACK with
4950 * SYN=0, process handshake encoding present in the ACK for SYN-ACK
4951 * and update receive side counters.
4952 *
4953 * When SYN cookies are used, process last ACK only if classic ECN
4954 * wasn't negotiated.
4955 */
4956 if ((tp->accurate_ecn_on || (tp->l4s_enabled && !TCP_ECN_ENABLED(tp) && syn_cookie_processed))
4957 && (thflags & (TH_SYN | TH_ACK)) == TH_ACK) {
4958 uint16_t aceflags = tcp_get_flags(th);
4959 aceflags &= TH_ACE;
4960 tcp_input_process_accecn_last_ack(tp, &to, (uint32_t)tlen, aceflags, syn_cookie_processed);
4961 /* Increment receive side counters based on IP-ECN */
4962 tcp_input_ip_ecn(tp, inp, (uint32_t)tlen, (uint32_t)segment_count, ip_ecn);
4963 }
4964
4965 #if MPTCP
4966 /*
4967 * Do not send the connect notification for additional subflows
4968 * until ACK for 3-way handshake arrives.
4969 */
4970 if ((!(tp->t_mpflags & TMPF_MPTCP_TRUE)) &&
4971 (tp->t_mpflags & TMPF_SENT_JOIN)) {
4972 isconnected = FALSE;
4973 } else
4974 #endif /* MPTCP */
4975 isconnected = TRUE;
4976 if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
4977 /* Done this when receiving the SYN */
4978 isconnected = FALSE;
4979
4980 OSDecrementAtomic(&tcp_tfo_halfcnt);
4981
4982 /* Panic if something has gone terribly wrong. */
4983 VERIFY(tcp_tfo_halfcnt >= 0);
4984
4985 tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
4986 }
4987
4988 /*
4989 * In case there is data in the send-queue (e.g., TFO is being
4990 * used, or connectx+data has been done), then if we would
4991 * "FALLTHROUGH", we would handle this ACK as if data has been
4992 * acknowledged. But, we have to prevent this. And this
4993 * can be prevented by increasing snd_una by 1, so that the
4994 * SYN is not considered as data (snd_una++ is actually also
4995 * done in SYN_SENT-state as part of the regular TCP stack).
4996 *
4997 * In case there is data on this ack as well, the data will be
4998 * handled by the label "dodata" right after step6.
4999 */
5000 if (so->so_snd.sb_cc) {
5001 tp->snd_una++; /* SYN is acked */
5002 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
5003 tp->snd_nxt = tp->snd_una;
5004 }
5005
5006 /*
5007 * No duplicate-ACK handling is needed. So, we
5008 * directly advance to processing the ACK (aka,
5009 * updating the RTT estimation,...)
5010 *
5011 * But, we first need to handle eventual SACKs,
5012 * because TFO will start sending data with the
5013 * SYN/ACK, so it might be that the client
5014 * includes a SACK with its ACK.
5015 */
5016 if (SACK_ENABLED(tp) &&
5017 (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes))) {
5018 tcp_sack_doack(tp, &to, th, &sack_bytes_acked, &highest_sacked_seq);
5019 }
5020
5021 goto process_ACK;
5022 }
5023
5024 OS_FALLTHROUGH;
5025
5026 /*
5027 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
5028 * ACKs. If the ack is in the range
5029 * tp->snd_una < th->th_ack <= tp->snd_max
5030 * then advance tp->snd_una to th->th_ack and drop
5031 * data from the retransmission queue. If this ACK reflects
5032 * more up to date window information we update our window information.
5033 */
5034 case TCPS_ESTABLISHED:
5035 case TCPS_FIN_WAIT_1:
5036 case TCPS_FIN_WAIT_2:
5037 case TCPS_CLOSE_WAIT:
5038 case TCPS_CLOSING:
5039 case TCPS_LAST_ACK:
5040 case TCPS_TIME_WAIT:
5041 {
5042 /*
5043 * TODO: The MAX(..., 100) is a temporary workaround for redirection issues with certain captive portals
5044 * in the wild.
5045 * After a successful TCP handshake, these portals send an incorrect ACK number in the data packet containing
5046 * the HTTP redirect response, that is 19 bytes behind the ISS. The security mitigation below caused these
5047 * packets to be dropped. Making the minimum byte_limit 100 works around this issue.
5048 * This workaround will be removed once the operators of these portals patch the issue on their end.
5049 */
5050 const uint64_t byte_limit = MAX(MIN(tp->t_stat.bytes_acked, tp->max_sndwnd), 100);
5051
5052 if (SEQ_GT(th->th_ack, tp->snd_max)) {
5053 tcpstat.tcps_rcvacktoomuch++;
5054 if (tcp_is_ack_ratelimited(tp)) {
5055 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 rcvacktoomuch");
5056 drop_reason = DROP_REASON_TCP_ACK_TOOMUCH;
5057 goto drop;
5058 } else {
5059 drop_reason = DROP_REASON_TCP_ACK_TOOMUCH;
5060 goto dropafterack;
5061 }
5062 }
5063 if (SEQ_LT(th->th_ack, tp->snd_una - byte_limit)) {
5064 if (tcp_is_ack_ratelimited(tp)) {
5065 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 bad ACK");
5066 drop_reason = DROP_REASON_TCP_OLD_ACK;
5067 goto drop;
5068 } else {
5069 drop_reason = DROP_REASON_TCP_OLD_ACK;
5070 goto dropafterack;
5071 }
5072 }
5073 if (SACK_ENABLED(tp) && to.to_nsacks > 0) {
5074 recvd_dsack = tcp_sack_process_dsack(tp, &to, th, &dsack_tlp);
5075 if (TCP_RACK_ENABLED(tp)) {
5076 /* If DSACK was received (not due to TLP), then increase the reordering window */
5077 if (recvd_dsack && !dsack_tlp) {
5078 tp->rack.dsack_round_seen = 1;
5079 }
5080 tcp_rack_update_reordering_window(tp, highest_sacked_seq);
5081 }
5082 /*
5083 * If DSACK is received and this packet has no
5084 * other SACK information, it can be dropped.
5085 * We do not want to treat it as a duplicate ack.
5086 */
5087 if (recvd_dsack &&
5088 SEQ_LEQ(th->th_ack, tp->snd_una) &&
5089 to.to_nsacks == 0) {
5090 tcp_bad_rexmt_check(tp, th, &to);
5091 goto drop;
5092 }
5093 }
5094
5095 if (SACK_ENABLED(tp) &&
5096 (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes))) {
5097 tcp_sack_doack(tp, &to, th, &sack_bytes_acked, &highest_sacked_seq);
5098 }
5099
5100 #if MPTCP
5101 if (tp->t_mpuna && SEQ_GEQ(th->th_ack, tp->t_mpuna)) {
5102 if (tp->t_mpflags & TMPF_PREESTABLISHED) {
5103 /* MP TCP establishment succeeded */
5104 tp->t_mpuna = 0;
5105 if (tp->t_mpflags & TMPF_JOINED_FLOW) {
5106 if (tp->t_mpflags & TMPF_SENT_JOIN) {
5107 tp->t_mpflags &=
5108 ~TMPF_PREESTABLISHED;
5109 tp->t_mpflags |=
5110 TMPF_MPTCP_TRUE;
5111
5112 tp->t_timer[TCPT_JACK_RXMT] = 0;
5113 tp->t_mprxtshift = 0;
5114 isconnected = TRUE;
5115 } else {
5116 isconnected = FALSE;
5117 }
5118 } else {
5119 isconnected = TRUE;
5120 }
5121 }
5122 }
5123 #endif /* MPTCP */
5124
5125 tcp_tfo_rcv_ack(tp, th);
5126
5127 /*
5128 * If we have outstanding data (other than
5129 * a window probe), this is a completely
5130 * duplicate ack and the ack is the biggest we've seen.
5131 *
5132 * Need to accommodate a change in window on duplicate acks
5133 * to allow operating systems that update window during
5134 * recovery with SACK
5135 */
5136 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
5137 /*
5138 * Update snd_fack when new SACK blocks are received
5139 * without advancing the ACK
5140 */
5141 if (TCP_RACK_ENABLED(tp) && sack_bytes_acked > 0 &&
5142 SEQ_LT(tp->snd_fack, highest_sacked_seq)) {
5143 tp->snd_fack = highest_sacked_seq;
5144 }
5145
5146 /*
5147 * Process AccECN feedback here for control packets
5148 * that don't have s/acked bytes
5149 */
5150 if (tp->accurate_ecn_on && (tp->ecn_flags & TE_SENDIPECT) &&
5151 (sack_bytes_acked == 0)) {
5152 tp->total_ect_packets_acked += 1;
5153
5154 bool newly_acked_time = false;
5155 if (acked == 0 && (to.to_flags & TOF_TS) != 0 && to.to_tsecr != 0 &&
5156 TSTMP_GT(to.to_tsecr, tp->t_last_ack_tsecr)) {
5157 newly_acked_time = true;
5158 }
5159 if (newly_acked_time) {
5160 tcp_process_accecn(tp, &to, th, 1, ace);
5161 }
5162 }
5163
5164 if (tlen == 0 && (tiwin == tp->snd_wnd ||
5165 (to.to_nsacks > 0 && sack_bytes_acked > 0))) {
5166 uint32_t old_dupacks = 0;
5167 /*
5168 * If both ends send FIN at the same time,
5169 * then the ack will be a duplicate ack
5170 * but we have to process the FIN. Check
5171 * for this condition and process the FIN
5172 * instead of the dupack
5173 */
5174 if ((thflags & TH_FIN) &&
5175 !TCPS_HAVERCVDFIN(tp->t_state)) {
5176 break;
5177 }
5178 process_dupack:
5179 old_dupacks = tp->t_dupacks;
5180 #if MPTCP
5181 /*
5182 * MPTCP options that are ignored must
5183 * not be treated as duplicate ACKs.
5184 */
5185 if (to.to_flags & TOF_MPTCP) {
5186 goto drop;
5187 }
5188
5189 if ((isconnected) && (tp->t_mpflags & TMPF_JOINED_FLOW)) {
5190 break;
5191 }
5192 #endif /* MPTCP */
5193 /*
5194 * If a duplicate acknowledgement was seen
5195 * after ECN, it indicates packet loss in
5196 * addition to ECN. Reset INRECOVERY flag
5197 * so that we can process partial acks
5198 * correctly
5199 */
5200 if (tp->ecn_flags & TE_INRECOVERY) {
5201 tp->ecn_flags &= ~TE_INRECOVERY;
5202 }
5203
5204 tcpstat.tcps_rcvdupack++;
5205 if (SACK_ENABLED(tp)) {
5206 tp->t_dupacks += max(1, sack_bytes_acked / tp->t_maxseg);
5207 } else {
5208 ++tp->t_dupacks;
5209 }
5210
5211 if (!TCP_RACK_ENABLED(tp)) {
5212 tp->sackhint.sack_bytes_acked += sack_bytes_acked;
5213 }
5214
5215 if (sack_bytes_acked > 0 && tp->accurate_ecn_on &&
5216 (tp->ecn_flags & TE_SENDIPECT) && tp->t_state == TCPS_ESTABLISHED) {
5217 uint32_t pkts_sacked = tcp_packets_this_ack(tp, sack_bytes_acked);
5218 tp->total_ect_packets_acked += pkts_sacked;
5219 tcp_process_accecn(tp, &to, th, pkts_sacked, ace);
5220 }
5221 /*
5222 * Check if we need to reset the limit on
5223 * early retransmit
5224 */
5225 if (tp->t_early_rexmt_count > 0 &&
5226 TSTMP_GEQ(tcp_now,
5227 (tp->t_early_rexmt_win +
5228 TCP_EARLY_REXMT_WIN))) {
5229 tp->t_early_rexmt_count = 0;
5230 }
5231
5232 /*
5233 * Is early retransmit needed? We check for
5234 * this when the connection is waiting for
5235 * duplicate acks to enter fast recovery.
5236 */
5237 if (!IN_FASTRECOVERY(tp)) {
5238 tcp_early_rexmt_check(tp, th);
5239 }
5240
5241 /*
5242 * Detect loss based on RACK during dupACK processing to mark lost
5243 * segments before tcp_output is called for retransmission
5244 */
5245 if (TCP_RACK_ENABLED(tp) && tcp_rack_detect_loss_and_arm_timer(tp, tp->t_dupacks)) {
5246 rack_loss_detected = true;
5247 }
5248 /*
5249 * Below are four different processing of (dup) ACKs,
5250 * 1. Not a valid dup ACK
5251 * 2. More than 3 dup ACKs but already in Fast Recovery
5252 * 3. Entered Fast Recovery for the first time
5253 * 4. Received less than 3 dup ACKs, evaluate if we can do Limited Transmit
5254 */
5255 if (tp->t_timer[TCPT_REXMT] == 0 ||
5256 (th->th_ack != tp->snd_una && sack_bytes_acked == 0)) {
5257 /*
5258 * No outstanding data and ACK is not a duplicate as it is
5259 * less than snd_una but not equal to it.
5260 */
5261 tp->t_dupacks = 0;
5262 tp->t_rexmtthresh = tcprexmtthresh;
5263 } else if ((!TCP_RACK_ENABLED(tp) && tp->t_dupacks > tp->t_rexmtthresh && old_dupacks >= tp->t_rexmtthresh) ||
5264 IN_FASTRECOVERY(tp)) {
5265 /*
5266 * We are already in Fast Recovery and t_dupacks is greater than retransmit threshold.
5267 * Increase the cwnd by 1MSS if allowed
5268 */
5269
5270 /*
5271 * If this connection was seeing packet
5272 * reordering, then recovery might be
5273 * delayed to disambiguate between
5274 * reordering and loss
5275 */
5276 if (SACK_ENABLED(tp) && !IN_FASTRECOVERY(tp) &&
5277 (tp->t_flagsext &
5278 (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) ==
5279 (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) {
5280 /*
5281 * Since the SACK information is already
5282 * updated, this ACK will be dropped
5283 */
5284 break;
5285 }
5286
5287 /*
5288 * Dup acks mean that packets have left the
5289 * network (they're now cached at the receiver)
5290 * so bump cwnd by the amount in the receiver
5291 * to keep a constant cwnd packets in the
5292 * network.
5293 */
5294 if (SACK_ENABLED(tp) && IN_FASTRECOVERY(tp)) {
5295 int awnd;
5296
5297 /*
5298 * Compute the amount of data in flight first.
5299 * We can inject new data into the pipe iff
5300 * we have less than snd_ssthres worth of data in
5301 * flight.
5302 */
5303 awnd = (tp->snd_nxt - tp->snd_fack) + tp->sackhint.sack_bytes_rexmit;
5304 if (awnd < tp->snd_ssthresh) {
5305 tp->snd_cwnd += tp->t_maxseg;
5306 if (tp->snd_cwnd > tp->snd_ssthresh) {
5307 tp->snd_cwnd = tp->snd_ssthresh;
5308 }
5309 }
5310 } else {
5311 tp->snd_cwnd += tp->t_maxseg;
5312 }
5313
5314 /* Process any window updates */
5315 if (tiwin > tp->snd_wnd) {
5316 tcp_update_window(tp, thflags,
5317 th, tiwin, tlen);
5318 }
5319 tcp_ccdbg_trace(tp, th,
5320 TCP_CC_IN_FASTRECOVERY);
5321
5322 (void) tcp_output(tp);
5323
5324 goto drop;
5325 } else if (rack_loss_detected || (!TCP_RACK_ENABLED(tp) && tp->t_dupacks >= tp->t_rexmtthresh)) {
5326 /*
5327 * Currently not in Fast Recovery and received 3 or more dupacks.
5328 * Enter Fast Recovery, retransmit segment and set
5329 * cwnd to sshthresh if SACK is enabled.
5330 */
5331 tcp_seq onxt = tp->snd_nxt;
5332
5333 /*
5334 * If we're doing sack, check to
5335 * see if we're already in sack
5336 * recovery. If we're not doing sack,
5337 * check to see if we're in newreno
5338 * recovery.
5339 */
5340 if (SACK_ENABLED(tp)) {
5341 if (IN_FASTRECOVERY(tp)) {
5342 tp->t_dupacks = 0;
5343 break;
5344 } else if (tp->t_flagsext & TF_DELAY_RECOVERY) {
5345 break;
5346 }
5347 } else {
5348 if (SEQ_LEQ(th->th_ack, tp->snd_recover)) {
5349 tp->t_dupacks = 0;
5350 break;
5351 }
5352 }
5353 if (tp->t_flags & TF_SENTFIN) {
5354 tp->snd_recover = tp->snd_max - 1;
5355 } else {
5356 tp->snd_recover = tp->snd_max;
5357 }
5358 tp->t_timer[TCPT_PTO] = 0;
5359 tp->t_rtttime = 0;
5360
5361 /*
5362 * If the connection has seen pkt
5363 * reordering, delay recovery until
5364 * it is clear that the packet
5365 * was lost.
5366 */
5367 if (SACK_ENABLED(tp) &&
5368 (tp->t_flagsext &
5369 (TF_PKTS_REORDERED | TF_DELAY_RECOVERY))
5370 == TF_PKTS_REORDERED &&
5371 !IN_FASTRECOVERY(tp) &&
5372 tp->t_reorderwin > 0 &&
5373 (tp->t_state == TCPS_ESTABLISHED ||
5374 tp->t_state == TCPS_FIN_WAIT_1)) {
5375 tp->t_timer[TCPT_DELAYFR] =
5376 tcp_offset_from_start(tp,
5377 tp->t_reorderwin);
5378 tp->t_flagsext |= TF_DELAY_RECOVERY;
5379 tcpstat.tcps_delay_recovery++;
5380 tcp_ccdbg_trace(tp, th,
5381 TCP_CC_DELAY_FASTRECOVERY);
5382 break;
5383 }
5384
5385 tcp_rexmt_save_state(tp);
5386 /*
5387 * If the current tcp cc module has
5388 * defined a hook for tasks to run
5389 * before entering FR, call it
5390 */
5391 if (CC_ALGO(tp)->pre_fr != NULL) {
5392 CC_ALGO(tp)->pre_fr(tp);
5393 }
5394 ENTER_FASTRECOVERY(tp);
5395 tp->t_timer[TCPT_REXMT] = 0;
5396 if (!tp->accurate_ecn_on && TCP_ECN_ENABLED(tp)) {
5397 tp->ecn_flags |= TE_SENDCWR;
5398 }
5399
5400 if (SACK_ENABLED(tp)) {
5401 if (TCP_RACK_ENABLED(tp)) {
5402 tcpstat.tcps_rack_recovery_episode++;
5403 tp->t_rack_recovery_episode++;
5404 } else {
5405 tcpstat.tcps_sack_recovery_episode++;
5406 tp->t_sack_recovery_episode++;
5407 }
5408
5409 tp->snd_cwnd = tp->snd_ssthresh;
5410 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
5411
5412 /* Process any window updates */
5413 if (tiwin > tp->snd_wnd) {
5414 tcp_update_window(tp, thflags, th, tiwin, tlen);
5415 }
5416
5417 tcp_ccdbg_trace(tp, th, TCP_CC_ENTER_FASTRECOVERY);
5418 (void) tcp_output(tp);
5419 goto drop;
5420 }
5421 tp->snd_nxt = th->th_ack;
5422 tp->snd_cwnd = tp->t_maxseg;
5423
5424 /* cwnd is validated after pre_fr() */
5425 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
5426
5427 /* Process any window updates */
5428 if (tiwin > tp->snd_wnd) {
5429 tcp_update_window(tp, thflags, th, tiwin, tlen);
5430 }
5431
5432 (void) tcp_output(tp);
5433 if (tp->t_flagsext & TF_CWND_NONVALIDATED) {
5434 tcp_cc_adjust_nonvalidated_cwnd(tp);
5435 } else {
5436 tp->snd_cwnd = tp->snd_ssthresh + tp->t_maxseg * tp->t_dupacks;
5437 }
5438 if (SEQ_GT(onxt, tp->snd_nxt)) {
5439 tp->snd_nxt = onxt;
5440 }
5441
5442 tcp_ccdbg_trace(tp, th, TCP_CC_ENTER_FASTRECOVERY);
5443 goto drop;
5444 } else if (ALLOW_LIMITED_TRANSMIT(tp) &&
5445 (!(SACK_ENABLED(tp)) || sack_bytes_acked > 0) &&
5446 (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)) > 0) {
5447 u_int32_t incr = (tp->t_maxseg * tp->t_dupacks);
5448
5449 /* Use Limited Transmit algorithm on the first two
5450 * duplicate acks when there is new data to transmit
5451 */
5452 tp->snd_cwnd += incr;
5453 tcpstat.tcps_limited_txt++;
5454 (void) tcp_output(tp);
5455
5456 tcp_ccdbg_trace(tp, th, TCP_CC_LIMITED_TRANSMIT);
5457
5458 /* Reset snd_cwnd back to normal */
5459 tp->snd_cwnd -= incr;
5460 }
5461 }
5462 break;
5463 }
5464 /*
5465 * If the congestion window was inflated to account
5466 * for the other side's cached packets, retract it.
5467 */
5468 if (IN_FASTRECOVERY(tp)) {
5469 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
5470 /*
5471 * If we received an ECE and entered
5472 * recovery, the subsequent ACKs should
5473 * not be treated as partial acks.
5474 */
5475 if (tp->ecn_flags & TE_INRECOVERY) {
5476 goto process_ACK;
5477 }
5478 /* RACK doesn't require inflating cwnd */
5479 if (!TCP_RACK_ENABLED(tp)) {
5480 if (SACK_ENABLED(tp)) {
5481 tcp_sack_partialack(tp, th);
5482 } else {
5483 tcp_newreno_partial_ack(tp, th);
5484 }
5485 tcp_ccdbg_trace(tp, th, TCP_CC_PARTIAL_ACK);
5486 }
5487 } else {
5488 exiting_fr = 1;
5489 EXIT_FASTRECOVERY(tp);
5490 if (CC_ALGO(tp)->post_fr != NULL) {
5491 CC_ALGO(tp)->post_fr(tp, th);
5492 }
5493
5494 if (TCP_RACK_ENABLED(tp)) {
5495 tcp_rack_update_reordering_win_persist(tp);
5496 }
5497
5498 tp->t_pipeack = 0;
5499 tcp_clear_pipeack_state(tp);
5500 tcp_ccdbg_trace(tp, th,
5501 TCP_CC_EXIT_FASTRECOVERY);
5502 }
5503 } else if ((tp->t_flagsext &
5504 (TF_PKTS_REORDERED | TF_DELAY_RECOVERY))
5505 == (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) {
5506 /*
5507 * If the ack acknowledges upto snd_recover or if
5508 * it acknowledges all the snd holes, exit
5509 * recovery and cancel the timer. Otherwise,
5510 * this is a partial ack. Wait for recovery timer
5511 * to enter recovery. The snd_holes have already
5512 * been updated.
5513 */
5514 if (SEQ_GEQ(th->th_ack, tp->snd_recover) ||
5515 TAILQ_EMPTY(&tp->snd_holes)) {
5516 tp->t_timer[TCPT_DELAYFR] = 0;
5517 tp->t_flagsext &= ~TF_DELAY_RECOVERY;
5518 EXIT_FASTRECOVERY(tp);
5519 tcp_ccdbg_trace(tp, th,
5520 TCP_CC_EXIT_FASTRECOVERY);
5521 }
5522 } else {
5523 /*
5524 * We were not in fast recovery. Reset the
5525 * duplicate ack counter.
5526 */
5527 tp->t_dupacks = 0;
5528 tp->t_rexmtthresh = tcprexmtthresh;
5529 }
5530
5531 process_ACK:
5532 VERIFY(SEQ_GEQ(th->th_ack, tp->snd_una));
5533 acked = BYTES_ACKED(th, tp);
5534 tcpstat.tcps_rcvackpack++;
5535 tcpstat.tcps_rcvackbyte += acked;
5536
5537 /*
5538 * If the last packet was a retransmit, make sure
5539 * it was not spurious.
5540 *
5541 * This will also take care of congestion window
5542 * adjustment if a last packet was recovered due to a
5543 * tail loss probe.
5544 */
5545 tcp_bad_rexmt_check(tp, th, &to);
5546
5547 /* Recalculate the RTT */
5548 tcp_compute_rtt(tp, &to, th);
5549
5550 /*
5551 * If all outstanding data is acked, stop retransmit
5552 * timer and remember to restart (more output or persist).
5553 * If there is more data to be acked, restart retransmit
5554 * timer, using current (possibly backed-off) value.
5555 */
5556 TCP_RESET_REXMT_STATE(tp);
5557 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
5558 tp->t_rttmin, TCPTV_REXMTMAX,
5559 TCP_ADD_REXMTSLOP(tp));
5560 if (th->th_ack == tp->snd_max) {
5561 tp->t_timer[TCPT_REXMT] = 0;
5562 tp->t_timer[TCPT_PTO] = 0;
5563 tp->t_timer[TCPT_REORDER] = 0;
5564 tcp_rack_reset_segs_retransmitted(tp);
5565 needoutput = 1;
5566 } else if (tp->t_timer[TCPT_PERSIST] == 0) {
5567 tcp_set_rto(tp);
5568 }
5569
5570 if ((prev_t_state == TCPS_SYN_SENT ||
5571 prev_t_state == TCPS_SYN_RECEIVED) &&
5572 tp->t_state == TCPS_ESTABLISHED) {
5573 TCP_LOG_RTT_INFO(tp);
5574 }
5575
5576 /*
5577 * If no data (only SYN) was ACK'd, skip rest of ACK
5578 * processing.
5579 */
5580 if (acked == 0) {
5581 goto step6;
5582 }
5583
5584 /*
5585 * Process sent segments used for RACK as we need to update
5586 * RACK state before loss detection. Update snd_fack only
5587 * after ACK processing which performs reordering detection.
5588 */
5589 if (TCP_RACK_ENABLED(tp)) {
5590 tcp_segs_doack(tp, th->th_ack, &to);
5591 if (SEQ_LT(tp->snd_fack, highest_sacked_seq)) {
5592 tp->snd_fack = highest_sacked_seq;
5593 }
5594 if (SEQ_LT(tp->snd_fack, th->th_ack)) {
5595 tp->snd_fack = th->th_ack;
5596 }
5597 }
5598 /*
5599 * When outgoing data has been acked (except the SYN+data), we
5600 * mark this connection as "sending good" for TFO.
5601 */
5602 if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
5603 !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
5604 !(th->th_flags & TH_SYN)) {
5605 tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;
5606 }
5607
5608 if ((tp->ecn_flags & TE_SENDIPECT)) {
5609 /*
5610 * draft-ietf-tcpm-accurate-ecn-28
5611 * Accurate ECN feedback processing for data sender,
5612 * Process peer's feedback in received TCP thflags and update s.cep
5613 * Since SYN-ACK has a special encoding, exclude it from below.
5614 * Only perform it before CC is called and snd_una is updated.
5615 */
5616 if (tp->accurate_ecn_on && !(thflags & TH_SYN)) {
5617 /*
5618 * For a server in SYN_RECEIVED state (that switched to
5619 * ESTABLISHED in this ACK, exclude processing the last ACK
5620 */
5621 if (th->th_ack == tp->iss + 1) {
5622 acked = 0;
5623 }
5624 uint32_t pkts_acked = tcp_packets_this_ack(tp, acked);
5625 tp->total_ect_packets_acked += pkts_acked;
5626 /*
5627 * Calculate newly_acked_time used for AccECN feedback parsing
5628 * for data sender if ACK acknowledges packets without data
5629 * if reordering happens and certain packets have same TS.
5630 * Right now, we consider that new time was ACKed if the TS
5631 * was GT previous value, but we need to think about how to
5632 * differentiate between reordering and wrapping when TS is same
5633 * as previous value.
5634 */
5635 bool newly_acked_time = false;
5636 if (acked == 0 && sack_bytes_acked == 0 &&
5637 (to.to_flags & TOF_TS) != 0 && to.to_tsecr != 0 &&
5638 (tp->t_last_ack_tsecr == 0 || TSTMP_GT(to.to_tsecr, tp->t_last_ack_tsecr))) {
5639 newly_acked_time = true;
5640 }
5641 /*
5642 * Update s.cep if bytes have been newly S/ACKed
5643 * otherwise, this ACK has already been superseded.
5644 */
5645 if (acked > 0 || sack_bytes_acked > 0 || newly_acked_time) {
5646 tcp_process_accecn(tp, &to, th, pkts_acked, ace);
5647 }
5648 } else if (TCP_ECN_ENABLED(tp) && (thflags & TH_ECE)) {
5649 uint32_t pkts_acked = tcp_packets_this_ack(tp, acked);
5650 /*
5651 * For classic ECN, congestion event is receiving TH_ECE.
5652 * Disable ECN if > 90% marking is observed in ACK packets
5653 */
5654 tcp_ece_aggressive_heur(tp, pkts_acked);
5655 /*
5656 * Reduce the congestion window if we haven't
5657 * done so.
5658 */
5659 if (!IN_FASTRECOVERY(tp)) {
5660 /*
5661 * Although we enter Fast Recovery in the below function
5662 * we exit it immediately below as th_ack >= snd_recover
5663 */
5664 tcp_enter_fast_recovery(tp);
5665 tp->ecn_flags |= (TE_INRECOVERY | TE_SENDCWR);
5666 /*
5667 * Also note that the connection received
5668 * ECE atleast once. We increment
5669 * t_ecn_capable_packets_marked when we first
5670 * enter fast recovery.
5671 */
5672 tp->ecn_flags |= TE_RECV_ECN_ECE;
5673 INP_INC_IFNET_STAT(inp, ecn_recv_ece);
5674 tcpstat.tcps_ecn_recv_ece++;
5675 tp->t_ecn_capable_packets_marked += pkts_acked;
5676 tcp_ccdbg_trace(tp, th, TCP_CC_ECN_RCVD);
5677 }
5678 }
5679 }
5680
5681 /*
5682 * When new data is acked, open the congestion window.
5683 * The specifics of how this is achieved are up to the
5684 * congestion control algorithm in use for this connection.
5685 *
5686 * The calculations in this function assume that snd_una is
5687 * not updated yet.
5688 */
5689 if (!IN_FASTRECOVERY(tp) && !exiting_fr) {
5690 if (CC_ALGO(tp)->ack_rcvd != NULL) {
5691 CC_ALGO(tp)->ack_rcvd(tp, th);
5692 }
5693 tcp_ccdbg_trace(tp, th, TCP_CC_ACK_RCVD);
5694 }
5695 if (acked > so->so_snd.sb_cc) {
5696 tp->snd_wnd -= so->so_snd.sb_cc;
5697 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc);
5698 ourfinisacked = 1;
5699 } else {
5700 sbdrop(&so->so_snd, acked);
5701 tcp_sbsnd_trim(&so->so_snd);
5702 tp->snd_wnd -= acked;
5703 ourfinisacked = 0;
5704 }
5705 /* detect una wraparound */
5706 if (!IN_FASTRECOVERY(tp) &&
5707 SEQ_GT(tp->snd_una, tp->snd_recover) &&
5708 SEQ_LEQ(th->th_ack, tp->snd_recover)) {
5709 tp->snd_recover = th->th_ack - 1;
5710 }
5711
5712 if (IN_FASTRECOVERY(tp) &&
5713 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
5714 EXIT_FASTRECOVERY(tp);
5715 if (TCP_RACK_ENABLED(tp)) {
5716 tcp_rack_update_reordering_win_persist(tp);
5717 }
5718 }
5719
5720 tcp_update_snd_una(tp, th->th_ack);
5721
5722 if (SACK_ENABLED(tp)) {
5723 if (SEQ_GT(tp->snd_una, tp->snd_recover)) {
5724 tp->snd_recover = tp->snd_una;
5725 }
5726 }
5727 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
5728 tp->snd_nxt = tp->snd_una;
5729 }
5730
5731 /*
5732 * Detect loss based on RACK during ACK processing to mark lost
5733 * segments and call tcp_output. Rest of the ACK processing can
5734 * continue after that.
5735 */
5736 if (TCP_RACK_ENABLED(tp) && tcp_rack_detect_loss_and_arm_timer(tp, 0)) {
5737 if (!IN_FASTRECOVERY(tp)) {
5738 tcp_enter_fast_recovery(tp);
5739 tcpstat.tcps_rack_recovery_episode++;
5740 tp->t_rack_recovery_episode++;
5741 }
5742 tcp_output(tp);
5743 }
5744
5745 if (!SLIST_EMPTY(&tp->t_rxt_segments) &&
5746 !TCP_DSACK_SEQ_IN_WINDOW(tp, tp->t_dsack_lastuna,
5747 tp->snd_una)) {
5748 tcp_rxtseg_clean(tp);
5749 }
5750 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
5751 tp->t_bwmeas != NULL) {
5752 tcp_bwmeas_check(tp);
5753 }
5754
5755 write_wakeup = 1;
5756
5757 if (!SLIST_EMPTY(&tp->t_notify_ack)) {
5758 tcp_notify_acknowledgement(tp, so);
5759 }
5760
5761 switch (tp->t_state) {
5762 /*
5763 * In FIN_WAIT_1 STATE in addition to the processing
5764 * for the ESTABLISHED state if our FIN is now acknowledged
5765 * then enter FIN_WAIT_2.
5766 */
5767 case TCPS_FIN_WAIT_1:
5768 if (ourfinisacked) {
5769 /*
5770 * If we can't receive any more
5771 * data, then closing user can proceed.
5772 * Starting the TCPT_2MSL timer is contrary to the
5773 * specification, but if we don't get a FIN
5774 * we'll hang forever.
5775 */
5776 DTRACE_TCP4(state__change, void, NULL,
5777 struct inpcb *, inp,
5778 struct tcpcb *, tp,
5779 int32_t, TCPS_FIN_WAIT_2);
5780 TCP_LOG_STATE(tp, TCPS_FIN_WAIT_2);
5781 tp->t_state = TCPS_FIN_WAIT_2;
5782 if (so->so_state & SS_CANTRCVMORE) {
5783 isconnected = FALSE;
5784 isdisconnected = TRUE;
5785 tcp_set_finwait_timeout(tp);
5786 }
5787 /*
5788 * fall through and make sure we also recognize
5789 * data ACKed with the FIN
5790 */
5791 }
5792 break;
5793
5794 /*
5795 * In CLOSING STATE in addition to the processing for
5796 * the ESTABLISHED state if the ACK acknowledges our FIN
5797 * then enter the TIME-WAIT state, otherwise ignore
5798 * the segment.
5799 */
5800 case TCPS_CLOSING:
5801 if (ourfinisacked) {
5802 DTRACE_TCP4(state__change, void, NULL,
5803 struct inpcb *, inp,
5804 struct tcpcb *, tp,
5805 int32_t, TCPS_TIME_WAIT);
5806 TCP_LOG_STATE(tp, TCPS_TIME_WAIT);
5807 tp->t_state = TCPS_TIME_WAIT;
5808 tcp_canceltimers(tp);
5809 if (tp->t_flagsext & TF_NOTIMEWAIT) {
5810 tp->t_flags |= TF_CLOSING;
5811 } else {
5812 add_to_time_wait(tp, 2 * tcp_msl);
5813 }
5814 isconnected = FALSE;
5815 isdisconnected = TRUE;
5816 }
5817 break;
5818
5819 /*
5820 * In LAST_ACK, we may still be waiting for data to drain
5821 * and/or to be acked, as well as for the ack of our FIN.
5822 * If our FIN is now acknowledged, delete the TCB,
5823 * enter the closed state and return.
5824 */
5825 case TCPS_LAST_ACK:
5826 if (ourfinisacked) {
5827 tp = tcp_close(tp);
5828 goto drop;
5829 }
5830 break;
5831
5832 /*
5833 * In TIME_WAIT state the only thing that should arrive
5834 * is a retransmission of the remote FIN. Acknowledge
5835 * it and restart the finack timer.
5836 */
5837 case TCPS_TIME_WAIT:
5838 add_to_time_wait(tp, 2 * tcp_msl);
5839 goto dropafterack;
5840 }
5841
5842 /*
5843 * If there is a SACK option on the ACK and we
5844 * haven't seen any duplicate acks before, count
5845 * it as a duplicate ack even if the cumulative
5846 * ack is advanced. If the receiver delayed an
5847 * ack and detected loss afterwards, then the ack
5848 * will advance cumulative ack and will also have
5849 * a SACK option. So counting it as one duplicate
5850 * ack is ok.
5851 */
5852 if (tp->t_state == TCPS_ESTABLISHED &&
5853 SACK_ENABLED(tp) && sack_bytes_acked > 0 &&
5854 to.to_nsacks > 0 && tp->t_dupacks == 0 &&
5855 SEQ_LEQ(th->th_ack, tp->snd_una) && tlen == 0 &&
5856 !(tp->t_flagsext & TF_PKTS_REORDERED)) {
5857 tcpstat.tcps_sack_ackadv++;
5858 goto process_dupack;
5859 }
5860 }
5861 }
5862
5863 step6:
5864 /*
5865 * Update window information.
5866 */
5867 if (tcp_update_window(tp, thflags, th, tiwin, tlen)) {
5868 needoutput = 1;
5869 }
5870
5871 /*
5872 * Process segments with URG.
5873 */
5874 if ((thflags & TH_URG) && th->th_urp &&
5875 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
5876 /*
5877 * This is a kludge, but if we receive and accept
5878 * random urgent pointers, we'll crash in
5879 * soreceive. It's hard to imagine someone
5880 * actually wanting to send this much urgent data.
5881 */
5882 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
5883 th->th_urp = 0; /* XXX */
5884 thflags &= ~TH_URG; /* XXX */
5885 goto dodata; /* XXX */
5886 }
5887 /*
5888 * If this segment advances the known urgent pointer,
5889 * then mark the data stream. This should not happen
5890 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
5891 * a FIN has been received from the remote side.
5892 * In these states we ignore the URG.
5893 *
5894 * According to RFC961 (Assigned Protocols),
5895 * the urgent pointer points to the last octet
5896 * of urgent data. We continue, however,
5897 * to consider it to indicate the first octet
5898 * of data past the urgent section as the original
5899 * spec states (in one of two places).
5900 */
5901 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
5902 tp->rcv_up = th->th_seq + th->th_urp;
5903 so->so_oobmark = so->so_rcv.sb_cc +
5904 (tp->rcv_up - tp->rcv_nxt) - 1;
5905 if (so->so_oobmark == 0) {
5906 so->so_state |= SS_RCVATMARK;
5907 }
5908 sohasoutofband(so);
5909 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
5910 }
5911 /*
5912 * Remove out of band data so doesn't get presented to user.
5913 * This can happen independent of advancing the URG pointer,
5914 * but if two URG's are pending at once, some out-of-band
5915 * data may creep in... ick.
5916 */
5917 if (th->th_urp <= (u_int32_t)tlen
5918 #if SO_OOBINLINE
5919 && (so->so_options & SO_OOBINLINE) == 0
5920 #endif
5921 ) {
5922 tcp_pulloutofband(so, th, m,
5923 drop_hdrlen); /* hdr drop is delayed */
5924 }
5925 } else {
5926 /*
5927 * If no out of band data is expected,
5928 * pull receive urgent pointer along
5929 * with the receive window.
5930 */
5931 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) {
5932 tp->rcv_up = tp->rcv_nxt;
5933 }
5934 }
5935 dodata:
5936
5937 /* Set socket's connect or disconnect state correcly before doing data.
5938 * The following might unlock the socket if there is an upcall or a socket
5939 * filter.
5940 */
5941 if (isconnected) {
5942 soisconnected(so);
5943 } else if (isdisconnected) {
5944 soisdisconnected(so);
5945 }
5946
5947 /* Let's check the state of pcb just to make sure that it did not get closed
5948 * when we unlocked above
5949 */
5950 if (inp->inp_state == INPCB_STATE_DEAD) {
5951 /* Just drop the packet that we are processing and return */
5952 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "INPCB_STATE_DEAD");
5953 drop_reason = DROP_REASON_TCP_NO_SOCK;
5954 goto drop;
5955 }
5956
5957 /*
5958 * Process the segment text, merging it into the TCP sequencing queue,
5959 * and arranging for acknowledgment of receipt if necessary.
5960 * This process logically involves adjusting tp->rcv_wnd as data
5961 * is presented to the user (this happens in tcp_usrreq.c,
5962 * case PRU_RCVD). If a FIN has already been received on this
5963 * connection then we just ignore the text.
5964 *
5965 * If we are in SYN-received state and got a valid TFO cookie, we want
5966 * to process the data.
5967 */
5968 if ((tlen || (thflags & TH_FIN)) &&
5969 TCPS_HAVERCVDFIN(tp->t_state) == 0 &&
5970 (TCPS_HAVEESTABLISHED(tp->t_state) ||
5971 (tp->t_state == TCPS_SYN_RECEIVED &&
5972 (tp->t_tfo_flags & TFO_F_COOKIE_VALID)))) {
5973 tcp_seq save_start = th->th_seq;
5974 tcp_seq save_end = th->th_seq + tlen;
5975 m_adj(m, drop_hdrlen); /* delayed header drop */
5976
5977 if (th->th_seq == tp->rcv_nxt) {
5978 int mem = tcp_memacct_limited();
5979 if (mem == MEMACCT_HARDLIMIT ||
5980 (mem == MEMACCT_SOFTLIMIT && so->so_rcv.sb_cc > 0)) {
5981 /*
5982 * If we are at the hard limit, just drop.
5983 * If we are at the softlimit, only accept one
5984 * packet into the receive-queue.
5985 */
5986 drop_reason = DROP_REASON_TCP_INSEQ_MEMORY_PRESSURE;
5987 tcpstat.tcps_rcvmemdrop++;
5988 goto drop;
5989 }
5990 }
5991 /*
5992 * Insert segment which includes th into TCP reassembly queue
5993 * with control block tp. Set thflags to whether reassembly now
5994 * includes a segment with FIN. This handles the common case
5995 * inline (segment is the next to be received on an established
5996 * connection, and the queue is empty), avoiding linkage into
5997 * and removal from the queue and repetition of various
5998 * conversions.
5999 * Set DELACK for segments received in order, but ack
6000 * immediately when segments are out of order (so
6001 * fast retransmit can work).
6002 */
6003 if (th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) {
6004 TCP_INC_VAR(tp->t_unacksegs, segment_count);
6005
6006 /* Calculate the RTT on the receiver */
6007 tcp_compute_rcv_rtt(tp, &to, th);
6008
6009 if (DELAY_ACK(tp, th) &&
6010 ((tp->t_flags & TF_ACKNOW) == 0)) {
6011 if ((tp->t_flags & TF_DELACK) == 0) {
6012 tp->t_flags |= TF_DELACK;
6013 tp->t_timer[TCPT_DELACK] =
6014 tcp_offset_from_start(tp, tcp_delack);
6015 }
6016 } else {
6017 tp->t_flags |= TF_ACKNOW;
6018 }
6019 tp->rcv_nxt += tlen;
6020 /* Update highest received sequence and its timestamp */
6021 if (SEQ_LT(tp->rcv_high, tp->rcv_nxt)) {
6022 tp->rcv_high = tp->rcv_nxt;
6023 if (to.to_flags & TOF_TS) {
6024 tp->tsv_high = to.to_tsval;
6025 }
6026 }
6027
6028 thflags = th->th_flags & TH_FIN;
6029 TCP_INC_VAR(tcpstat.tcps_rcvpack, segment_count);
6030 tcpstat.tcps_rcvbyte += tlen;
6031 if (nstat_collect) {
6032 INP_ADD_RXSTAT(inp, ifnet_count_type, 1, tlen);
6033 }
6034 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
6035 if (TCP_USE_RLEDBAT(tp, so) &&
6036 tcp_cc_rledbat.data_rcvd != NULL) {
6037 tcp_cc_rledbat.data_rcvd(tp, th, &to, tlen);
6038 }
6039
6040 so_recv_data_stat(so, m, drop_hdrlen);
6041
6042 if (isipv6) {
6043 memcpy(&saved_hdr, ip6, sizeof(struct ip6_hdr));
6044 ip6 = (struct ip6_hdr *)&saved_hdr[0];
6045 } else {
6046 memcpy(&saved_hdr, ip, ip->ip_hl << 2);
6047 ip = (struct ip *)&saved_hdr[0];
6048 }
6049 memcpy(&saved_tcphdr, th, sizeof(struct tcphdr));
6050
6051 if (th->th_flags & TH_PUSH) {
6052 tp->t_flagsext |= TF_LAST_IS_PSH;
6053 } else {
6054 tp->t_flagsext &= ~TF_LAST_IS_PSH;
6055 }
6056
6057 if (sbappendstream_rcvdemux(so, m)) {
6058 read_wakeup = 1;
6059 }
6060 th = &saved_tcphdr;
6061 } else {
6062 if (isipv6) {
6063 memcpy(&saved_hdr, ip6, sizeof(struct ip6_hdr));
6064 ip6 = (struct ip6_hdr *)&saved_hdr[0];
6065 } else {
6066 memcpy(&saved_hdr, ip, ip->ip_hl << 2);
6067 ip = (struct ip *)&saved_hdr[0];
6068 }
6069
6070 /* Update highest received sequence and its timestamp */
6071 if (SEQ_LT(tp->rcv_high, th->th_seq + tlen)) {
6072 tp->rcv_high = th->th_seq + tlen;
6073 if (to.to_flags & TOF_TS) {
6074 tp->tsv_high = to.to_tsval;
6075 }
6076 }
6077
6078 /*
6079 * Calculate the RTT on the receiver,
6080 * even if OOO segment is received.
6081 */
6082 tcp_compute_rcv_rtt(tp, &to, th);
6083
6084 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
6085 if (TCP_USE_RLEDBAT(tp, so) &&
6086 tcp_cc_rledbat.data_rcvd != NULL) {
6087 tcp_cc_rledbat.data_rcvd(tp, th, &to, tlen);
6088 }
6089
6090 memcpy(&saved_tcphdr, th, sizeof(struct tcphdr));
6091 thflags = tcp_reass(tp, th, &tlen, m, ifp, &read_wakeup);
6092 th = &saved_tcphdr;
6093 tp->t_flags |= TF_ACKNOW;
6094 }
6095
6096 if ((tlen > 0 || (th->th_flags & TH_FIN)) && SACK_ENABLED(tp)) {
6097 if (th->th_flags & TH_FIN) {
6098 save_end++;
6099 }
6100 tcp_update_sack_list(tp, save_start, save_end);
6101 }
6102
6103 tcp_adaptive_rwtimo_check(tp, tlen);
6104
6105 if (tlen > 0) {
6106 tcp_tfo_rcv_data(tp);
6107 }
6108
6109 if (tp->t_flags & TF_DELACK) {
6110 if (isipv6) {
6111 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
6112 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
6113 th->th_seq, th->th_ack, th->th_win);
6114 } else {
6115 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
6116 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
6117 th->th_seq, th->th_ack, th->th_win);
6118 }
6119 }
6120 } else {
6121 if ((so->so_flags & SOF_MP_SUBFLOW) && tlen == 0 &&
6122 (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) &&
6123 (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
6124 m_adj(m, drop_hdrlen); /* delayed header drop */
6125 /*
6126 * 0-length DATA_FIN. The rlen is actually 0. We special-case the
6127 * byte consumed by the dfin in mptcp_input and mptcp_reass_present
6128 */
6129 m->m_pkthdr.mp_rlen = 0;
6130 mptcp_input(tptomptp(tp)->mpt_mpte, m);
6131 tp->t_flags |= TF_ACKNOW;
6132 } else {
6133 m_freem(m);
6134 }
6135 thflags &= ~TH_FIN;
6136 }
6137 /*
6138 * We increment t_unacksegs_ce for both data segments and pure ACKs
6139 * No need to increment if a FIN has already been received.
6140 */
6141 if (tp->accurate_ecn_on && TCPS_HAVEESTABLISHED(tp->t_state) &&
6142 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
6143 if (ip_ecn == IPTOS_ECN_CE) {
6144 TCP_INC_VAR(tp->t_unacksegs_ce, segment_count);
6145 }
6146 /*
6147 * Send an ACK immediately if there is a change in IP ECN
6148 * from non-CE to CE.
6149 * If new data is delivered, then ACK for every 2 CE marks,
6150 * otherwise ACK for every 3 CE marks
6151 */
6152 if ((ip_ecn == IPTOS_ECN_CE && ip_ecn != tp->t_prev_ip_ecn) ||
6153 (tp->t_unacksegs_ce >= 2 && tp->last_ack_sent != tp->rcv_nxt) ||
6154 tp->t_unacksegs_ce >= 3) {
6155 tp->t_flags |= TF_ACKNOW;
6156 }
6157 tp->t_prev_ip_ecn = ip_ecn;
6158 }
6159 /*
6160 * If FIN is received ACK the FIN and let the user know
6161 * that the connection is closing.
6162 */
6163 if (thflags & TH_FIN) {
6164 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
6165 socantrcvmore(so);
6166 /*
6167 * If connection is half-synchronized
6168 * (ie NEEDSYN flag on) then delay ACK,
6169 * so it may be piggybacked when SYN is sent.
6170 * Otherwise, since we received a FIN then no
6171 * more input can be expected, send ACK now.
6172 */
6173 TCP_INC_VAR(tp->t_unacksegs, segment_count);
6174 tp->t_flags |= TF_ACKNOW;
6175 tp->rcv_nxt++;
6176 }
6177 switch (tp->t_state) {
6178 /*
6179 * In SYN_RECEIVED and ESTABLISHED STATES
6180 * enter the CLOSE_WAIT state.
6181 */
6182 case TCPS_SYN_RECEIVED:
6183 tp->t_starttime = tcp_now;
6184 OS_FALLTHROUGH;
6185 case TCPS_ESTABLISHED:
6186 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
6187 struct tcpcb *, tp, int32_t, TCPS_CLOSE_WAIT);
6188 TCP_LOG_STATE(tp, TCPS_CLOSE_WAIT);
6189 tp->t_state = TCPS_CLOSE_WAIT;
6190 break;
6191
6192 /*
6193 * If still in FIN_WAIT_1 STATE FIN has not been acked so
6194 * enter the CLOSING state.
6195 */
6196 case TCPS_FIN_WAIT_1:
6197 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
6198 struct tcpcb *, tp, int32_t, TCPS_CLOSING);
6199 TCP_LOG_STATE(tp, TCPS_CLOSING);
6200 tp->t_state = TCPS_CLOSING;
6201 break;
6202
6203 /*
6204 * In FIN_WAIT_2 state enter the TIME_WAIT state,
6205 * starting the time-wait timer, turning off the other
6206 * standard timers.
6207 */
6208 case TCPS_FIN_WAIT_2:
6209 DTRACE_TCP4(state__change, void, NULL,
6210 struct inpcb *, inp,
6211 struct tcpcb *, tp,
6212 int32_t, TCPS_TIME_WAIT);
6213 TCP_LOG_STATE(tp, TCPS_TIME_WAIT);
6214 tp->t_state = TCPS_TIME_WAIT;
6215 tcp_canceltimers(tp);
6216 tp->t_flags |= TF_ACKNOW;
6217 if (tp->t_flagsext & TF_NOTIMEWAIT) {
6218 tp->t_flags |= TF_CLOSING;
6219 } else {
6220 add_to_time_wait(tp, 2 * tcp_msl);
6221 }
6222 soisdisconnected(so);
6223 break;
6224
6225 /*
6226 * In TIME_WAIT state restart the 2 MSL time_wait timer.
6227 */
6228 case TCPS_TIME_WAIT:
6229 add_to_time_wait(tp, 2 * tcp_msl);
6230 break;
6231 }
6232 }
6233 if (read_wakeup) {
6234 mptcp_handle_input(so);
6235 }
6236
6237 /*
6238 * Return any desired output.
6239 */
6240 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
6241 (void) tcp_output(tp);
6242 }
6243
6244 tcp_check_timer_state(tp);
6245
6246 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
6247
6248 socket_unlock(so, 1);
6249 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
6250 return;
6251
6252 dropafterack:
6253 /*
6254 * Generate an ACK dropping incoming segment if it occupies
6255 * sequence space, where the ACK reflects our state.
6256 *
6257 * We can now skip the test for the RST flag since all
6258 * paths to this code happen after packets containing
6259 * RST have been dropped.
6260 *
6261 * In the SYN-RECEIVED state, don't send an ACK unless the
6262 * segment we received passes the SYN-RECEIVED ACK test.
6263 * If it fails send a RST. This breaks the loop in the
6264 * "LAND" DoS attack, and also prevents an ACK storm
6265 * between two listening ports that have been sent forged
6266 * SYN segments, each with the source address of the other.
6267 */
6268 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
6269 (SEQ_GT(tp->snd_una, th->th_ack) ||
6270 SEQ_GT(th->th_ack, tp->snd_max))) {
6271 IF_TCP_STATINC(ifp, dospacket);
6272 goto dropwithreset;
6273 }
6274 m_freem(m);
6275 tp->t_flags |= TF_ACKNOW;
6276
6277 (void) tcp_output(tp);
6278
6279 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
6280
6281 /* Don't need to check timer state as we should have done it during tcp_output */
6282 socket_unlock(so, 1);
6283 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
6284 return;
6285 dropwithresetnosock:
6286 nosock = 1;
6287 dropwithreset:
6288 /*
6289 * Generate a RST, dropping incoming segment.
6290 * Make ACK acceptable to originator of segment.
6291 * Don't bother to respond if destination was broadcast/multicast.
6292 */
6293 if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST)) {
6294 goto drop;
6295 }
6296 if (isipv6) {
6297 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
6298 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
6299 goto drop;
6300 }
6301 } else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
6302 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
6303 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
6304 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
6305 goto drop;
6306 }
6307 /* IPv6 anycast check is done at tcp6_input() */
6308
6309 bzero(&tra, sizeof(tra));
6310 tra.ifscope = ifscope;
6311 tra.awdl_unrestricted = 1;
6312 tra.intcoproc_allowed = 1;
6313 tra.management_allowed = 1;
6314 if (thflags & TH_ACK) {
6315 /* mtod() below is safe as long as hdr dropping is delayed */
6316 tcp_respond(tp, mtod(m, void *), m->m_len, th, m, (tcp_seq)0, th->th_ack,
6317 0, TH_RST, NULL, 0, 0, 0, &tra, false);
6318 } else {
6319 if (thflags & TH_SYN) {
6320 tlen++;
6321 }
6322 /* mtod() below is safe as long as hdr dropping is delayed */
6323 tcp_respond(tp, mtod(m, void *), m->m_len, th, m, th->th_seq + tlen,
6324 (tcp_seq)0, 0, TH_RST | TH_ACK, NULL, 0, 0, 0, &tra, false);
6325 }
6326 /* destroy temporarily created socket */
6327 if (dropsocket) {
6328 (void) soabort(so);
6329 socket_unlock(so, 1);
6330 } else if ((inp != NULL) && (nosock == 0)) {
6331 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
6332
6333 socket_unlock(so, 1);
6334 }
6335 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
6336 return;
6337 dropnosock:
6338 nosock = 1;
6339 drop:
6340 /*
6341 * Drop space held by incoming segment and return.
6342 */
6343 if (isipv6 == 0) {
6344 if (ip == NULL) {
6345 ip = mtod(m, struct ip *);
6346 }
6347 /* add back the header length */
6348 ip->ip_len += (ip->ip_hl << 2);
6349 HTONS(ip->ip_len);
6350 HTONS(ip->ip_off);
6351
6352 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
6353 } else if (ip6 == NULL) {
6354 ip6 = mtod(m, struct ip6_hdr *);
6355
6356 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
6357 }
6358 if (is_th_swapped) {
6359 HTONL(th->th_seq);
6360 HTONL(th->th_ack);
6361 HTONS(th->th_win);
6362 HTONS(th->th_urp);
6363 }
6364 if (drop_reason != DROP_REASON_UNSPECIFIED || droptap_verbose > 0) {
6365 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, drop_reason, NULL, 0);
6366 } else {
6367 m_freem(m);
6368 }
6369 /* destroy temporarily created socket */
6370 if (dropsocket) {
6371 (void) soabort(so);
6372 socket_unlock(so, 1);
6373 } else if (nosock == 0) {
6374 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
6375
6376 socket_unlock(so, 1);
6377 }
6378 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
6379 return;
6380 }
6381
6382 /*
6383 * Parse TCP options and place in tcpopt.
6384 */
6385 static void
tcp_dooptions(struct tcpcb * tp,u_char * cp0 __counted_by (cnt0),int cnt0,struct tcphdr * th,struct tcpopt * to)6386 tcp_dooptions(struct tcpcb *tp, u_char *cp0 __counted_by(cnt0), int cnt0, struct tcphdr *th,
6387 struct tcpopt *to)
6388 {
6389 u_short mss = 0;
6390 uint8_t opt, optlen;
6391 u_char *cp = cp0;
6392 u_char * const cpend = cp0 + cnt0;
6393 int cnt = cnt0;
6394
6395 for (; cnt > 0; cnt -= optlen, cp += optlen) {
6396 opt = cp[0];
6397 if (opt == TCPOPT_EOL) {
6398 break;
6399 }
6400 if (opt == TCPOPT_NOP) {
6401 optlen = 1;
6402 } else {
6403 if (cnt < 2) {
6404 break;
6405 }
6406 optlen = cp[1];
6407 if (optlen < 2 || optlen > cnt) {
6408 break;
6409 }
6410 }
6411 switch (opt) {
6412 default:
6413 continue;
6414
6415 case TCPOPT_MAXSEG:
6416 if (optlen != TCPOLEN_MAXSEG) {
6417 continue;
6418 }
6419 if (!(th->th_flags & TH_SYN)) {
6420 continue;
6421 }
6422 bcopy((char *) cp + 2, (char *) &mss, sizeof(mss));
6423 NTOHS(mss);
6424 to->to_mss = mss;
6425 to->to_flags |= TOF_MSS;
6426 break;
6427
6428 case TCPOPT_WINDOW:
6429 if (optlen != TCPOLEN_WINDOW) {
6430 continue;
6431 }
6432 if (!(th->th_flags & TH_SYN)) {
6433 continue;
6434 }
6435 to->to_flags |= TOF_SCALE;
6436 to->to_wscale = MIN(cp[2], TCP_MAX_WINSHIFT);
6437 break;
6438
6439 case TCPOPT_TIMESTAMP:
6440 if (optlen != TCPOLEN_TIMESTAMP) {
6441 continue;
6442 }
6443 to->to_flags |= TOF_TS;
6444 bcopy((char *)cp + 2,
6445 (char *)&to->to_tsval, sizeof(to->to_tsval));
6446 NTOHL(to->to_tsval);
6447 bcopy((char *)cp + 6,
6448 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
6449 NTOHL(to->to_tsecr);
6450 to->to_tsecr -= tp->t_ts_offset;
6451 /* Re-enable sending Timestamps if we received them */
6452 if (!(tp->t_flags & TF_REQ_TSTMP) && tcp_do_timestamps) {
6453 tp->t_flags |= TF_REQ_TSTMP;
6454 }
6455 break;
6456 case TCPOPT_SACK_PERMITTED:
6457 if (optlen != TCPOLEN_SACK_PERMITTED) {
6458 continue;
6459 }
6460 if (th->th_flags & TH_SYN) {
6461 to->to_flags |= TOF_SACKPERM;
6462 }
6463 break;
6464 case TCPOPT_SACK:
6465 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) {
6466 continue;
6467 }
6468 to->to_flags |= TOF_SACK;
6469 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
6470 to->to_sacks_size = optlen - 2;
6471 to->to_sacks = cp + 2;
6472 tcpstat.tcps_sack_rcv_blocks++;
6473
6474 break;
6475 case TCPOPT_FASTOPEN:
6476 if (optlen == TCPOLEN_FASTOPEN_REQ) {
6477 if (tp->t_state != TCPS_LISTEN) {
6478 continue;
6479 }
6480
6481 to->to_flags |= TOF_TFOREQ;
6482 } else {
6483 if (optlen < TCPOLEN_FASTOPEN_REQ ||
6484 (optlen - TCPOLEN_FASTOPEN_REQ) > TFO_COOKIE_LEN_MAX ||
6485 (optlen - TCPOLEN_FASTOPEN_REQ) < TFO_COOKIE_LEN_MIN) {
6486 continue;
6487 }
6488 if (tp->t_state != TCPS_LISTEN &&
6489 tp->t_state != TCPS_SYN_SENT) {
6490 continue;
6491 }
6492
6493 to->to_flags |= TOF_TFO;
6494 to->to_tfo = cp + 1;
6495 to->to_tfo_size = optlen - 1;
6496 }
6497
6498 break;
6499 case TCPOPT_ACCECN0:
6500 case TCPOPT_ACCECN1:
6501 if (optlen < (TCPOLEN_ACCECN_EMPTY + 1 * TCPOLEN_ACCECN_COUNTER) ||
6502 (optlen - 2) % TCPOLEN_ACCECN_COUNTER != 0) {
6503 continue;
6504 }
6505 to->to_num_accecn = (optlen - 2) / TCPOLEN_ACCECN_COUNTER;
6506 to->to_accecn = cp + 2;
6507 to->to_accecn_size = optlen - 2;
6508 if (opt == TCPOPT_ACCECN0) {
6509 to->to_accecn_order = 0;
6510 } else if (opt == TCPOPT_ACCECN1) {
6511 to->to_accecn_order = 1;
6512 }
6513 break;
6514
6515 #if MPTCP
6516 case TCPOPT_MULTIPATH:
6517 tcp_do_mptcp_options(tp, cp, cpend, th, to, optlen);
6518 break;
6519 #endif /* MPTCP */
6520 }
6521 }
6522 }
6523
6524 static void
tcp_finalize_options(struct tcpcb * tp,struct tcpopt * to,unsigned int ifscope)6525 tcp_finalize_options(struct tcpcb *tp, struct tcpopt *to, unsigned int ifscope)
6526 {
6527 if (to->to_flags & TOF_TS) {
6528 tp->t_flags |= TF_RCVD_TSTMP;
6529 tp->ts_recent = to->to_tsval;
6530 tp->ts_recent_age = tcp_now;
6531 }
6532 if (to->to_flags & TOF_MSS) {
6533 tcp_mss(tp, to->to_mss, ifscope);
6534 }
6535 if (SACK_ENABLED(tp)) {
6536 if (!(to->to_flags & TOF_SACKPERM)) {
6537 tp->t_flagsext &= ~(TF_SACK_ENABLE);
6538 } else {
6539 tp->t_flags |= TF_SACK_PERMIT;
6540 }
6541 }
6542 if (to->to_flags & TOF_SCALE) {
6543 tp->t_flags |= TF_RCVD_SCALE;
6544 tp->requested_s_scale = to->to_wscale;
6545
6546 /* Re-enable window scaling, if the option is received */
6547 if (tp->request_r_scale > 0) {
6548 tp->t_flags |= TF_REQ_SCALE;
6549 }
6550 }
6551 }
6552
6553 /*
6554 * Pull out of band byte out of a segment so
6555 * it doesn't appear in the user's data queue.
6556 * It is still reflected in the segment length for
6557 * sequencing purposes.
6558 *
6559 * @param off delayed to be droped hdrlen
6560 */
6561 static void
tcp_pulloutofband(struct socket * so,struct tcphdr * th,struct mbuf * m,int off)6562 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
6563 {
6564 int cnt = off + th->th_urp - 1;
6565
6566 while (cnt >= 0) {
6567 if (m->m_len > cnt) {
6568 char *cp = mtod(m, caddr_t) + cnt;
6569 struct tcpcb *tp = sototcpcb(so);
6570
6571 tp->t_iobc = *cp;
6572 tp->t_oobflags |= TCPOOB_HAVEDATA;
6573 bcopy(cp + 1, cp, (unsigned)(m->m_len - cnt - 1));
6574 m->m_len--;
6575 if (m->m_flags & M_PKTHDR) {
6576 m->m_pkthdr.len--;
6577 }
6578 return;
6579 }
6580 cnt -= m->m_len;
6581 m = m->m_next;
6582 if (m == 0) {
6583 break;
6584 }
6585 }
6586 panic("tcp_pulloutofband");
6587 }
6588
6589 uint32_t
get_base_rtt(struct tcpcb * tp)6590 get_base_rtt(struct tcpcb *tp)
6591 {
6592 struct rtentry *rt = tp->t_inpcb->inp_route.ro_rt;
6593 return (rt == NULL) ? 0 : rt->rtt_min;
6594 }
6595
6596 static void
update_curr_rtt(struct tcpcb * tp,uint32_t rtt)6597 update_curr_rtt(struct tcpcb * tp, uint32_t rtt)
6598 {
6599 tp->curr_rtt_index = (tp->curr_rtt_index + 1) % NCURR_RTT_HIST;
6600 tp->curr_rtt_hist[tp->curr_rtt_index] = rtt;
6601
6602 /* forget the old value and update minimum */
6603 tp->curr_rtt_min = 0;
6604 for (int i = 0; i < NCURR_RTT_HIST; ++i) {
6605 if (tp->curr_rtt_hist[i] != 0 && (tp->curr_rtt_min == 0 ||
6606 tp->curr_rtt_hist[i] < tp->curr_rtt_min)) {
6607 tp->curr_rtt_min = tp->curr_rtt_hist[i];
6608 }
6609 }
6610 }
6611
6612 /* Each value of RTT base represents the minimum RTT seen in a minute.
6613 * We keep upto N_RTT_BASE minutes worth of history.
6614 */
6615 void
update_base_rtt(struct tcpcb * tp,uint32_t rtt)6616 update_base_rtt(struct tcpcb *tp, uint32_t rtt)
6617 {
6618 u_int32_t base_rtt, i;
6619 struct rtentry *rt;
6620
6621 if ((rt = tp->t_inpcb->inp_route.ro_rt) == NULL) {
6622 return;
6623 }
6624 if (rt->rtt_expire_ts == 0) {
6625 RT_LOCK_SPIN(rt);
6626 if (rt->rtt_expire_ts != 0) {
6627 RT_UNLOCK(rt);
6628 goto update;
6629 }
6630 rt->rtt_expire_ts = tcp_now;
6631 rt->rtt_index = 0;
6632 rt->rtt_hist[0] = rtt;
6633 rt->rtt_min = rtt;
6634 RT_UNLOCK(rt);
6635
6636 tp->curr_rtt_index = 0;
6637 tp->curr_rtt_hist[0] = rtt;
6638 tp->curr_rtt_min = rtt;
6639 return;
6640 }
6641 update:
6642 #if TRAFFIC_MGT
6643 /*
6644 * If the recv side is being throttled, check if the
6645 * current RTT is closer to the base RTT seen in
6646 * first (recent) two slots. If so, unthrottle the stream.
6647 */
6648 if ((tp->t_flagsext & TF_RECV_THROTTLE) &&
6649 (int)(tcp_now - tp->t_recv_throttle_ts) >= TCP_RECV_THROTTLE_WIN) {
6650 base_rtt = rt->rtt_min;
6651 if (tp->t_rttcur <= (base_rtt + target_qdelay)) {
6652 tp->t_flagsext &= ~TF_RECV_THROTTLE;
6653 tp->t_recv_throttle_ts = 0;
6654 }
6655 }
6656 #endif /* TRAFFIC_MGT */
6657
6658 /* Update the next current RTT sample */
6659 update_curr_rtt(tp, rtt);
6660
6661 if ((int)(tcp_now - rt->rtt_expire_ts) >=
6662 TCP_RTT_HISTORY_EXPIRE_TIME) {
6663 RT_LOCK_SPIN(rt);
6664 /* check the condition again to avoid race */
6665 if ((int)(tcp_now - rt->rtt_expire_ts) >=
6666 TCP_RTT_HISTORY_EXPIRE_TIME) {
6667 /* Set the base rtt to 0 for idle periods */
6668 uint32_t times = MIN((tcp_now - rt->rtt_expire_ts) /
6669 TCP_RTT_HISTORY_EXPIRE_TIME, NRTT_HIST + 1);
6670
6671 for (i = rt->rtt_index + 1; i < rt->rtt_index + times; i++) {
6672 rt->rtt_hist[i % NRTT_HIST] = 0;
6673 }
6674
6675 rt->rtt_index = i % NRTT_HIST;
6676 rt->rtt_hist[rt->rtt_index] = rtt;
6677 rt->rtt_expire_ts = tcp_now;
6678 } else {
6679 rt->rtt_hist[rt->rtt_index] =
6680 min(rt->rtt_hist[rt->rtt_index], rtt);
6681 }
6682 /* forget the old value and update minimum */
6683 rt->rtt_min = 0;
6684 for (i = 0; i < NRTT_HIST; ++i) {
6685 if (rt->rtt_hist[i] != 0 &&
6686 (rt->rtt_min == 0 ||
6687 rt->rtt_hist[i] < rt->rtt_min)) {
6688 rt->rtt_min = rt->rtt_hist[i];
6689 }
6690 }
6691 RT_UNLOCK(rt);
6692 } else {
6693 rt->rtt_hist[rt->rtt_index] =
6694 min(rt->rtt_hist[rt->rtt_index], rtt);
6695 if (rt->rtt_min == 0) {
6696 rt->rtt_min = rtt;
6697 } else {
6698 rt->rtt_min = min(rt->rtt_min, rtt);
6699 }
6700 }
6701 }
6702
6703 /*
6704 * If we have a timestamp reply, update smoothed RTT. If no timestamp is
6705 * present but transmit timer is running and timed sequence number was
6706 * acked, update smoothed RTT.
6707 *
6708 * If timestamps are supported, a receiver can update RTT even if
6709 * there is no outstanding data.
6710 *
6711 * Some boxes send broken timestamp replies during the SYN+ACK phase,
6712 * ignore timestamps of 0or we could calculate a huge RTT and blow up
6713 * the retransmit timer.
6714 */
6715 static void
tcp_compute_rtt(struct tcpcb * tp,struct tcpopt * to,struct tcphdr * th)6716 tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
6717 {
6718 int rtt = 0;
6719 VERIFY(to != NULL && th != NULL);
6720 if (tp->t_rtttime != 0 && SEQ_GT(th->th_ack, tp->t_rtseq)) {
6721 u_int32_t pipe_ack_val;
6722 rtt = tcp_now - tp->t_rtttime;
6723 if (rtt == 0) {
6724 /*
6725 * Make adjustment for sub ms RTT when
6726 * timestamps are not used.
6727 */
6728 rtt = 1;
6729 }
6730 /*
6731 * Compute pipe ack -- the amount of data acknowledged
6732 * in the last RTT -- only works for sender
6733 */
6734 if (SEQ_GT(th->th_ack, tp->t_pipeack_lastuna)) {
6735 pipe_ack_val = th->th_ack - tp->t_pipeack_lastuna;
6736 /* Update the sample */
6737 tp->t_pipeack_sample[tp->t_pipeack_ind++] =
6738 pipe_ack_val;
6739 tp->t_pipeack_ind %= TCP_PIPEACK_SAMPLE_COUNT;
6740
6741 /* Compute the max of the pipeack samples */
6742 pipe_ack_val = tcp_get_max_pipeack(tp);
6743 tp->t_pipeack = (pipe_ack_val >
6744 tcp_initial_cwnd(tp)) ?
6745 pipe_ack_val : 0;
6746 }
6747 /* start another measurement */
6748 tp->t_rtttime = 0;
6749 }
6750 if (((to->to_flags & TOF_TS) != 0) &&
6751 (to->to_tsecr != 0) &&
6752 TSTMP_GEQ(tcp_now, to->to_tsecr)) {
6753 tcp_xmit_timer(tp, (tcp_now - to->to_tsecr),
6754 to->to_tsecr, th->th_ack);
6755 } else if (rtt > 0) {
6756 tcp_xmit_timer(tp, rtt, 0, th->th_ack);
6757 }
6758 }
6759
6760 static void
tcp_compute_rcv_rtt(struct tcpcb * tp,struct tcpopt * to,struct tcphdr * th)6761 tcp_compute_rcv_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
6762 {
6763 uint32_t rtt = 0, delta = 0;
6764 VERIFY(to != NULL && th != NULL);
6765
6766 /* Calculate RTT */
6767 if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0) &&
6768 TSTMP_GEQ(tcp_now, to->to_tsecr)) {
6769 /* Timestamp is supported */
6770 rtt = tcp_now - to->to_tsecr;
6771 if (rtt == 0) {
6772 /* Make adjustment for sub ms RTT */
6773 rtt = 1;
6774 }
6775 } else if ((to->to_flags & TOF_TS) == 0) {
6776 /*
6777 * Timestamp is not supported, 1RTT is roughly
6778 * the time to receive one full window of data
6779 * Currently, RTT calculated this way is only used
6780 * for auto-tuning.
6781 */
6782 if (tp->rcv_rtt_est_ts != 0) {
6783 if (SEQ_LT(tp->rcv_nxt, tp->rcv_rtt_est_seq)) {
6784 /* Haven't received a full window yet */
6785 return;
6786 } else {
6787 rtt = tcp_now - tp->rcv_rtt_est_ts;
6788 if (rtt == 0) {
6789 /* Make adjustment for sub ms RTT */
6790 rtt = 1;
6791 }
6792 }
6793 } else {
6794 /* Use default value when no RTT measurement */
6795 rtt = TCPTV_RCVNOTS_QUANTUM;
6796 }
6797 /* Restart the measurement */
6798 tp->rcv_rtt_est_ts = tcp_now;
6799 tp->rcv_rtt_est_seq = tp->rcv_nxt + tp->rcv_wnd;
6800 }
6801
6802 /* Update receiver's SRTT */
6803 if (tp->rcv_srtt != 0) {
6804 /*
6805 * Use the smoothed rtt formula,
6806 * (srtt = rtt/8 + srtt*7/8) in fixed point
6807 */
6808 delta = (rtt << TCP_DELTA_SHIFT)
6809 - (tp->rcv_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
6810
6811 if ((tp->rcv_srtt += delta) <= 0) {
6812 tp->rcv_srtt = 1;
6813 }
6814 } else {
6815 /* No previous measurement */
6816 tp->rcv_srtt = rtt << TCP_RTT_SHIFT;
6817 }
6818
6819 /*
6820 * For current RTT, base RTT and current RTT over k samples,
6821 * we are using the same state for both sender and receiver
6822 * as the most recent sample is always updated before any
6823 * other processing, i.e. the sender will not end up with
6824 * a high RTT due to the receiver.
6825 */
6826 tp->t_rttcur = rtt;
6827 update_base_rtt(tp, rtt);
6828 }
6829
6830 /*
6831 * Collect new round-trip time estimate and update averages and
6832 * current timeout.
6833 */
6834 static void
tcp_xmit_timer(struct tcpcb * tp,int rtt,u_int32_t tsecr,tcp_seq th_ack)6835 tcp_xmit_timer(struct tcpcb *tp, int rtt,
6836 u_int32_t tsecr, tcp_seq th_ack)
6837 {
6838 VERIFY(rtt >= 0);
6839 int delta;
6840 int old_srtt = tp->t_srtt;
6841 int old_rttvar = tp->t_rttvar;
6842 bool log_rtt = false;
6843
6844 if (rtt == 0) {
6845 /*
6846 * As rtt has millisecond precision,
6847 * make adjustment for sub ms RTT
6848 */
6849 rtt = 1;
6850 }
6851
6852 if (rtt > 4 * TCPTV_MSL) {
6853 TCP_LOG(tp, "%s: rtt is %d - maxing it at 4 x MSL\n", __func__, rtt);
6854 /*
6855 * We compute RTT either based on the time-to-ACK a packet,
6856 * if TSval is disabled or based on the TSecr value.
6857 * If there is a middlebox messing up the TSecr value, we can
6858 * end up having HUGE rtt values, causing all kinds of problems.
6859 * Let's protect against this by capping RTT to 4*MSL
6860 * (60seconds).
6861 */
6862 rtt = 4 * TCPTV_MSL;
6863 }
6864
6865 /*
6866 * On AWDL interface, the initial RTT measurement on SYN
6867 * can be wrong due to peer caching. Avoid the first RTT
6868 * measurement as it might skew up the RTO.
6869 * <rdar://problem/28739046>
6870 */
6871 if (tp->t_inpcb->inp_last_outifp != NULL &&
6872 (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) &&
6873 th_ack == tp->iss + 1) {
6874 return;
6875 }
6876
6877 if (tp->t_flagsext & TF_RECOMPUTE_RTT) {
6878 if (SEQ_GT(th_ack, tp->snd_una) &&
6879 SEQ_LEQ(th_ack, tp->snd_max) &&
6880 (tsecr == 0 ||
6881 TSTMP_GEQ(tsecr, tp->t_badrexmt_time))) {
6882 /*
6883 * We received a new ACK after a
6884 * spurious timeout. Adapt retransmission
6885 * timer as described in rfc 4015.
6886 */
6887 tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
6888 tp->t_badrexmt_time = 0;
6889 tp->t_srtt = max(tp->t_srtt_prev, rtt);
6890 tp->t_srtt = tp->t_srtt << TCP_RTT_SHIFT;
6891 tp->t_rttvar = max(tp->t_rttvar_prev, (rtt >> 1));
6892 tp->t_rttvar = tp->t_rttvar << TCP_RTTVAR_SHIFT;
6893
6894 if (tp->t_rttbest > (tp->t_srtt + tp->t_rttvar)) {
6895 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6896 }
6897
6898 goto compute_rto;
6899 } else {
6900 return;
6901 }
6902 }
6903
6904 tcpstat.tcps_rttupdated++;
6905 tp->t_rttupdated++;
6906
6907 tp->t_rttcur = rtt;
6908 update_base_rtt(tp, rtt);
6909
6910 if (tp->t_srtt != 0) {
6911 /*
6912 * srtt is stored as fixed point with 5 bits after the
6913 * binary point (i.e., scaled by 32). The following magic
6914 * is equivalent to the smoothing algorithm in rfc793 with
6915 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
6916 * point).
6917 *
6918 * Freebsd adjusts rtt to origin 0 by subtracting 1
6919 * from the provided rtt value. This was required because
6920 * of the way t_rtttime was initiailised to 1 before.
6921 * Since we changed t_rtttime to be based on
6922 * tcp_now, this extra adjustment is not needed.
6923 */
6924 delta = (rtt << TCP_DELTA_SHIFT)
6925 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
6926
6927 if ((tp->t_srtt += delta) <= 0) {
6928 tp->t_srtt = 1;
6929 }
6930
6931 /*
6932 * We accumulate a smoothed rtt variance (actually, a
6933 * smoothed mean difference), then set the retransmit
6934 * timer to smoothed rtt + 4 times the smoothed variance.
6935 * rttvar is stored as fixed point with 4 bits after the
6936 * binary point (scaled by 16). The following is
6937 * equivalent to rfc793 smoothing with an alpha of .75
6938 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
6939 * rfc793's wired-in beta.
6940 */
6941 if (delta < 0) {
6942 delta = -delta;
6943 }
6944 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
6945 if ((tp->t_rttvar += delta) <= 0) {
6946 tp->t_rttvar = 1;
6947 }
6948 if (tp->t_rttbest == 0 ||
6949 tp->t_rttbest > (tp->t_srtt + tp->t_rttvar)) {
6950 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6951 }
6952 } else {
6953 /*
6954 * No rtt measurement yet - use the unsmoothed rtt.
6955 * Set the variance to half the rtt (so our first
6956 * retransmit happens at 3*rtt).
6957 */
6958 tp->t_srtt = rtt << TCP_RTT_SHIFT;
6959 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
6960 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6961
6962 /* Initialize the receive SRTT */
6963 if (tp->rcv_srtt == 0) {
6964 tp->rcv_srtt = tp->t_srtt;
6965 }
6966 }
6967
6968 compute_rto:
6969 nstat_route_rtt(tp->t_inpcb->inp_route.ro_rt, tp->t_srtt,
6970 tp->t_rttvar);
6971
6972 /*
6973 * the retransmit should happen at rtt + 4 * rttvar.
6974 * Because of the way we do the smoothing, srtt and rttvar
6975 * will each average +1/2 tick of bias. When we compute
6976 * the retransmit timer, we want 1/2 tick of rounding and
6977 * 1 extra tick because of +-1/2 tick uncertainty in the
6978 * firing of the timer. The bias will give us exactly the
6979 * 1.5 tick we need. But, because the bias is
6980 * statistical, we have to test that we don't drop below
6981 * the minimum feasible timer (which is 2 ticks).
6982 */
6983 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
6984 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX,
6985 TCP_ADD_REXMTSLOP(tp));
6986
6987 /*
6988 * We received an ack for a packet that wasn't retransmitted;
6989 * it is probably safe to discard any error indications we've
6990 * received recently. This isn't quite right, but close enough
6991 * for now (a route might have failed after we sent a segment,
6992 * and the return path might not be symmetrical).
6993 */
6994 tp->t_softerror = 0;
6995
6996 if (log_rtt) {
6997 TCP_LOG_RTT_INFO(tp);
6998 }
6999
7000 tcp_update_pacer_state(tp);
7001
7002 TCP_LOG_RTT_CHANGE(tp, old_srtt, old_rttvar);
7003 }
7004
7005 static inline unsigned int
tcp_maxmtu(struct rtentry * rt)7006 tcp_maxmtu(struct rtentry *rt)
7007 {
7008 unsigned int maxmtu;
7009 int interface_mtu = 0;
7010
7011 RT_LOCK_ASSERT_HELD(rt);
7012 interface_mtu = rt->rt_ifp->if_mtu;
7013
7014 if (rt_key(rt)->sa_family == AF_INET &&
7015 INTF_ADJUST_MTU_FOR_CLAT46(rt->rt_ifp)) {
7016 interface_mtu = IN6_LINKMTU(rt->rt_ifp);
7017 /* Further adjust the size for CLAT46 expansion */
7018 interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
7019 }
7020
7021 if (rt->rt_rmx.rmx_mtu == 0) {
7022 maxmtu = interface_mtu;
7023 } else {
7024 maxmtu = MIN(rt->rt_rmx.rmx_mtu, interface_mtu);
7025 }
7026
7027 return maxmtu;
7028 }
7029
7030 static inline unsigned int
tcp_maxmtu6(struct rtentry * rt)7031 tcp_maxmtu6(struct rtentry *rt)
7032 {
7033 unsigned int maxmtu;
7034 struct nd_ifinfo *ndi = NULL;
7035
7036 RT_LOCK_ASSERT_HELD(rt);
7037 if ((ndi = ND_IFINFO(rt->rt_ifp)) != NULL && !ndi->initialized) {
7038 ndi = NULL;
7039 }
7040 if (ndi != NULL) {
7041 lck_mtx_lock(&ndi->lock);
7042 }
7043 if (rt->rt_rmx.rmx_mtu == 0) {
7044 maxmtu = IN6_LINKMTU(rt->rt_ifp);
7045 } else {
7046 maxmtu = MIN(rt->rt_rmx.rmx_mtu, IN6_LINKMTU(rt->rt_ifp));
7047 }
7048 if (ndi != NULL) {
7049 lck_mtx_unlock(&ndi->lock);
7050 }
7051
7052 return maxmtu;
7053 }
7054
7055 unsigned int
get_maxmtu(struct rtentry * rt)7056 get_maxmtu(struct rtentry *rt)
7057 {
7058 unsigned int maxmtu = 0;
7059
7060 RT_LOCK_ASSERT_NOTHELD(rt);
7061
7062 RT_LOCK(rt);
7063
7064 if (rt_key(rt)->sa_family == AF_INET6) {
7065 maxmtu = tcp_maxmtu6(rt);
7066 } else {
7067 maxmtu = tcp_maxmtu(rt);
7068 }
7069
7070 RT_UNLOCK(rt);
7071
7072 return maxmtu;
7073 }
7074
7075 /*
7076 * Determine a reasonable value for maxseg size.
7077 * If the route is known, check route for mtu.
7078 * If none, use an mss that can be handled on the outgoing
7079 * interface without forcing IP to fragment; if bigger than
7080 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
7081 * to utilize large mbufs. If no route is found, route has no mtu,
7082 * or the destination isn't local, use a default, hopefully conservative
7083 * size (usually 512 or the default IP max size, but no more than the mtu
7084 * of the interface), as we can't discover anything about intervening
7085 * gateways or networks. We also initialize the congestion/slow start
7086 * window. While looking at the routing entry, we also initialize
7087 * other path-dependent parameters from pre-set or cached values
7088 * in the routing entry.
7089 *
7090 * Also take into account the space needed for options that we
7091 * send regularly. Make maxseg shorter by that amount to assure
7092 * that we can send maxseg amount of data even when the options
7093 * are present. Store the upper limit of the length of options plus
7094 * data in maxopd.
7095 *
7096 * NOTE that this routine is only called when we process an incoming
7097 * segment, for outgoing segments only tcp_mssopt is called.
7098 *
7099 */
7100 void
tcp_mss(struct tcpcb * tp,int offer,unsigned int input_ifscope)7101 tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope)
7102 {
7103 struct rtentry *rt;
7104 struct ifnet *ifp;
7105 int mss;
7106 uint32_t bufsize;
7107 struct inpcb *inp;
7108 struct socket *so;
7109 int origoffer = offer;
7110 int isipv6;
7111 int min_protoh;
7112
7113 inp = tp->t_inpcb;
7114
7115 so = inp->inp_socket;
7116 /*
7117 * Nothing left to send after the socket is defunct or TCP is in the closed state
7118 */
7119 if ((so->so_state & SS_DEFUNCT) || tp->t_state == TCPS_CLOSED) {
7120 return;
7121 }
7122
7123 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
7124 min_protoh = isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr)
7125 : sizeof(struct tcpiphdr);
7126
7127 if (isipv6) {
7128 rt = tcp_rtlookup6(inp, input_ifscope);
7129 } else {
7130 rt = tcp_rtlookup(inp, input_ifscope);
7131 }
7132
7133 if (rt == NULL) {
7134 tp->t_maxopd = tp->t_maxseg = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
7135 return;
7136 }
7137 ifp = rt->rt_ifp;
7138 /*
7139 * Slower link window correction:
7140 * If a value is specificied for slowlink_wsize use it for
7141 * PPP links believed to be on a serial modem (speed <128Kbps).
7142 * Excludes 9600bps as it is the default value adversized
7143 * by pseudo-devices over ppp.
7144 */
7145 if (ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
7146 ifp->if_baudrate > 9600 && ifp->if_baudrate <= 128000) {
7147 tp->t_flags |= TF_SLOWLINK;
7148 }
7149
7150 /*
7151 * Offer == -1 means that we didn't receive SYN yet. Use 0 then.
7152 */
7153 if (offer == -1) {
7154 offer = rt->rt_rmx.rmx_filler[0];
7155 }
7156 /*
7157 * Offer == 0 means that there was no MSS on the SYN segment,
7158 * in this case we use tcp_mssdflt.
7159 */
7160 if (offer == 0) {
7161 offer = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
7162 } else {
7163 /*
7164 * Prevent DoS attack with too small MSS. Round up
7165 * to at least minmss.
7166 */
7167 offer = max(offer, tcp_minmss);
7168 /*
7169 * Sanity check: make sure that maxopd will be large
7170 * enough to allow some data on segments even is the
7171 * all the option space is used (40bytes). Otherwise
7172 * funny things may happen in tcp_output.
7173 */
7174 offer = max(offer, 64);
7175 }
7176 rt->rt_rmx.rmx_filler[0] = offer;
7177
7178 /*
7179 * While we're here, check if there's an initial rtt
7180 * or rttvar. Convert from the route-table units
7181 * to scaled multiples of the slow timeout timer.
7182 */
7183 if (tp->t_srtt == 0 && rt->rt_rmx.rmx_rtt != 0) {
7184 tcp_getrt_rtt(tp, rt);
7185 } else {
7186 tp->t_rttmin = TCPTV_REXMTMIN;
7187 }
7188
7189 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));
7190
7191 mss = tcp_get_effective_mtu(rt, mss);
7192 #if NECP
7193 // At this point, the mss is just the MTU. Adjust if necessary.
7194 mss = necp_socket_get_effective_mtu(inp, mss);
7195 #endif /* NECP */
7196
7197 mss -= min_protoh;
7198
7199 if (rt->rt_rmx.rmx_mtu == 0) {
7200 if (isipv6) {
7201 mss = min(mss, tcp_v6mssdflt);
7202 } else {
7203 mss = min(mss, tcp_mssdflt);
7204 }
7205 }
7206
7207 mss = min(mss, offer);
7208 /*
7209 * maxopd stores the maximum length of data AND options
7210 * in a segment; maxseg is the amount of data in a normal
7211 * segment. We need to store this value (maxopd) apart
7212 * from maxseg, because now every segment carries options
7213 * and thus we normally have somewhat less data in segments.
7214 */
7215 tp->t_maxopd = mss;
7216
7217 /*
7218 * origoffer==-1 indicates, that no segments were received yet.
7219 * In this case we just guess.
7220 */
7221 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
7222 (origoffer == -1 ||
7223 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) {
7224 mss -= TCPOLEN_TSTAMP_APPA;
7225 }
7226
7227 #if MPTCP
7228 mss -= mptcp_adj_mss(tp, FALSE);
7229 #endif /* MPTCP */
7230 tp->t_maxseg = mss;
7231
7232 /*
7233 * If there's a pipesize (ie loopback), change the socket
7234 * buffer to that size only if it's bigger than the current
7235 * sockbuf size. Make the socket buffers an integral
7236 * number of mss units; if the mss is larger than
7237 * the socket buffer, decrease the mss.
7238 */
7239 #if RTV_SPIPE
7240 bufsize = rt->rt_rmx.rmx_sendpipe;
7241 if (bufsize < so->so_snd.sb_hiwat)
7242 #endif
7243 bufsize = so->so_snd.sb_hiwat;
7244 if (bufsize < mss) {
7245 mss = bufsize;
7246 } else {
7247 bufsize = (((bufsize + mss - 1) / mss) * mss);
7248 (void)sbreserve(&so->so_snd, bufsize);
7249 }
7250 tp->t_maxseg = mss;
7251
7252 ASSERT(tp->t_maxseg);
7253
7254 /*
7255 * Update MSS using recommendation from link status report. This is
7256 * temporary
7257 */
7258 tcp_update_mss_locked(so, ifp);
7259
7260 #if RTV_RPIPE
7261 bufsize = rt->rt_rmx.rmx_recvpipe;
7262 if (bufsize < so->so_rcv.sb_hiwat)
7263 #endif
7264 bufsize = so->so_rcv.sb_hiwat;
7265 if (bufsize > mss) {
7266 bufsize = (((bufsize + mss - 1) / mss) * mss);
7267 (void)sbreserve(&so->so_rcv, bufsize);
7268 }
7269
7270 set_tcp_stream_priority(so);
7271
7272 if (rt->rt_rmx.rmx_ssthresh) {
7273 /*
7274 * There's some sort of gateway or interface
7275 * buffer limit on the path. Use this to set
7276 * slow-start threshold, but set the threshold to
7277 * no less than 2*mss.
7278 */
7279 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
7280 tcpstat.tcps_usedssthresh++;
7281 } else {
7282 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
7283 }
7284
7285 /*
7286 * Set the slow-start flight size depending on whether this
7287 * is a local network or not.
7288 */
7289 if (CC_ALGO(tp)->cwnd_init != NULL) {
7290 CC_ALGO(tp)->cwnd_init(tp);
7291 }
7292
7293 tcp_ccdbg_trace(tp, NULL, TCP_CC_CWND_INIT);
7294
7295 if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.rwnd_init != NULL) {
7296 tcp_cc_rledbat.rwnd_init(tp);
7297 }
7298
7299 /* Route locked during lookup above */
7300 RT_UNLOCK(rt);
7301 }
7302
7303 /*
7304 * Determine the MSS option to send on an outgoing SYN.
7305 */
7306 int
tcp_mssopt(struct tcpcb * tp)7307 tcp_mssopt(struct tcpcb *tp)
7308 {
7309 struct rtentry *rt;
7310 int mss;
7311 int isipv6;
7312 int min_protoh;
7313
7314 isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
7315 min_protoh = isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr)
7316 : sizeof(struct tcpiphdr);
7317
7318 if (isipv6) {
7319 rt = tcp_rtlookup6(tp->t_inpcb, IFSCOPE_NONE);
7320 } else {
7321 rt = tcp_rtlookup(tp->t_inpcb, IFSCOPE_NONE);
7322 }
7323 if (rt == NULL) {
7324 return isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
7325 }
7326 /*
7327 * Slower link window correction:
7328 * If a value is specificied for slowlink_wsize use it for PPP links
7329 * believed to be on a serial modem (speed <128Kbps). Excludes 9600bps as
7330 * it is the default value adversized by pseudo-devices over ppp.
7331 */
7332 if (rt->rt_ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
7333 rt->rt_ifp->if_baudrate > 9600 && rt->rt_ifp->if_baudrate <= 128000) {
7334 tp->t_flags |= TF_SLOWLINK;
7335 }
7336
7337 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));
7338
7339 mss = tcp_get_effective_mtu(rt, mss);
7340
7341 /* Route locked during lookup above */
7342 RT_UNLOCK(rt);
7343
7344 #if NECP
7345 // At this point, the mss is just the MTU. Adjust if necessary.
7346 mss = necp_socket_get_effective_mtu(tp->t_inpcb, mss);
7347 #endif /* NECP */
7348
7349 return mss - min_protoh;
7350 }
7351
7352 /*
7353 * On a partial ack arrives, force the retransmission of the
7354 * next unacknowledged segment. Do not clear tp->t_dupacks.
7355 * By setting snd_nxt to th_ack, this forces retransmission timer to
7356 * be started again.
7357 */
7358 static void
tcp_newreno_partial_ack(struct tcpcb * tp,struct tcphdr * th)7359 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
7360 {
7361 tcp_seq onxt = tp->snd_nxt;
7362 u_int32_t ocwnd = tp->snd_cwnd;
7363 tp->t_timer[TCPT_REXMT] = 0;
7364 tp->t_timer[TCPT_PTO] = 0;
7365 tp->t_rtttime = 0;
7366 tp->snd_nxt = th->th_ack;
7367 /*
7368 * Set snd_cwnd to one segment beyond acknowledged offset
7369 * (tp->snd_una has not yet been updated when this function
7370 * is called)
7371 */
7372 tp->snd_cwnd = tp->t_maxseg + BYTES_ACKED(th, tp);
7373 (void) tcp_output(tp);
7374 tp->snd_cwnd = ocwnd;
7375 if (SEQ_GT(onxt, tp->snd_nxt)) {
7376 tp->snd_nxt = onxt;
7377 }
7378 /*
7379 * Partial window deflation. Relies on fact that tp->snd_una
7380 * not updated yet.
7381 */
7382 if (tp->snd_cwnd > BYTES_ACKED(th, tp)) {
7383 tp->snd_cwnd -= BYTES_ACKED(th, tp);
7384 } else {
7385 tp->snd_cwnd = 0;
7386 }
7387 tp->snd_cwnd += tp->t_maxseg;
7388 }
7389
7390 /*
7391 * Drop a random TCP connection that hasn't been serviced yet and
7392 * is eligible for discard. There is a one in qlen chance that
7393 * we will return a null, saying that there are no dropable
7394 * requests. In this case, the protocol specific code should drop
7395 * the new request. This insures fairness.
7396 *
7397 * The listening TCP socket "head" must be locked
7398 */
7399 static int
tcp_dropdropablreq(struct socket * head)7400 tcp_dropdropablreq(struct socket *head)
7401 {
7402 struct socket *so, *sonext;
7403 unsigned int j, qlen;
7404 static uint32_t rnd = 0;
7405 static uint64_t old_runtime;
7406 static unsigned int cur_cnt, old_cnt;
7407 uint64_t now_sec, i;
7408 struct inpcb *inp = NULL;
7409 struct tcpcb *tp;
7410
7411 if ((head->so_options & SO_ACCEPTCONN) == 0) {
7412 return 0;
7413 }
7414
7415 if (TAILQ_EMPTY(&head->so_incomp)) {
7416 return 0;
7417 }
7418
7419 so_acquire_accept_list(head, NULL);
7420 socket_unlock(head, 0);
7421
7422 /*
7423 * Check if there is any socket in the incomp queue
7424 * that is closed because of a reset from the peer and is
7425 * waiting to be garbage collected. If so, pick that as
7426 * the victim
7427 */
7428 TAILQ_FOREACH_SAFE(so, &head->so_incomp, so_list, sonext) {
7429 inp = sotoinpcb(so);
7430 tp = intotcpcb(inp);
7431 if (tp != NULL && tp->t_state == TCPS_CLOSED &&
7432 so->so_head != NULL &&
7433 (so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) ==
7434 (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) {
7435 /*
7436 * The listen socket is already locked but we
7437 * can lock this socket here without lock ordering
7438 * issues because it is in the incomp queue and
7439 * is not visible to others.
7440 */
7441 if (socket_try_lock(so)) {
7442 so->so_usecount++;
7443 goto found_victim;
7444 } else {
7445 continue;
7446 }
7447 }
7448 }
7449
7450 so = TAILQ_FIRST(&head->so_incomp);
7451
7452 now_sec = net_uptime();
7453 if ((i = (now_sec - old_runtime)) != 0) {
7454 old_runtime = now_sec;
7455 old_cnt = cur_cnt / i;
7456 cur_cnt = 0;
7457 }
7458
7459 qlen = head->so_incqlen;
7460 if (rnd == 0) {
7461 rnd = RandomULong();
7462 }
7463
7464 if (++cur_cnt > qlen || old_cnt > qlen) {
7465 rnd = (314159 * rnd + 66329) & 0xffff;
7466 j = ((qlen + 1) * rnd) >> 16;
7467
7468 while (j-- && so) {
7469 so = TAILQ_NEXT(so, so_list);
7470 }
7471 }
7472 /* Find a connection that is not already closing (or being served) */
7473 while (so) {
7474 inp = (struct inpcb *)so->so_pcb;
7475
7476 sonext = TAILQ_NEXT(so, so_list);
7477
7478 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
7479 /*
7480 * Avoid the issue of a socket being accepted
7481 * by one input thread and being dropped by
7482 * another input thread. If we can't get a hold
7483 * on this mutex, then grab the next socket in
7484 * line.
7485 */
7486 if (socket_try_lock(so)) {
7487 so->so_usecount++;
7488 if ((so->so_usecount == 2) &&
7489 (so->so_state & SS_INCOMP) &&
7490 !(so->so_flags & SOF_INCOMP_INPROGRESS)) {
7491 break;
7492 } else {
7493 /*
7494 * don't use if being accepted or
7495 * used in any other way
7496 */
7497 in_pcb_checkstate(inp, WNT_RELEASE, 1);
7498 socket_unlock(so, 1);
7499 }
7500 } else {
7501 /*
7502 * do not try to lock the inp in
7503 * in_pcb_checkstate because the lock
7504 * is already held in some other thread.
7505 * Only drop the inp_wntcnt reference.
7506 */
7507 in_pcb_checkstate(inp, WNT_RELEASE, 1);
7508 }
7509 }
7510 so = sonext;
7511 }
7512 if (so == NULL) {
7513 socket_lock(head, 0);
7514 so_release_accept_list(head);
7515 return 0;
7516 }
7517
7518 /* Makes sure socket is still in the right state to be discarded */
7519
7520 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
7521 socket_unlock(so, 1);
7522 socket_lock(head, 0);
7523 so_release_accept_list(head);
7524 return 0;
7525 }
7526
7527 found_victim:
7528 if (so->so_usecount != 2 || !(so->so_state & SS_INCOMP)) {
7529 /* do not discard: that socket is being accepted */
7530 socket_unlock(so, 1);
7531 socket_lock(head, 0);
7532 so_release_accept_list(head);
7533 return 0;
7534 }
7535
7536 socket_lock(head, 0);
7537 TAILQ_REMOVE(&head->so_incomp, so, so_list);
7538 head->so_incqlen--;
7539 head->so_qlen--;
7540 so->so_state &= ~SS_INCOMP;
7541 so->so_flags |= SOF_OVERFLOW;
7542 so->so_head = NULL;
7543 so_release_accept_list(head);
7544 socket_unlock(head, 0);
7545
7546 socket_lock_assert_owned(so);
7547 tp = sototcpcb(so);
7548
7549 tcp_close(tp);
7550 if (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING) {
7551 /*
7552 * Some one has a wantcnt on this pcb. Since WNT_ACQUIRE
7553 * doesn't require a lock, it could have happened while
7554 * we are holding the lock. This pcb will have to
7555 * be garbage collected later.
7556 * Release the reference held for so_incomp queue
7557 */
7558 VERIFY(so->so_usecount > 0);
7559 so->so_usecount--;
7560 socket_unlock(so, 1);
7561 } else {
7562 /*
7563 * Unlock this socket and leave the reference on.
7564 * We need to acquire the pcbinfo lock in order to
7565 * fully dispose it off
7566 */
7567 socket_unlock(so, 0);
7568
7569 lck_rw_lock_exclusive(&tcbinfo.ipi_lock);
7570
7571 socket_lock(so, 0);
7572 /* Release the reference held for so_incomp queue */
7573 VERIFY(so->so_usecount > 0);
7574 so->so_usecount--;
7575
7576 if (so->so_usecount != 1 ||
7577 (inp->inp_wantcnt > 0 &&
7578 inp->inp_wantcnt != WNT_STOPUSING)) {
7579 /*
7580 * There is an extra wantcount or usecount
7581 * that must have been added when the socket
7582 * was unlocked. This socket will have to be
7583 * garbage collected later
7584 */
7585 socket_unlock(so, 1);
7586 } else {
7587 /* Drop the reference held for this function */
7588 VERIFY(so->so_usecount > 0);
7589 so->so_usecount--;
7590
7591 in_pcbdispose(inp);
7592 }
7593 lck_rw_done(&tcbinfo.ipi_lock);
7594 }
7595 tcpstat.tcps_drops++;
7596
7597 socket_lock(head, 0);
7598 return 1;
7599 }
7600
7601 /* Set background congestion control on a socket */
7602 void
tcp_set_background_cc(struct socket * so)7603 tcp_set_background_cc(struct socket *so)
7604 {
7605 tcp_set_new_cc(so, TCP_CC_ALGO_BACKGROUND_INDEX);
7606 }
7607
7608 /* Set foreground congestion control on a socket */
7609 void
tcp_set_foreground_cc(struct socket * so)7610 tcp_set_foreground_cc(struct socket *so)
7611 {
7612 if (tcp_use_newreno) {
7613 tcp_set_new_cc(so, TCP_CC_ALGO_NEWRENO_INDEX);
7614 #if (DEVELOPMENT || DEBUG)
7615 } else if (tcp_use_ledbat) {
7616 /* Only used for testing */
7617 tcp_set_new_cc(so, TCP_CC_ALGO_BACKGROUND_INDEX);
7618 #endif
7619 } else {
7620 struct inpcb *inp = sotoinpcb(so);
7621 struct tcpcb *tp = intotcpcb(inp);
7622 if (tp->l4s_enabled) {
7623 tcp_set_new_cc(so, TCP_CC_ALGO_PRAGUE_INDEX);
7624 } else {
7625 tcp_set_new_cc(so, TCP_CC_ALGO_CUBIC_INDEX);
7626 }
7627 }
7628 }
7629
7630 static void
tcp_set_new_cc(struct socket * so,uint8_t cc_index)7631 tcp_set_new_cc(struct socket *so, uint8_t cc_index)
7632 {
7633 struct inpcb *inp = sotoinpcb(so);
7634 struct tcpcb *tp = intotcpcb(inp);
7635
7636 if (tp->tcp_cc_index != cc_index) {
7637 if (CC_ALGO(tp)->cleanup != NULL) {
7638 CC_ALGO(tp)->cleanup(tp);
7639 }
7640 tp->tcp_cc_index = cc_index;
7641
7642 tcp_cc_allocate_state(tp);
7643
7644 if (CC_ALGO(tp)->switch_to != NULL) {
7645 CC_ALGO(tp)->switch_to(tp);
7646 }
7647
7648 tcp_ccdbg_trace(tp, NULL, TCP_CC_CHANGE_ALGO);
7649 }
7650 }
7651
7652 void
tcp_set_recv_bg(struct socket * so)7653 tcp_set_recv_bg(struct socket *so)
7654 {
7655 if (!IS_TCP_RECV_BG(so)) {
7656 so->so_flags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG;
7657
7658 struct inpcb *inp = sotoinpcb(so);
7659 struct tcpcb *tp = intotcpcb(inp);
7660
7661 if (TCP_RLEDBAT_ENABLED(tp) && tcp_cc_rledbat.switch_to != NULL) {
7662 tcp_cc_rledbat.switch_to(tp);
7663 }
7664 }
7665 }
7666
7667 void
tcp_clear_recv_bg(struct socket * so)7668 tcp_clear_recv_bg(struct socket *so)
7669 {
7670 if (IS_TCP_RECV_BG(so)) {
7671 so->so_flags1 &= ~(SOF1_TRAFFIC_MGT_TCP_RECVBG);
7672 }
7673 }
7674
7675 void
inp_fc_throttle_tcp(struct inpcb * inp)7676 inp_fc_throttle_tcp(struct inpcb *inp)
7677 {
7678 tcpcb_ref_t tp = inp->inp_ppcb;
7679
7680 /*
7681 * Back off the slow-start threshold and enter
7682 * congestion avoidance phase
7683 */
7684 if (CC_ALGO(tp)->pre_fr != NULL) {
7685 CC_ALGO(tp)->pre_fr(tp);
7686 }
7687 }
7688
7689 void
inp_fc_unthrottle_tcp(struct inpcb * inp)7690 inp_fc_unthrottle_tcp(struct inpcb *inp)
7691 {
7692 tcpcb_ref_t tp = inp->inp_ppcb;
7693 struct ifnet *outifp = inp->inp_last_outifp;
7694
7695 if (CC_ALGO(tp)->post_fr != NULL) {
7696 CC_ALGO(tp)->post_fr(tp, NULL);
7697 }
7698
7699 tp->t_bytes_acked = 0;
7700
7701 /*
7702 * Reset retransmit shift as we know that the reason
7703 * for delay in sending a packet is due to flow
7704 * control on the outgoing interface. There is no need
7705 * to backoff retransmit timer.
7706 */
7707 if (tp->t_rxtshift != 0 && outifp != NULL &&
7708 IFNET_IS_CELLULAR(outifp)) {
7709 TCP_LOG(tp, "inp_fc_unthrottle_tcp keep rxmit state t_rxtshift %d", tp->t_rxtshift);
7710 } else {
7711 TCP_RESET_REXMT_STATE(tp);
7712 }
7713
7714 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
7715
7716 /*
7717 * Start the output stream again. Since we are
7718 * not retransmitting data, do not reset the
7719 * retransmit timer or rtt calculation.
7720 */
7721 tcp_output(tp);
7722 }
7723
7724 static int
7725 tcp_getstat SYSCTL_HANDLER_ARGS
7726 {
7727 #pragma unused(oidp, arg1, arg2)
7728
7729 int error;
7730 struct tcpstat *stat;
7731 stat = &tcpstat;
7732
7733 #if XNU_TARGET_OS_OSX
7734 struct tcpstat zero_stat;
7735
7736 if (tcp_disable_access_to_stats &&
7737 !kauth_cred_issuser(kauth_cred_get())) {
7738 bzero(&zero_stat, sizeof(zero_stat));
7739 stat = &zero_stat;
7740 }
7741
7742 #endif /* XNU_TARGET_OS_OSX */
7743
7744 if (req->oldptr == 0) {
7745 req->oldlen = (size_t)sizeof(struct tcpstat);
7746 }
7747
7748 error = SYSCTL_OUT(req, stat, MIN(sizeof(tcpstat), req->oldlen));
7749
7750 return error;
7751 }
7752
7753 /*
7754 * Checksum extended TCP header and data.
7755 */
7756 int
tcp_input_checksum(int af,struct mbuf * m,struct tcphdr * th,int off,int tlen)7757 tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen)
7758 {
7759 struct ifnet *ifp = m->m_pkthdr.rcvif;
7760
7761 switch (af) {
7762 case AF_INET: {
7763 struct ip *ip = mtod(m, struct ip *);
7764 struct ipovly *ipov = (struct ipovly *)ip;
7765
7766 /* ip_stripoptions() must have been called before we get here */
7767 ASSERT((ip->ip_hl << 2) == sizeof(*ip));
7768
7769 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
7770 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
7771 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
7772 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
7773 th->th_sum = m->m_pkthdr.csum_rx_val;
7774 } else {
7775 uint32_t sum = m->m_pkthdr.csum_rx_val;
7776 uint32_t start = m->m_pkthdr.csum_rx_start;
7777 int32_t trailer = (m_pktlen(m) - (off + tlen));
7778
7779 /*
7780 * Perform 1's complement adjustment of octets
7781 * that got included/excluded in the hardware-
7782 * calculated checksum value. Ignore cases
7783 * where the value already includes the entire
7784 * IP header span, as the sum for those octets
7785 * would already be 0 by the time we get here;
7786 * IP has already performed its header checksum
7787 * checks. If we do need to adjust, restore
7788 * the original fields in the IP header when
7789 * computing the adjustment value. Also take
7790 * care of any trailing bytes and subtract out
7791 * their partial sum.
7792 */
7793 ASSERT(trailer >= 0);
7794 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
7795 ((start != 0 && start != off) || trailer)) {
7796 uint32_t swbytes = (uint32_t)trailer;
7797
7798 if (start < off) {
7799 ip->ip_len += sizeof(*ip);
7800 #if BYTE_ORDER != BIG_ENDIAN
7801 HTONS(ip->ip_len);
7802 HTONS(ip->ip_off);
7803 #endif /* BYTE_ORDER != BIG_ENDIAN */
7804 }
7805 /* callee folds in sum */
7806 sum = m_adj_sum16(m, start, off,
7807 tlen, sum);
7808 if (off > start) {
7809 swbytes += (off - start);
7810 } else {
7811 swbytes += (start - off);
7812 }
7813
7814 if (start < off) {
7815 #if BYTE_ORDER != BIG_ENDIAN
7816 NTOHS(ip->ip_off);
7817 NTOHS(ip->ip_len);
7818 #endif /* BYTE_ORDER != BIG_ENDIAN */
7819 ip->ip_len -= sizeof(*ip);
7820 }
7821
7822 if (swbytes != 0) {
7823 tcp_in_cksum_stats(swbytes);
7824 }
7825 if (trailer != 0) {
7826 m_adj(m, -trailer);
7827 }
7828 }
7829
7830 /* callee folds in sum */
7831 th->th_sum = in_pseudo(ip->ip_src.s_addr,
7832 ip->ip_dst.s_addr,
7833 sum + htonl(tlen + IPPROTO_TCP));
7834 }
7835 th->th_sum ^= 0xffff;
7836 } else {
7837 uint16_t ip_sum;
7838 int len;
7839 char b[9];
7840
7841 bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1));
7842 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
7843 ip_sum = ipov->ih_len;
7844 ipov->ih_len = (u_short)tlen;
7845 #if BYTE_ORDER != BIG_ENDIAN
7846 HTONS(ipov->ih_len);
7847 #endif
7848 len = sizeof(struct ip) + tlen;
7849 th->th_sum = in_cksum(m, len);
7850 bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1));
7851 ipov->ih_len = ip_sum;
7852
7853 tcp_in_cksum_stats(len);
7854 }
7855 break;
7856 }
7857 case AF_INET6: {
7858 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
7859
7860 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
7861 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
7862 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
7863 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
7864 th->th_sum = m->m_pkthdr.csum_rx_val;
7865 } else {
7866 uint32_t sum = m->m_pkthdr.csum_rx_val;
7867 uint32_t start = m->m_pkthdr.csum_rx_start;
7868 int32_t trailer = (m_pktlen(m) - (off + tlen));
7869
7870 /*
7871 * Perform 1's complement adjustment of octets
7872 * that got included/excluded in the hardware-
7873 * calculated checksum value. Also take care
7874 * of any trailing bytes and subtract out their
7875 * partial sum.
7876 */
7877 ASSERT(trailer >= 0);
7878 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
7879 (start != off || trailer != 0)) {
7880 uint16_t s = 0, d = 0;
7881 uint32_t swbytes = (uint32_t)trailer;
7882
7883 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
7884 s = ip6->ip6_src.s6_addr16[1];
7885 ip6->ip6_src.s6_addr16[1] = 0;
7886 }
7887 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
7888 d = ip6->ip6_dst.s6_addr16[1];
7889 ip6->ip6_dst.s6_addr16[1] = 0;
7890 }
7891
7892 /* callee folds in sum */
7893 sum = m_adj_sum16(m, start, off,
7894 tlen, sum);
7895 if (off > start) {
7896 swbytes += (off - start);
7897 } else {
7898 swbytes += (start - off);
7899 }
7900
7901 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
7902 ip6->ip6_src.s6_addr16[1] = s;
7903 }
7904 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
7905 ip6->ip6_dst.s6_addr16[1] = d;
7906 }
7907
7908 if (swbytes != 0) {
7909 tcp_in6_cksum_stats(swbytes);
7910 }
7911 if (trailer != 0) {
7912 m_adj(m, -trailer);
7913 }
7914 }
7915
7916 th->th_sum = in6_pseudo(
7917 &ip6->ip6_src, &ip6->ip6_dst,
7918 sum + htonl(tlen + IPPROTO_TCP));
7919 }
7920 th->th_sum ^= 0xffff;
7921 } else {
7922 tcp_in6_cksum_stats(tlen);
7923 th->th_sum = in6_cksum(m, IPPROTO_TCP, off, tlen);
7924 }
7925 break;
7926 }
7927 default:
7928 VERIFY(0);
7929 /* NOTREACHED */
7930 }
7931
7932 if (th->th_sum != 0) {
7933 tcpstat.tcps_rcvbadsum++;
7934 IF_TCP_STATINC(ifp, badformat);
7935 return -1;
7936 }
7937
7938 return 0;
7939 }
7940
7941 uint32_t
tcp_reass_qlen_space(struct socket * so)7942 tcp_reass_qlen_space(struct socket *so)
7943 {
7944 uint32_t space = 0;
7945 struct inpcb *inp = sotoinpcb(so);
7946
7947 if (inp != NULL) {
7948 struct tcpcb *tp = intotcpcb(inp);
7949
7950 if (tp != NULL) {
7951 space = tp->t_reassq_mbcnt;
7952 }
7953 }
7954 return space;
7955 }
7956
7957
7958 SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats,
7959 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, tcp_getstat,
7960 "S,tcpstat", "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
7961
7962 static int
7963 sysctl_rexmtthresh SYSCTL_HANDLER_ARGS
7964 {
7965 #pragma unused(arg1, arg2)
7966
7967 int error, val = tcprexmtthresh;
7968
7969 error = sysctl_handle_int(oidp, &val, 0, req);
7970 if (error || !req->newptr) {
7971 return error;
7972 }
7973
7974 /*
7975 * Constrain the number of duplicate ACKs
7976 * to consider for TCP fast retransmit
7977 * to either 2 or 3
7978 */
7979
7980 if (val < 2 || val > 3) {
7981 return EINVAL;
7982 }
7983
7984 tcprexmtthresh = (uint8_t)val;
7985
7986 return 0;
7987 }
7988
7989 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmt_thresh, CTLTYPE_INT | CTLFLAG_RW |
7990 CTLFLAG_LOCKED, &tcprexmtthresh, 0, &sysctl_rexmtthresh, "I",
7991 "Duplicate ACK Threshold for Fast Retransmit");
7992