/*
 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *    The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.16 2001/08/22 00:59:12 silby Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include "tcp_includes.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>           /* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <sys/kauth.h>
#include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/ntstat.h>
#include <net/content_filter.h>
#include <net/dlil.h>
#include <net/multi_layer_pkt_log.h>
#include <net/droptap.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>    /* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>   /* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <mach/sdt.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>
#include <dev/random/randomdev.h>
#include <kern/zalloc.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_log.h>

#if IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netkey/key.h>
#endif /*IPSEC*/

#include <sys/kdebug.h>
#if MPTCP
#include <netinet/mptcp_var.h>
#include <netinet/mptcp.h>
#include <netinet/mptcp_opt.h>
#endif /* MPTCP */

#include <corecrypto/ccaes.h>
#include <net/sockaddr_utils.h>

#define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETTCP, 0)
#define DBG_LAYER_END           NETDBG_CODE(DBG_NETTCP, 2)
#define DBG_FNC_TCP_INPUT       NETDBG_CODE(DBG_NETTCP, (3 << 8))
#define DBG_FNC_TCP_NEWCONN     NETDBG_CODE(DBG_NETTCP, (7 << 8))

#define TCP_RTT_HISTORY_EXPIRE_TIME     (60 * TCP_RETRANSHZ)
#define TCP_RECV_THROTTLE_WIN           (5 * TCP_RETRANSHZ)
#define TCP_STRETCHACK_ENABLE_PKTCNT    2000

struct tcpstat tcpstat;

SYSCTL_SKMEM_TCP_INT(OID_AUTO, flow_control_response,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_flow_control_response, 1,
    "Improved response to Flow-control events");

static int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain,
    CTLFLAG_RW | CTLFLAG_LOCKED, &log_in_vain, 0,
    "Log all incoming TCP connections");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_strategy,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ack_strategy, TCP_ACK_STRATEGY_MODERN,
    "Revised TCP ACK-strategy, avoiding stretch-ACK implementation");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole,
    CTLFLAG_RW | CTLFLAG_LOCKED, &blackhole, 0,
    "Do not send RST when dropping refused connections");

/* TODO - remove once uTCP stopped using it */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, aggressive_rcvwnd_inc,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_aggressive_rcvwnd_inc, 1,
    "Be more aggressive about increasing the receive-window.");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, delayed_ack,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_delack_enabled, 3,
    "Delay ACK to try and piggyback it onto a data packet");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, recvbg, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_recv_bg, 0, "Receive background");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, drop_synfin,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, drop_synfin, 1,
    "Drop TCP packets with SYN+FIN set");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "TCP Segment Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_reass_overflows, 0,
    "Global number of TCP segment reassembly queue overflows");

int tcp_reass_total_qlen = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, qlen,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_reass_total_qlen, 0,
    "Total number of TCP segments in reassembly queues");


SYSCTL_SKMEM_TCP_INT(OID_AUTO, slowlink_wsize, CTLFLAG_RW | CTLFLAG_LOCKED,
    __private_extern__ int, slowlink_wsize, 8192,
    "Maximum advertised window size for slowlink");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, maxseg_unacked,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, maxseg_unacked, 8,
    "Maximum number of outstanding segments left unacked");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3465, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_do_rfc3465, 1, "");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3465_lim2,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_rfc3465_lim2, 1,
    "Appropriate bytes counting w/ L=2*SMSS");

int rtt_samples_per_slot = 20;

int tcp_acc_iaj_high_thresh = ACC_IAJ_HIGH_THRESH;
u_int32_t tcp_autorcvbuf_inc_shift = 3;
SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_allowed_iaj,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_allowed_iaj, ALLOWED_IAJ,
    "Allowed inter-packet arrival jitter");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, doautorcvbuf,
    CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_do_autorcvbuf, 1,
    "Enable automatic socket buffer tuning");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, autotunereorder,
    CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_autotune_reorder, 1,
    "Enable automatic socket buffer tuning even when reordering is present");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, u_int32_t, tcp_autorcvbuf_max, 2 * 1024 * 1024,
    "Maximum receive socket buffer size");

int tcp_disable_access_to_stats = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_access_to_stats,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_disable_access_to_stats, 0,
    "Disable access to tcpstat");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, challengeack_limit,
    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_challengeack_limit, 10,
    "Maximum number of challenge ACKs per connection per second");

/* TO BE REMOVED */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_rfc5961,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_do_rfc5961, 1,
    "Enable/Disable full RFC 5961 compliance");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_better_lr,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_better_lr, 1,
    "Improved TCP Loss Recovery");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, use_min_curr_rtt,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_use_min_curr_rtt, 1,
    "Use a min of k=4 RTT samples for congestion controllers");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, awdl_rtobase,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_awdl_rtobase, 100,
    "Initial RTO for AWDL interface");

extern int tcp_acc_iaj_high;
extern int tcp_acc_iaj_react_limit;
extern int tcp_fin_timeout;

uint8_t tcprexmtthresh = 3;

uint32_t tcp_now;
struct timeval tcp_uptime;      /* uptime when tcp_now was last updated */

/* Used to synchronize updates to tcp_now */
static LCK_GRP_DECLARE(tcp_uptime_mtx_grp, "tcpuptime");
LCK_SPIN_DECLARE(tcp_uptime_lock, &tcp_uptime_mtx_grp);

struct inpcbhead tcb;
#define tcb6    tcb  /* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;

static void tcp_dooptions(struct tcpcb *, u_char *cp0 __counted_by(cnt0), int cnt0, struct tcphdr *,
    struct tcpopt *);
static void tcp_finalize_options(struct tcpcb *, struct tcpopt *, unsigned int);
static void tcp_pulloutofband(struct socket *,
    struct tcphdr *, struct mbuf *, int);
static void tcp_xmit_timer(struct tcpcb *, int, u_int32_t, tcp_seq);
static inline unsigned int tcp_maxmtu(struct rtentry *);
static inline int tcp_stretch_ack_enable(struct tcpcb *tp, int thflags);
static inline void tcp_adaptive_rwtimo_check(struct tcpcb *, int);

#if TRAFFIC_MGT
static inline void compute_iaj(struct tcpcb *tp);
static inline void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj);
#endif /* TRAFFIC_MGT */

static inline unsigned int tcp_maxmtu6(struct rtentry *);
unsigned int get_maxmtu(struct rtentry *);

static void tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sb,
    struct tcpopt *to, uint32_t tlen);
void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb);
static void tcp_sbsnd_trim(struct sockbuf *sbsnd);
static inline void tcp_sbrcv_tstmp_check(struct tcpcb *tp);
static inline void tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sb,
    u_int32_t newsize, u_int32_t idealsize, u_int32_t rcvbuf_max);
static void tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th);
static void tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th);
static void tcp_compute_rcv_rtt(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th);
static void tcp_early_rexmt_check(struct tcpcb *tp, struct tcphdr *th);
static void tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to);
/*
 * Constants used for resizing receive socket buffer
 * when timestamps are not supported
 */
#define TCPTV_RCVNOTS_QUANTUM   100
#define TCP_RCVNOTS_BYTELEVEL   204800

/*
 * Constants used for limiting early retransmits
 * to 10 per minute.
 */
#define TCP_EARLY_REXMT_WIN     (60 * TCP_RETRANSHZ)  /* 60 seconds */
#define TCP_EARLY_REXMT_LIMIT   10

#define log_in_vain_log( a ) { log a; }

int tcp_rcvunackwin = TCPTV_UNACKWIN;
int tcp_maxrcvidle = TCPTV_MAXRCVIDLE;
SYSCTL_SKMEM_TCP_INT(OID_AUTO, rcvsspktcnt, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_rcvsspktcnt, TCP_RCV_SS_PKTCOUNT, "packets to be seen before receiver stretches acks");

#define DELAY_ACK(tp, th) \
	(CC_ALGO(tp)->delay_ack != NULL && CC_ALGO(tp)->delay_ack(tp, th))

static int tcp_dropdropablreq(struct socket *head);
static void tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th);
static void update_base_rtt(struct tcpcb *tp, uint32_t rtt);
void tcp_set_background_cc(struct socket *so);
void tcp_set_foreground_cc(struct socket *so);
static void tcp_set_new_cc(struct socket *so, uint8_t cc_index);
static void tcp_bwmeas_check(struct tcpcb *tp);

#if TRAFFIC_MGT
void
reset_acc_iaj(struct tcpcb *tp)
{
	tp->acc_iaj = 0;
	CLEAR_IAJ_STATE(tp);
}

static inline void
update_iaj_state(struct tcpcb *tp, int size, int rst_size)
{
	if (rst_size > 0) {
		tp->iaj_size = 0;
	}
	if (tp->iaj_size == 0 || size >= tp->iaj_size) {
		tp->iaj_size = size;
		tp->iaj_rcv_ts = tcp_now;
		tp->iaj_small_pkt = 0;
	}
}

/* For every 64-bit unsigned integer (v), this function will find the
 * largest 32-bit integer n such that (n*n <= v). This takes at most 32 iterations
 * irrespective of the value of v and does not involve multiplications.
 */
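/*
 * For example, isqrt(152) returns 12: 12*12 = 144 <= 152, while
 * 13*13 = 169 > 152.
 */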
static inline uint32_t
isqrt(uint64_t val)
{
	uint32_t sqrt_cache[11] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100};
	uint64_t temp, g = 0, b = 1 << 31, bshft = 31;
	if (val <= 100) {
		for (g = 0; g <= 10; ++g) {
			if (sqrt_cache[g] > val) {
				g--;
				break;
			} else if (sqrt_cache[g] == val) {
				break;
			}
		}
	} else {
		do {
			temp = (((g << 1) + b) << (bshft--));
			if (val >= temp) {
				g += b;
				val -= temp;
			}
			b >>= 1;
		} while (b > 0 && val > 0);
	}
	return (uint32_t)g;
}

static inline void
compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj)
{
	/* When accumulated IAJ reaches MAX_ACC_IAJ in milliseconds,
	 * throttle the receive window to a minimum of MIN_IAJ_WIN packets
	 */
#define MAX_ACC_IAJ (tcp_acc_iaj_high_thresh + tcp_acc_iaj_react_limit)
#define IAJ_DIV_SHIFT 4
#define IAJ_ROUNDUP_CONST (1 << (IAJ_DIV_SHIFT - 1))

	uint32_t allowed_iaj, acc_iaj = 0;

	/* Using 64-bit storage for the inter-arrival jitter deviation,
	 * to avoid accidentally rolling over if the inter-arrival time exceeds 62 seconds.
	 */
	int64_t mean, temp, cur_iaj_dev;

	cur_iaj_dev = (cur_iaj - tp->avg_iaj);

	/* Allow a jitter of "allowed_iaj" milliseconds. Some connections
	 * may have a constant jitter more than that. We detect this by
	 * using the standard deviation.
	 */
	allowed_iaj = tp->avg_iaj + tp->std_dev_iaj;
	if (allowed_iaj < tcp_allowed_iaj) {
		allowed_iaj = tcp_allowed_iaj;
	}

	/* Initially when the connection starts, the sender's congestion
	 * window is small. During this period we avoid throttling a
	 * connection because we do not have a good starting point for
	 * allowed_iaj. IAJ_IGNORE_PKTCNT is used to quietly gloss over
	 * the first few packets.
	 */
	if (tp->iaj_pktcnt > IAJ_IGNORE_PKTCNT) {
		if (cur_iaj <= allowed_iaj) {
			if (tp->acc_iaj >= 2) {
				acc_iaj = tp->acc_iaj - 2;
			} else {
				acc_iaj = 0;
			}
		} else {
			acc_iaj = tp->acc_iaj + (cur_iaj - allowed_iaj);
		}

		if (acc_iaj > MAX_ACC_IAJ) {
			acc_iaj = MAX_ACC_IAJ;
		}
		tp->acc_iaj = acc_iaj;
	}

	/* Compute a weighted average where the history has a weight of
	 * 15 out of 16 and the current value has a weight of 1 out of 16,
	 * so that short-term fluctuations are smoothed out.
	 *
	 * The addition of 8 rounds the result up instead of down.
	 */
	tp->avg_iaj = (((tp->avg_iaj << IAJ_DIV_SHIFT) - tp->avg_iaj)
	    + cur_iaj + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT;
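	/*
	 * Illustrative example: with avg_iaj = 16 and cur_iaj = 32,
	 * ((16 << 4) - 16 + 32 + 8) >> 4 = 280 >> 4 = 17, i.e. the average
	 * moves one sixteenth of the way toward the new sample, rounded up.
	 */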

	/* Compute the root-mean-square of the deviation, where the mean
	 * is the weighted average described above.
	 */
	temp = tp->std_dev_iaj * tp->std_dev_iaj;
	mean = (((temp << IAJ_DIV_SHIFT) - temp)
	    + (cur_iaj_dev * cur_iaj_dev)
	    + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT;

	tp->std_dev_iaj = isqrt(mean);

	DTRACE_TCP3(iaj, struct tcpcb *, tp, uint32_t, cur_iaj,
	    uint32_t, allowed_iaj);

	return;
}

static inline void
compute_iaj(struct tcpcb *tp)
{
	compute_iaj_meat(tp, (tcp_now - tp->iaj_rcv_ts));
}
#endif /* TRAFFIC_MGT */

/*
 * Perform a rate-limit check, per connection per second.
 * tp->t_challengeack_last is the last time the 1-second window was reset.
 * tp->t_challengeack_count is the number of challenge ACKs sent within
 * the current 1-second window.
 * Return TRUE if we should not send the ACK due to rate limitation.
 * Return FALSE if it is still ok to send a challenge ACK.
 */
static boolean_t
tcp_is_ack_ratelimited(struct tcpcb *tp)
{
	boolean_t ret = TRUE;
	uint32_t now = tcp_now;
	int32_t diff = 0;

	diff = timer_diff(now, 0, tp->t_challengeack_last, 0);
	/* If it is the first time or diff > 1000ms,
	 * update challengeack_last and reset the
	 * current count of ACKs
	 */
	if (tp->t_challengeack_last == 0 || diff >= 1000) {
		tp->t_challengeack_last = now;
		tp->t_challengeack_count = 0;
		ret = FALSE;
	} else if (tp->t_challengeack_count < tcp_challengeack_limit) {
		ret = FALSE;
	}

	/* Careful about wrap-around */
	if (ret == FALSE && (tp->t_challengeack_count + 1 > 0)) {
		tp->t_challengeack_count++;
	}

	return ret;
}

/* Check whether a sufficient amount of data has been acknowledged since
 * the bandwidth measurement was started
 */
static void
tcp_bwmeas_check(struct tcpcb *tp)
{
	int32_t bw_meas_bytes;
	uint32_t bw, bytes, elapsed_time;

	if (SEQ_LEQ(tp->snd_una, tp->t_bwmeas->bw_start)) {
		return;
	}

	bw_meas_bytes = tp->snd_una - tp->t_bwmeas->bw_start;
	if ((tp->t_flagsext & TF_BWMEAS_INPROGRESS) &&
	    bw_meas_bytes >= (int32_t)(tp->t_bwmeas->bw_size)) {
		bytes = bw_meas_bytes;
		elapsed_time = tcp_now - tp->t_bwmeas->bw_ts;
		if (elapsed_time > 0) {
			bw = bytes / elapsed_time;
			if (bw > 0) {
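				/*
				 * Smooth the estimate: 7/8 of the weight is
				 * kept on the previous value and 1/8 goes to
				 * the new sample (an EWMA computed with
				 * shifts only).
				 */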
				if (tp->t_bwmeas->bw_sndbw > 0) {
					tp->t_bwmeas->bw_sndbw =
					    (((tp->t_bwmeas->bw_sndbw << 3)
					    - tp->t_bwmeas->bw_sndbw)
					    + bw) >> 3;
				} else {
					tp->t_bwmeas->bw_sndbw = bw;
				}

				/* Store the maximum value */
				if (tp->t_bwmeas->bw_sndbw_max == 0) {
					tp->t_bwmeas->bw_sndbw_max =
					    tp->t_bwmeas->bw_sndbw;
				} else {
					tp->t_bwmeas->bw_sndbw_max =
					    max(tp->t_bwmeas->bw_sndbw,
					    tp->t_bwmeas->bw_sndbw_max);
				}
			}
		}
		tp->t_flagsext &= ~(TF_BWMEAS_INPROGRESS);
	}
}

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m,
    struct ifnet *ifp, int *dowakeup)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq;
	struct tseg_qent *te = NULL;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	int flags = 0;
	uint32_t qlimit;
	stats_functional_type ifnet_count_type = IFNET_COUNT_TYPE(ifp);
	boolean_t dsack_set = FALSE;

	/*
	 * If the reassembly queue already has entries or if we are going
	 * to add a new one, then the connection has reached a loss state.
	 * Reset the stretch-ack algorithm at this point.
	 */
	tcp_reset_stretch_ack(tp);
	tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;

#if TRAFFIC_MGT
	if (tp->acc_iaj > 0) {
		reset_acc_iaj(tp);
	}
#endif /* TRAFFIC_MGT */

	if (th->th_seq != tp->rcv_nxt) {
		struct mbuf *tmp = m;
		while (tmp != NULL) {
			if (mbuf_class_under_pressure(tmp)) {
				m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_TCP_REASS_MEMORY_PRESSURE, NULL, 0);
				tcp_reass_overflows++;
				tcpstat.tcps_rcvmemdrop++;
				*tlenp = 0;
				return 0;
			}

			tmp = tmp->m_next;
		}
	}

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let through the missing segment that caused this
	 * queue to build up. Always keep one global queue entry spare to be
	 * able to process the missing segment.
	 */
	qlimit = min(max(100, so->so_rcv.sb_hiwat >> 10),
	    (tcp_autorcvbuf_max >> 10));
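	/*
	 * For example, with a 512KB receive buffer and the default 2MB
	 * tcp_autorcvbuf_max, qlimit = min(max(100, 512), 2048) = 512 entries.
	 */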
	if (th->th_seq != tp->rcv_nxt &&
	    (tp->t_reassqlen + 1) >= qlimit) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_TCP_REASS_OVERFLOW, NULL, 0);
		*tlenp = 0;
		return 0;
	}

	/* Allocate a new queue entry. If we can't, just drop the pkt. XXX */
	te = tcp_reass_qent_alloc();
	tp->t_reassqlen++;
	OSIncrementAtomic(&tcp_reass_total_qlen);

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) {
			break;
		}
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already. If so, drop the data from the incoming
	 * segment. If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		int i;
		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {
			if (i > 1) {
				/*
				 * Note duplicate data sequence numbers
				 * to report in DSACK option
				 */
				tp->t_dsack_lseq = th->th_seq;
				tp->t_dsack_rseq = th->th_seq +
				    min(i, *tlenp);

				/*
				 * Report only the first part of partial/
				 * non-contiguous duplicate sequence space
				 */
				dsack_set = TRUE;
			}
			if (i >= *tlenp) {
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				if (nstat_collect) {
					nstat_route_rx(inp->inp_route.ro_rt,
					    1, *tlenp,
					    NSTAT_RX_FLAG_DUPLICATE);
					INP_ADD_STAT(inp, ifnet_count_type,
					    rxpackets, 1);
					INP_ADD_STAT(inp, ifnet_count_type,
					    rxbytes, *tlenp);
					tp->t_stat.rxduplicatebytes += *tlenp;
					inp_set_activity_bitmap(inp);
				}
				m_freem(m);
				tcp_reass_qent_free(te);
				te = NULL;
				tp->t_reassqlen--;
				OSDecrementAtomic(&tcp_reass_total_qlen);
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
		}
	}

	if (th->th_seq != tp->rcv_nxt) {
		tp->t_rcvoopack++;
		tcpstat.tcps_rcvoopack++;
		tcpstat.tcps_rcvoobyte += *tlenp;
		if (nstat_collect) {
			tp->t_stat.rxoutoforderbytes += *tlenp;
		}
	}

	if (nstat_collect) {
		nstat_route_rx(inp->inp_route.ro_rt, 1, *tlenp,
		    NSTAT_RX_FLAG_OUT_OF_ORDER);
		INP_ADD_STAT(inp, ifnet_count_type, rxpackets, 1);
		INP_ADD_STAT(inp, ifnet_count_type, rxbytes, *tlenp);
		inp_set_activity_bitmap(inp);
	}

	/*
	 * While we overlap succeeding segments, trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		if (i <= 0) {
			break;
		}

		/*
		 * Report only the first part of a partial/non-contiguous
		 * duplicate segment in the dsack option. The variable
		 * dsack_set will be true if a previous entry has some of
		 * the duplicate sequence space.
		 */
		if (i > 1 && !dsack_set) {
			if (tp->t_dsack_lseq == 0) {
				tp->t_dsack_lseq = q->tqe_th->th_seq;
				tp->t_dsack_rseq =
				    tp->t_dsack_lseq + min(i, q->tqe_len);
			} else {
				/*
				 * this segment overlaps data in multiple
				 * entries in the reassembly queue, move
				 * the right sequence number further.
				 */
				tp->t_dsack_rseq =
				    tp->t_dsack_rseq + min(i, q->tqe_len);
			}
		}
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		tp->t_reassq_mbcnt -= _MSIZE + ((q->tqe_m->m_flags & M_EXT) ?
		    q->tqe_m->m_ext.ext_size : 0);
		m_freem(q->tqe_m);
		tcp_reass_qent_free(q);
		tp->t_reassqlen--;
		OSDecrementAtomic(&tcp_reass_total_qlen);
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	tp->t_reassq_mbcnt += _MSIZE + ((m->m_flags & M_EXT) ? m->m_ext.ext_size : 0);

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		LIST_INSERT_AFTER(p, te, tqe_q);
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
		return 0;
	}
	q = LIST_FIRST(&tp->t_segq);
	if (!q || q->tqe_th->th_seq != tp->rcv_nxt) {
		return 0;
	}

	/*
	 * If there is already another thread doing reassembly for this
	 * connection, it is better to let it finish the job --
	 * (radar 16316196)
	 */
	if (tp->t_flagsext & TF_REASS_INPROG) {
		return 0;
	}

	tp->t_flagsext |= TF_REASS_INPROG;
	/* lost packet was recovered, so ooo data can be returned */
	tcpstat.tcps_recovered_pkts++;

	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_th->th_flags & TH_FIN;
		LIST_REMOVE(q, tqe_q);
		tp->t_reassq_mbcnt -= _MSIZE + ((q->tqe_m->m_flags & M_EXT) ?
		    q->tqe_m->m_ext.ext_size : 0);
		if (so->so_state & SS_CANTRCVMORE) {
			m_freem(q->tqe_m);
		} else {
			so_recv_data_stat(so, q->tqe_m, 0); /* XXXX */
			if (q->tqe_th->th_flags & TH_PUSH) {
				tp->t_flagsext |= TF_LAST_IS_PSH;
			} else {
				tp->t_flagsext &= ~TF_LAST_IS_PSH;
			}

			if (sbappendstream_rcvdemux(so, q->tqe_m)) {
				*dowakeup = 1;
			}
		}
		tcp_reass_qent_free(q);
		tp->t_reassqlen--;
		OSDecrementAtomic(&tcp_reass_total_qlen);
		q = LIST_FIRST(&tp->t_segq);
	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
	tp->t_flagsext &= ~TF_REASS_INPROG;

	if ((inp->inp_vflag & INP_IPV6) != 0) {
		KERNEL_DEBUG(DBG_LAYER_BEG,
		    ((inp->inp_fport << 16) | inp->inp_lport),
		    (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
		    (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
		    0, 0, 0);
	} else {
		KERNEL_DEBUG(DBG_LAYER_BEG,
		    ((inp->inp_fport << 16) | inp->inp_lport),
		    (((inp->inp_laddr.s_addr & 0xffff) << 16) |
		    (inp->inp_faddr.s_addr & 0xffff)),
		    0, 0, 0);
	}

	return flags;
}

/*
 * Enter fast recovery and reduce the congestion window,
 * used when CE is seen or when a tail loss
 * probe recovers the last packet. Also used by RACK.
 */
void
tcp_enter_fast_recovery(struct tcpcb *tp)
{
	/*
	 * If the current tcp cc module has
	 * defined a hook for tasks to run
	 * before entering FR, call it
	 */
	if (CC_ALGO(tp)->pre_fr != NULL) {
		CC_ALGO(tp)->pre_fr(tp);
	}
	ENTER_FASTRECOVERY(tp);
	if (tp->t_flags & TF_SENTFIN) {
		tp->snd_recover = tp->snd_max - 1;
	} else {
		tp->snd_recover = tp->snd_max;
	}

	tp->t_flagsext &= ~TF_CWND_NONVALIDATED;

	tp->t_timer[TCPT_REXMT] = 0;
	tp->t_timer[TCPT_PTO] = 0;
	tp->t_rtttime = 0;
	if (tp->t_flagsext & TF_CWND_NONVALIDATED) {
		tcp_cc_adjust_nonvalidated_cwnd(tp);
	} else {
		/* No need to inflate the congestion window */
		tp->snd_cwnd = tp->snd_ssthresh;
	}
}

/*
 * This function is called upon reception of data on a socket. Its purpose is
 * to handle the adaptive keepalive timers that monitor whether the connection
 * is making progress. First the adaptive read-timer, second the TFO probe-timer.
 *
 * The application wants to get an event if there is a stall during read.
 * Set the initial keepalive timeout to be equal to twice RTO.
 *
 * If the outgoing interface is in marginal conditions, we need to
 * enable read probes for that too.
 */
static inline void
tcp_adaptive_rwtimo_check(struct tcpcb *tp, int tlen)
{
	struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;

	if ((tp->t_adaptive_rtimo > 0 ||
	    (outifp != NULL &&
	    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)))
	    && tlen > 0 &&
	    tp->t_state == TCPS_ESTABLISHED) {
		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
		    (TCP_REXMTVAL(tp) << 1));
		tp->t_flagsext |= TF_DETECT_READSTALL;
		tp->t_rtimo_probes = 0;
	}
}

inline void
tcp_keepalive_reset(struct tcpcb *tp)
{
	tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
	    TCP_CONN_KEEPIDLE(tp));
	tp->t_flagsext &= ~(TF_DETECT_READSTALL);
	tp->t_rtimo_probes = 0;
}

void
tcp_set_finwait_timeout(struct tcpcb *tp)
{
	/*
	 * Starting the TCPT_2MSL timer is contrary to the
	 * specification, but if we don't get a FIN
	 * we'll hang forever.
	 */
	ASSERT(tp->t_state == TCPS_FIN_WAIT_2);
	ASSERT((tp->t_inpcb->inp_socket->so_state & (SS_CANTRCVMORE)) == SS_CANTRCVMORE);

	if (tcp_fin_timeout > 0 &&
	    tcp_fin_timeout < TCP_CONN_MAXIDLE(tp)) {
		tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, tcp_fin_timeout);
	} else {
		tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, TCP_CONN_MAXIDLE(tp));
	}
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
#pragma unused(proto)
	struct mbuf *m = *mp;
	uint32_t ia6_flags;
	struct ifnet *ifp = m->m_pkthdr.rcvif;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), return IPPROTO_DONE);

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	if (ip6_getdstifaddr_info(m, NULL, &ia6_flags) == 0) {
		if (ia6_flags & IN6_IFF_ANYCAST) {
			struct ip6_hdr *ip6;

			ip6 = mtod(m, struct ip6_hdr *);
			icmp6_error(m, ICMP6_DST_UNREACH,
			    ICMP6_DST_UNREACH_ADDR,
			    (int)((caddr_t)&ip6->ip6_dst - (caddr_t)ip6));

			IF_TCP_STATINC(ifp, icmp6unreach);

			return IPPROTO_DONE;
		}
	}

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}

static void
tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv,
    u_int32_t newsize, u_int32_t idealsize, u_int32_t rcvbuf_max)
{
	/* newsize should not exceed max */
	newsize = min(newsize, rcvbuf_max);

	/* The receive window scale negotiated at the
	 * beginning of the connection will also set a
	 * limit on the socket buffer size
	 */
	newsize = min(newsize, TCP_MAXWIN << tp->rcv_scale);

	/* Set new socket buffer size */
	if (newsize > sbrcv->sb_hiwat &&
	    (sbreserve(sbrcv, newsize) == 1)) {
		sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize,
		    (idealsize != 0) ? idealsize : newsize), rcvbuf_max);

		/* Again check the limit set by the advertised
		 * window scale
		 */
		sbrcv->sb_idealsize = min(sbrcv->sb_idealsize,
		    TCP_MAXWIN << tp->rcv_scale);
	}
}

/*
 * This function is used to grow a receive socket buffer. It
 * will take into account system-level memory usage and the
 * bandwidth available on the link to make a decision.
 */
static void
tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv,
    struct tcpopt *to, uint32_t pktlen)
{
	struct socket *so = sbrcv->sb_so;

	/*
	 * Do not grow the receive socket buffer if
	 * - auto resizing is disabled, globally or on this socket
	 * - the high water mark already reached the maximum
	 * - the stream is in background and receive side is being
	 *   throttled
	 */
	if (tcp_do_autorcvbuf == 0 ||
	    (sbrcv->sb_flags & SB_AUTOSIZE) == 0 ||
	    sbrcv->sb_hiwat >= tcp_autorcvbuf_max ||
	    (tp->t_flagsext & TF_RECV_THROTTLE) ||
	    (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) ||
	    (!tcp_autotune_reorder && !LIST_EMPTY(&tp->t_segq))) {
		/* Can not resize the socket buffer, just return */
		goto out;
	}

	if (!TSTMP_SUPPORTED(tp)) {
		/*
		 * Timestamp option is not supported on this connection,
		 * use receiver's RTT. Socket buffer grows based on the
		 * BDP of the link.
		 */
		if (TSTMP_GEQ(tcp_now,
		    tp->rfbuf_ts + (tp->rcv_srtt >> TCP_RTT_SHIFT))) {
			tp->rfbuf_cnt += pktlen;
			if (tp->rfbuf_cnt > tp->rfbuf_space) {
				int32_t rcvbuf_inc;
				uint32_t idealsize;

				/*
				 * Increase the receive buffer aggressively
				 * if we received more than 150% of what was
				 * received in the previous round, because
				 * that means the sender is in TCP slow-start
				 * and we need to give it more space so that
				 * a small receive window does not limit it.
				 */
				if (tp->rfbuf_cnt > tp->rfbuf_space + (tp->rfbuf_space >> 1)) {
					rcvbuf_inc = (tp->rfbuf_cnt << 2) - sbrcv->sb_hiwat;
					idealsize = (tp->rfbuf_cnt << 2);
				} else {
					rcvbuf_inc = (tp->rfbuf_cnt << 1) - sbrcv->sb_hiwat;
					idealsize = (tp->rfbuf_cnt << 1);
				}
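
				/*
				 * E.g., rfbuf_space = 64KB with rfbuf_cnt =
				 * 100KB exceeds the 150% threshold (96KB), so
				 * the ideal size quadruples to 400KB instead
				 * of merely doubling.
				 */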

				if (rcvbuf_inc > 0) {
					rcvbuf_inc =
					    (rcvbuf_inc / tp->t_maxseg) * tp->t_maxseg;

					tcp_sbrcv_reserve(tp, sbrcv,
					    sbrcv->sb_hiwat + rcvbuf_inc,
					    idealsize, tcp_autorcvbuf_max);

					tp->rfbuf_space = tp->rfbuf_cnt;
				}
			}
			goto out;
		} else {
			tp->rfbuf_cnt += pktlen;
			return;
		}
	} else if (to->to_tsecr != 0) {
		/*
		 * If the timestamp shows that one RTT has
		 * completed, we can stop counting the
		 * bytes. Here we consider increasing
		 * the socket buffer if the bandwidth measured in
		 * the last RTT is more than half of sb_hiwat; this
		 * helps to scale the buffer according to the
		 * bandwidth on the link.
		 */
		if (TSTMP_GEQ(to->to_tsecr, tp->rfbuf_ts)) {
			tp->rfbuf_cnt += pktlen;

			if (tp->rfbuf_cnt > tp->rfbuf_space) {
				int32_t rcvbuf_inc;
				uint32_t idealsize;

				if (tp->rfbuf_cnt > tp->rfbuf_space + (tp->rfbuf_space >> 1)) {
					rcvbuf_inc = (tp->rfbuf_cnt << 2) - sbrcv->sb_hiwat;
					idealsize = (tp->rfbuf_cnt << 2);
				} else {
					rcvbuf_inc = (tp->rfbuf_cnt << 1) - sbrcv->sb_hiwat;
					idealsize = (tp->rfbuf_cnt << 1);
				}

				tp->rfbuf_space = tp->rfbuf_cnt;

				if (rcvbuf_inc > 0) {
					rcvbuf_inc =
					    (rcvbuf_inc / tp->t_maxseg) * tp->t_maxseg;

					tcp_sbrcv_reserve(tp, sbrcv,
					    sbrcv->sb_hiwat + rcvbuf_inc,
					    idealsize, tcp_autorcvbuf_max);
				}
			}
			/* Measure instantaneous receive bandwidth */
			if (tp->t_bwmeas != NULL && tp->rfbuf_cnt > 0 &&
			    TSTMP_GT(tcp_now, tp->rfbuf_ts)) {
				u_int32_t rcv_bw;
				rcv_bw = tp->rfbuf_cnt /
				    (int)(tcp_now - tp->rfbuf_ts);
				if (tp->t_bwmeas->bw_rcvbw_max == 0) {
					tp->t_bwmeas->bw_rcvbw_max = rcv_bw;
				} else {
					tp->t_bwmeas->bw_rcvbw_max = max(
						tp->t_bwmeas->bw_rcvbw_max, rcv_bw);
				}
			}
			goto out;
		} else {
			tp->rfbuf_cnt += pktlen;
			return;
		}
	}
out:
	/* Restart the measurement */
	tp->rfbuf_ts = tcp_now;
	tp->rfbuf_cnt = 0;
	return;
}

/* This function will trim the excess space added to the socket buffer
 * to help a slow-reading app. The ideal size of a socket buffer depends
 * on the link bandwidth or it is set by an application and we aim to
 * reach that size.
 */
void
tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv)
{
	if (tcp_do_autorcvbuf == 1 && sbrcv->sb_idealsize > 0 &&
	    sbrcv->sb_hiwat > sbrcv->sb_idealsize) {
		int32_t trim;
		/* compute the difference between ideal and current sizes */
		u_int32_t diff = sbrcv->sb_hiwat - sbrcv->sb_idealsize;

		/* Compute the maximum advertised window for
		 * this connection.
		 */
		u_int32_t advwin = tp->rcv_adv - tp->rcv_nxt;

		/* How much can we trim the receive socket buffer?
		 * 1. it can not be trimmed beyond the max rcv win advertised
		 * 2. if possible, leave 1/16 of bandwidth*delay to
		 *    avoid closing the win completely
		 */
		u_int32_t leave = max(advwin, (sbrcv->sb_idealsize >> 4));

		/* Sometimes leave can be zero, in that case leave at least
		 * a few segments worth of space.
		 */
		if (leave == 0) {
			leave = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
		}
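
		/*
		 * E.g., hiwat = 256KB, idealsize = 128KB, sb_cc = 64KB and
		 * leave = 32KB give trim = 256 - (64 + 32) = 160KB, clipped
		 * to diff = 128KB, shrinking the buffer to its ideal size.
		 */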

		trim = sbrcv->sb_hiwat - (sbrcv->sb_cc + leave);
		trim = imin(trim, (int32_t)diff);

		if (trim > 0) {
			sbreserve(sbrcv, (sbrcv->sb_hiwat - trim));
		}
	}
}

/* We may need to trim the send socket buffer size for two reasons:
 * 1. if the rtt seen on the connection is climbing up, we do not
 *    want to fill the buffers any more.
 * 2. if the congestion win on the socket backed off, there is no need
 *    to hold more mbufs for that connection than what the cwnd will allow.
 */
void
tcp_sbsnd_trim(struct sockbuf *sbsnd)
{
	if (((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) ==
	    (SB_AUTOSIZE | SB_TRIM)) &&
	    (sbsnd->sb_idealsize > 0) &&
	    (sbsnd->sb_hiwat > sbsnd->sb_idealsize)) {
		u_int32_t trim = 0;
		if (sbsnd->sb_cc <= sbsnd->sb_idealsize) {
			trim = sbsnd->sb_hiwat - sbsnd->sb_idealsize;
		} else {
			trim = sbsnd->sb_hiwat - sbsnd->sb_cc;
		}
		sbreserve(sbsnd, (sbsnd->sb_hiwat - trim));
	}
	if (sbsnd->sb_hiwat <= sbsnd->sb_idealsize) {
		sbsnd->sb_flags &= ~(SB_TRIM);
	}
}

/*
 * If timestamp option was not negotiated on this connection
 * and this connection is on the receiving side of a stream
 * then we can not measure the delay on the link accurately.
 * Instead of enabling automatic receive socket buffer
 * resizing, just give more space to the receive socket buffer.
 */
static inline void
tcp_sbrcv_tstmp_check(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	u_int32_t newsize = 2 * tcp_recvspace;
	struct sockbuf *sbrcv = &so->so_rcv;

	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP)) !=
	    (TF_REQ_TSTMP | TF_RCVD_TSTMP) &&
	    (sbrcv->sb_flags & SB_AUTOSIZE) != 0) {
		tcp_sbrcv_reserve(tp, sbrcv, newsize, 0, newsize);
	}
}

/* A receiver will evaluate the flow of packets on a connection
 * to see if it can reduce ack traffic. The receiver will start
 * stretching acks if all of the following conditions are met:
 * 1. tcp_delack_enabled is set to 3
 * 2. the number of bytes received in the last 100ms is greater than
 *    a threshold defined by maxseg_unacked
 * 3. the connection has not been idle for the tcp_maxrcvidle period.
 * 4. the connection has seen enough packets to let the slow-start
 *    finish after connection establishment or after some packet loss.
 *
 * The receiver will stop stretching acks if there is congestion/reordering
 * as indicated by packets on reassembly queue or an ECN. If the delayed-ack
 * timer fires while stretching acks, it means that the packet flow has gone
 * below the threshold defined by maxseg_unacked and the receiver will stop
 * stretching acks. The receiver gets no indication when slow-start is completed
 * or when the connection reaches an idle state. That is why we use
 * tcp_rcvsspktcnt to cover slow-start and tcp_maxrcvidle to identify idle
 * state.
 */
static inline int
tcp_stretch_ack_enable(struct tcpcb *tp, int thflags)
{
	if (tp->rcv_by_unackwin >= (maxseg_unacked * tp->t_maxseg) &&
	    TSTMP_GEQ(tp->rcv_unackwin, tcp_now)) {
		tp->t_flags |= TF_STREAMING_ON;
	} else {
		tp->t_flags &= ~TF_STREAMING_ON;
	}

	/* If there has been an idle time, reset streaming detection */
	if (TSTMP_GT(tcp_now, tp->rcv_unackwin + tcp_maxrcvidle)) {
		tp->t_flags &= ~TF_STREAMING_ON;
	}

	/*
	 * If there are flags other than TH_ACK set, reset streaming
	 * detection
	 */
	if (thflags & ~TH_ACK) {
		tp->t_flags &= ~TF_STREAMING_ON;
	}

	if (tp->t_flagsext & TF_DISABLE_STRETCHACK) {
		if (tp->rcv_nostrack_pkts >= TCP_STRETCHACK_ENABLE_PKTCNT) {
			tp->t_flagsext &= ~TF_DISABLE_STRETCHACK;
			tp->rcv_nostrack_pkts = 0;
			tp->rcv_nostrack_ts = 0;
		} else {
			tp->rcv_nostrack_pkts++;
		}
	}

	if (!(tp->t_flagsext & (TF_NOSTRETCHACK | TF_DISABLE_STRETCHACK)) &&
	    (tp->t_flags & TF_STREAMING_ON) &&
	    (!(tp->t_flagsext & TF_RCVUNACK_WAITSS) ||
	    (tp->rcv_waitforss >= tcp_rcvsspktcnt))) {
		return 1;
	}

	return 0;
}

/*
 * Reset the state related to the stretch-ack algorithm. This will make
 * the receiver generate an ack every other packet. The receiver
 * will start re-evaluating the rate at which packets come to decide
 * if it can benefit by lowering the ack traffic.
 */
void
tcp_reset_stretch_ack(struct tcpcb *tp)
{
	tp->t_flags &= ~(TF_STRETCHACK | TF_STREAMING_ON);
	tp->rcv_by_unackwin = 0;
	tp->rcv_by_unackhalfwin = 0;
	tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;

	/*
	 * When there is packet loss or packet re-ordering or CWR due to
	 * ECN, the sender's congestion window is reduced. In these states,
	 * generate an ack for every other packet for some time to allow
	 * the sender's congestion window to grow.
	 */
	tp->t_flagsext |= TF_RCVUNACK_WAITSS;
	tp->rcv_waitforss = 0;
}

/*
 * The last packet was a retransmission, check if this ack
 * indicates that the retransmission was spurious.
 *
 * If the connection supports timestamps, we could use them to
 * detect if the last retransmit was not needed. Otherwise,
 * we check if the ACK arrived within an RTT/2 window; if so, it
 * was a mistake to do the retransmit in the first place.
 *
 * This function will return 1 if it is a spurious retransmit,
 * 0 otherwise.
 */
int
tcp_detect_bad_rexmt(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, u_int32_t rxtime)
{
	int32_t tdiff, bad_rexmt_win;
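
	/*
	 * Note: t_srtt is kept scaled by 2^TCP_RTT_SHIFT, so shifting by
	 * TCP_RTT_SHIFT + 1 yields roughly half the smoothed RTT.
	 */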
	bad_rexmt_win = (tp->t_srtt >> (TCP_RTT_SHIFT + 1));

	/* If the ack has ECN CE bit, then cwnd has to be adjusted */
	if ((TCP_ACC_ECN_ON(tp) && tp->t_aecn.t_delta_ce_packets > 0) ||
	    (TCP_ECN_ENABLED(tp) && (th->th_flags & TH_ECE))) {
		return 0;
	}
	if (TSTMP_SUPPORTED(tp)) {
		if (rxtime > 0 && (to->to_flags & TOF_TS) && to->to_tsecr != 0 &&
		    TSTMP_LT(to->to_tsecr, rxtime)) {
			return 1;
		}
	} else {
		if ((tp->t_rxtshift == 1 || tcp_sent_tlp_retrans(tp)) &&
		    rxtime > 0) {
			tdiff = (int32_t)(tcp_now - rxtime);
			if (tdiff < bad_rexmt_win) {
				return 1;
			}
		}
	}
	return 0;
}


/*
 * Restore congestion window state if a spurious timeout
 * was detected.
 */
static void
tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th)
{
	if (TSTMP_SUPPORTED(tp)) {
		u_int32_t fsize, acked;
		fsize = tp->snd_max - th->th_ack;
		acked = BYTES_ACKED(th, tp);

		/*
		 * Implement bad retransmit recovery as
		 * described in RFC 4015.
		 */
		tp->snd_ssthresh = tp->snd_ssthresh_prev;

		/* Initialize cwnd to the initial window */
		if (CC_ALGO(tp)->cwnd_init != NULL) {
			CC_ALGO(tp)->cwnd_init(tp);
		}

		tp->snd_cwnd = fsize + min(acked, tp->snd_cwnd);
	} else {
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		if (tp->t_flags & TF_WASFRECOVERY) {
			ENTER_FASTRECOVERY(tp);
		}

		/* Do not use the loss flight size in this case */
		tp->t_lossflightsize = 0;
	}
	tp->snd_cwnd = max(tp->snd_cwnd, tcp_initial_cwnd(tp));
	tp->snd_recover = tp->snd_recover_prev;
	tp->snd_nxt = tp->snd_max;

	/* Fix send socket buffer to reflect the change in cwnd */
	tcp_bad_rexmt_fix_sndbuf(tp);

	/* Restore RACK-related state */
	if (TCP_RACK_ENABLED(tp)) {
		tcp_rack_bad_rexmt_restore(tp);
	}

	/*
	 * This RTT might reflect the extra delay induced
	 * by the network. Skip using this sample for RTO
	 * calculation and mark the connection so we can
	 * recompute RTT when the next eligible sample is
	 * found.
	 */
	tp->t_flagsext |= TF_RECOMPUTE_RTT;
	tp->t_badrexmt_time = tcp_now;
	tp->t_rtttime = 0;
}

/*
 * If the previous packet was sent by the retransmission timer, and it was
 * not needed, then restore the congestion window to the state before that
 * transmission.
 *
 * If the last packet was sent as a tail loss probe retransmission, check if that
 * recovered the last packet. If so, that will indicate a real loss and
 * the congestion window needs to be lowered.
 */
static void
tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	if (tp->t_rxtshift > 0 &&
	    tcp_detect_bad_rexmt(tp, th, to, tp->t_rxtstart)) {
		++tcpstat.tcps_sndrexmitbad;
		tcp_bad_rexmt_restore_state(tp, th);
		tcp_ccdbg_trace(tp, th, TCP_CC_BAD_REXMT_RECOVERY);
	} else if (tcp_sent_tlp_retrans(tp) && tp->t_tlphighrxt > 0 &&
	    SEQ_GEQ(th->th_ack, tp->t_tlphighrxt) &&
	    !tcp_detect_bad_rexmt(tp, th, to, tp->t_tlpstart)) {
		/*
		 * The tail loss probe recovered the last packet and
		 * we need to adjust the congestion window to take
		 * this loss into account.
		 * No need to update rack.reo_wnd_persist for a TLP recovery
		 */
		++tcpstat.tcps_tlp_recoverlastpkt;
		if (!IN_FASTRECOVERY(tp)) {
			tcp_enter_fast_recovery(tp);
			EXIT_FASTRECOVERY(tp);
		}
		tcp_ccdbg_trace(tp, th, TCP_CC_TLP_RECOVER_LASTPACKET);
	} else if (tcp_rxtseg_detect_bad_rexmt(tp, th->th_ack)) {
		/*
		 * All of the retransmitted segments were duplicated, this
		 * can be an indication of bad fast retransmit.
		 */
		tcpstat.tcps_dsack_badrexmt++;
		tcp_bad_rexmt_restore_state(tp, th);
		tcp_ccdbg_trace(tp, th, TCP_CC_DSACK_BAD_REXMT);
		tcp_rxtseg_clean(tp);
	}
	tp->t_flagsext &= ~(TF_SENT_TLPROBE);
	tp->t_tlphighrxt = 0;
	tp->t_tlpstart = 0;

	/*
	 * Check if the latest ack was for a segment sent during PMTU
	 * blackhole detection. If the timestamp on the ack is before
	 * PMTU blackhole detection, then revert the size of the max
	 * segment to previous size.
	 */
	if (tp->t_rxtshift > 0 && (tp->t_flags & TF_BLACKHOLE) &&
	    tp->t_pmtud_start_ts > 0 && TSTMP_SUPPORTED(tp)) {
		if ((to->to_flags & TOF_TS) && to->to_tsecr != 0
		    && TSTMP_LT(to->to_tsecr, tp->t_pmtud_start_ts)) {
			tcp_pmtud_revert_segment_size(tp);
		}
	}
	if (tp->t_pmtud_start_ts > 0) {
		tp->t_pmtud_start_ts = 0;
	}

	tp->t_pmtud_lastseg_size = 0;
}

/*
 * Check if early retransmit can be attempted according to RFC 5827.
 *
 * If packet reordering is detected on a connection, fast recovery will
 * be delayed until it is clear that the packet was lost and not reordered.
 * But reordering detection is done only when SACK is enabled.
 *
 * On connections that do not support SACK, there is a limit on the number
 * of early retransmits that can be done per minute. This limit is needed
 * to make sure that too many packets are not retransmitted when there is
 * packet reordering.
 */
static void
tcp_early_rexmt_check(struct tcpcb *tp, struct tcphdr *th)
{
	u_int32_t obytes, snd_off;
	int32_t snd_len;
	struct socket *so = tp->t_inpcb->inp_socket;

	if ((SACK_ENABLED(tp) || tp->t_early_rexmt_count < TCP_EARLY_REXMT_LIMIT) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    (tp->t_dupacks == 1 || (SACK_ENABLED(tp) && !TAILQ_EMPTY(&tp->snd_holes)))) {
		/*
		 * If there are only a few outstanding
		 * segments on the connection, we might need
		 * to lower the retransmit threshold. This
		 * will allow us to do Early Retransmit as
		 * described in RFC 5827.
		 */
		if (TCP_RACK_ENABLED(tp)) {
			obytes = tcp_flight_size(tp);
		} else if (SACK_ENABLED(tp) &&
		    !TAILQ_EMPTY(&tp->snd_holes)) {
			obytes = tcp_flight_size(tp);
		} else {
			obytes = (tp->snd_max - tp->snd_una);
		}

		/*
		 * In order to lower the retransmit threshold the
		 * following two conditions must be met.
		 * 1. the amount of outstanding data is less
		 *    than 4*SMSS bytes
		 * 2. there is no unsent data ready for
		 *    transmission or the advertised window
		 *    will limit sending new segments.
		 */
		snd_off = tp->snd_max - tp->snd_una;
		snd_len = min(so->so_snd.sb_cc, tp->snd_wnd) - snd_off;
		if (obytes < (tp->t_maxseg << 2) &&
		    snd_len <= 0) {
			u_int32_t osegs;

			osegs = obytes / tp->t_maxseg;
			if ((osegs * tp->t_maxseg) < obytes) {
				osegs++;
			}

			/*
			 * By checking for early retransmit after
			 * receiving some duplicate acks when SACK
			 * is supported, the connection will
			 * enter fast recovery even if multiple
			 * segments are lost in the same window.
			 */
			if (osegs < 4) {
				tp->t_rexmtthresh =
				    ((osegs - 1) > 1) ? ((uint8_t)osegs - 1) : 1;
				tp->t_rexmtthresh =
				    MIN(tp->t_rexmtthresh, tcprexmtthresh);
				tp->t_rexmtthresh =
				    MAX(tp->t_rexmtthresh,
				    tp->t_dupacks > UINT8_MAX ? UINT8_MAX : (uint8_t)tp->t_dupacks);

				if (tp->t_early_rexmt_count == 0) {
					tp->t_early_rexmt_win = tcp_now;
				}

				if (tp->t_flagsext & TF_SENT_TLPROBE) {
					tcpstat.tcps_tlp_recovery++;
					tcp_ccdbg_trace(tp, th,
					    TCP_CC_TLP_RECOVERY);
				} else {
					tcpstat.tcps_early_rexmt++;
					tp->t_early_rexmt_count++;
					tcp_ccdbg_trace(tp, th,
					    TCP_CC_EARLY_RETRANSMIT);
				}
			}
		}
	}

	/*
	 * If we ever sent a TLP probe, the acknowledgement will trigger
	 * early retransmit because the value of snd_fack will be close
	 * to snd_max. This will take care of adjustments to the
	 * congestion window. So we can reset the TF_SENT_TLPROBE flag.
	 */
	tp->t_flagsext &= ~(TF_SENT_TLPROBE);
	tp->t_tlphighrxt = 0;
	tp->t_tlpstart = 0;
}

static boolean_t
tcp_tfo_syn(struct tcpcb *tp, struct tcpopt *to)
{
	u_char out[CCAES_BLOCK_SIZE];
	unsigned char len;

	if (!(to->to_flags & (TOF_TFO | TOF_TFOREQ)) ||
	    !(tcp_fastopen & TCP_FASTOPEN_SERVER)) {
		return FALSE;
	}

	if ((to->to_flags & TOF_TFOREQ)) {
		tp->t_tfo_flags |= TFO_F_OFFER_COOKIE;

		tp->t_tfo_stats |= TFO_S_COOKIEREQ_RECV;
		tcpstat.tcps_tfo_cookie_req_rcv++;
		return FALSE;
	}

	/* Ok, then it must be an offered cookie. We need to check that ... */
	tcp_tfo_gen_cookie(tp->t_inpcb, out, sizeof(out));

	len = *to->to_tfo - TCPOLEN_FASTOPEN_REQ;
	to->to_tfo++;
	to->to_tfo_size--;
	if (memcmp(out, to->to_tfo, len)) {
		/* Cookies are different! Let's return and offer a new cookie */
		tp->t_tfo_flags |= TFO_F_OFFER_COOKIE;

		tp->t_tfo_stats |= TFO_S_COOKIE_INVALID;
		tcpstat.tcps_tfo_cookie_invalid++;
		return FALSE;
	}

	if (OSIncrementAtomic(&tcp_tfo_halfcnt) >= tcp_tfo_backlog) {
		/* Need to decrement again as we just increased it... */
		OSDecrementAtomic(&tcp_tfo_halfcnt);
		return FALSE;
	}

	tp->t_tfo_flags |= TFO_F_COOKIE_VALID;

	tp->t_tfo_stats |= TFO_S_SYNDATA_RCV;
	tcpstat.tcps_tfo_syn_data_rcv++;

	return TRUE;
}

static void
tcp_tfo_synack(struct tcpcb *tp, struct tcpopt *to)
{
	if (to->to_flags & TOF_TFO) {
		unsigned char len = *to->to_tfo - TCPOLEN_FASTOPEN_REQ;

		/*
		 * If this happens, things have gone terribly wrong. len should
		 * have been checked in tcp_dooptions.
		 */
		VERIFY(len <= TFO_COOKIE_LEN_MAX);

		to->to_tfo++;
		to->to_tfo_size--;

		tcp_cache_set_cookie(tp, to->to_tfo, len);
		tcp_heuristic_tfo_success(tp);

		tp->t_tfo_stats |= TFO_S_COOKIE_RCV;
		tcpstat.tcps_tfo_cookie_rcv++;
		if (tp->t_tfo_flags & TFO_F_COOKIE_SENT) {
			tcpstat.tcps_tfo_cookie_wrong++;
			tp->t_tfo_stats |= TFO_S_COOKIE_WRONG;
		}
	} else {
		/*
		 * Thus, no cookie in the response, but we either asked for one
		 * or sent SYN+DATA. Now, we need to check whether we had to
		 * rexmit the SYN. If that's the case, it's better to start
		 * backing off TFO-cookie requests.
		 */
		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
		    tp->t_tfo_flags & TFO_F_SYN_LOSS) {
			tp->t_tfo_stats |= TFO_S_SYN_LOSS;
			tcpstat.tcps_tfo_syn_loss++;

			tcp_heuristic_tfo_loss(tp);
		} else {
			if (tp->t_tfo_flags & TFO_F_COOKIE_REQ) {
				tp->t_tfo_stats |= TFO_S_NO_COOKIE_RCV;
				tcpstat.tcps_tfo_no_cookie_rcv++;
			}

			tcp_heuristic_tfo_success(tp);
		}
	}
}

static void
tcp_tfo_rcv_probe(struct tcpcb *tp, int tlen)
{
	if (tlen != 0) {
		return;
	}

	tp->t_tfo_probe_state = TFO_PROBE_PROBING;

	/*
	 * We send the probe out rather quickly (after one RTO). It does not
	 * really hurt that much, it's only one additional segment on the wire.
	 */
	tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, (TCP_REXMTVAL(tp)));
}

static void
tcp_tfo_rcv_data(struct tcpcb *tp)
{
	/* Transition from PROBING to NONE as data has been received */
	if (tp->t_tfo_probe_state >= TFO_PROBE_PROBING) {
		tp->t_tfo_probe_state = TFO_PROBE_NONE;
	}
}
1701
1702 static void
1703 tcp_tfo_rcv_ack(struct tcpcb *tp, struct tcphdr *th)
1704 {
1705 if (tp->t_tfo_probe_state == TFO_PROBE_PROBING &&
1706 tp->t_tfo_probes > 0) {
1707 if (th->th_seq == tp->rcv_nxt) {
1708 /* No hole, so stop probing */
1709 tp->t_tfo_probe_state = TFO_PROBE_NONE;
1710 } else if (SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1711 /* There is a hole! Wait a bit for data... */
1712 tp->t_tfo_probe_state = TFO_PROBE_WAIT_DATA;
1713 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1714 TCP_REXMTVAL(tp));
1715 }
1716 }
1717 }
1718
1719 /*
1720 * Update snd_wnd information.
1721 */
1722 static inline bool
1723 tcp_update_window(struct tcpcb *tp, int thflags, struct tcphdr *th,
1724 u_int32_t tiwin, int tlen)
1725 {
1726 /* Don't look at the window if there is no ACK flag */
1727 if ((thflags & TH_ACK) &&
1728 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
1729 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
1730 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
1731 /* keep track of pure window updates */
1732 if (tlen == 0 &&
1733 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) {
1734 tcpstat.tcps_rcvwinupd++;
1735 }
1736 tp->snd_wnd = tiwin;
1737 tp->snd_wl1 = th->th_seq;
1738 tp->snd_wl2 = th->th_ack;
1739 if (tp->snd_wnd > tp->max_sndwnd) {
1740 tp->max_sndwnd = tp->snd_wnd;
1741 }
1742
1743 if (tp->t_inpcb->inp_socket->so_flags & SOF_MP_SUBFLOW) {
1744 mptcp_update_window_wakeup(tp);
1745 }
1746 return true;
1747 }
1748 return false;
1749 }
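
/*
 * A compact restatement of the acceptance test above, following the
 * RFC 793/9293 ordering rule (illustrative pseudocode only):
 *
 *   if (SEQ_LT(snd_wl1, seg.seq) ||                    newer segment
 *       (snd_wl1 == seg.seq &&
 *        (SEQ_LT(snd_wl2, seg.ack) ||                  newer ACK
 *         (snd_wl2 == seg.ack && seg.wnd > snd_wnd)))  larger window
 *
 * so a retransmitted or reordered segment can never overwrite snd_wnd
 * with stale window information.
 */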
1750
1751 static void
1752 tcp_handle_wakeup(struct socket *so, int read_wakeup, int write_wakeup)
1753 {
1754 if (read_wakeup != 0) {
1755 sorwakeup(so);
1756 }
1757 if (write_wakeup != 0) {
1758 sowwakeup(so);
1759 }
1760 }
1761
1762 static void
1763 tcp_update_snd_una(struct tcpcb *tp, uint32_t ack)
1764 {
1765 tp->snd_una = ack;
1766 }
1767
1768 static bool
1769 tcp_syn_data_valid(struct tcpcb *tp, struct tcphdr *tcp_hdr, int tlen)
1770 {
1771 /* No data? */
1772 if (tlen <= 0) {
1773 return false;
1774 }
1775
1776 /* Not the right sequence-number? */
1777 if (tcp_hdr->th_seq != tp->irs) {
1778 return false;
1779 }
1780
1781 /* We could have wrapped around, check that */
1782 if (tp->t_inpcb->inp_stat->rxbytes > INT32_MAX) {
1783 return false;
1784 }
1785
1786 return true;
1787 }
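
/*
 * The rxbytes check above is a wrap guard (illustrative reasoning):
 * th_seq == irs is only legitimate for data on the SYN at the very
 * start of the connection. Once more than 2^31 bytes have been
 * received, an equal sequence number is far more likely to be a 32-bit
 * sequence-space wrap than genuine SYN data, so it is rejected.
 */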
1788
1789 /* Process IP-ECN codepoints on received packets and update receive side counters */
1790 static void
1791 tcp_input_ip_ecn(struct tcpcb *tp, struct inpcb *inp, uint32_t tlen,
1792 uint32_t segment_count, uint8_t ip_ecn)
1793 {
1794 switch (ip_ecn) {
1795 case IPTOS_ECN_ECT1:
1796 tp->ecn_flags |= TE_ACO_ECT1;
1797 tp->t_aecn.t_rcv_ect1_bytes += tlen;
1798 break;
1799 case IPTOS_ECN_ECT0:
1800 tp->ecn_flags |= TE_ACO_ECT0;
1801 tp->t_aecn.t_rcv_ect0_bytes += tlen;
1802 break;
1803 case IPTOS_ECN_CE:
1804 tp->t_aecn.t_rcv_ce_packets += segment_count;
1805 tp->t_aecn.t_rcv_ce_bytes += tlen;
1806 tp->t_ecn_recv_ce++;
1807 tcpstat.tcps_ecn_recv_ce++;
1808 INP_INC_IFNET_STAT(inp, ecn_recv_ce);
1809 break;
1810 default:
1811 /* No counter for Not-ECT */
1812 break;
1813 }
1814 }
1815
1816 /* Process SYN packet that wishes to negotiate Accurate ECN */
1817 static void
1818 tcp_input_process_accecn_syn(struct tcpcb *tp, int ace_flags, uint8_t ip_ecn)
1819 {
1820 switch (ace_flags) {
1821 case (0 | 0 | 0):
1822 /* No ECN */
1823 tp->t_server_accecn_state = tcp_connection_server_no_ecn_requested;
1824 break;
1825 case (0 | TH_CWR | TH_ECE):
1826 /* Legacy ECN-setup */
1827 tp->ecn_flags |= (TE_SETUPRECEIVED | TE_SENDIPECT);
1828 tp->t_server_accecn_state = tcp_connection_server_classic_ecn_requested;
1829 break;
1830 case (TH_ACE):
1831 /* Accurate ECN */
1832 if (TCP_L4S_ENABLED(tp)) {
1833 switch (ip_ecn) {
1834 case IPTOS_ECN_NOTECT:
1835 tp->ecn_flags |= TE_ACE_SETUP_NON_ECT;
1836 break;
1837 case IPTOS_ECN_ECT1:
1838 tp->ecn_flags |= TE_ACE_SETUP_ECT1;
1839 break;
1840 case IPTOS_ECN_ECT0:
1841 tp->ecn_flags |= TE_ACE_SETUP_ECT0;
1842 break;
1843 case IPTOS_ECN_CE:
1844 tp->ecn_flags |= TE_ACE_SETUP_CE;
1845 break;
1846 }
1847 /*
1848 * We set TE_SENDIPECT when handshake is complete
1849 * for Accurate ECN
1850 */
1851 tp->ecn_flags |= (TE_ACE_SETUPRECEIVED);
1852
1853 /* Initialize ECT byte counter to 1 to distinguish zeroing of options */
1854 tp->t_aecn.t_rcv_ect1_bytes = tp->t_aecn.t_rcv_ect0_bytes = 1;
1855 tp->t_aecn.t_snd_ect1_bytes = tp->t_aecn.t_snd_ect0_bytes = 1;
1856 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_requested;
1857 } else {
1858 /*
1859 * If AccECN is not enabled, ignore
1860 * the TH_AE bit and do Legacy ECN-setup
1861 */
1862 tp->ecn_flags |= (TE_SETUPRECEIVED | TE_SENDIPECT);
1863 }
break;
1864 default:
1865 /* Forward Compatibility */
1866 /* Accurate ECN */
1867 if (TCP_L4S_ENABLED(tp)) {
1868 switch (ip_ecn) {
1869 case IPTOS_ECN_NOTECT:
1870 tp->ecn_flags |= TE_ACE_SETUP_NON_ECT;
1871 break;
1872 case IPTOS_ECN_ECT1:
1873 tp->ecn_flags |= TE_ACE_SETUP_ECT1;
1874 break;
1875 case IPTOS_ECN_ECT0:
1876 tp->ecn_flags |= TE_ACE_SETUP_ECT0;
1877 break;
1878 case IPTOS_ECN_CE:
1879 tp->ecn_flags |= TE_ACE_SETUP_CE;
1880 break;
1881 }
1882 /*
1883 * We are not yet committing to send IP ECT packets when
1884 * Accurate ECN is enabled
1885 */
1886 tp->ecn_flags |= (TE_ACE_SETUPRECEIVED);
1887
1888 /* Initialize ECT byte counter to 1 to distinguish zeroing of options */
1889 tp->t_aecn.t_rcv_ect1_bytes = tp->t_aecn.t_rcv_ect0_bytes = 1;
1890 tp->t_aecn.t_snd_ect1_bytes = tp->t_aecn.t_snd_ect0_bytes = 1;
1891 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_requested;
1892 }
1893 break;
1894 }
1895 }
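
/*
 * For reference, how the SYN's ECN handshake bits map to the cases
 * above (assuming TH_ACE == (TH_AE | TH_CWR | TH_ECE), as in the
 * Accurate ECN drafts):
 *
 *   CWR ECE AE    meaning
 *    0   0   0    no ECN requested
 *    1   1   0    legacy RFC 3168 ECN-setup SYN
 *    1   1   1    Accurate ECN handshake (TH_ACE)
 *   other         reserved combinations, treated like AccECN requests
 *                 for forward compatibility (default case)
 */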
1896
1897 static uint32_t
1898 tcp_process_ace_field(struct tcpcb *tp, uint32_t pkts_acked, uint64_t old_sceb, uint8_t ace)
1899 {
1900 /* Congestion was experienced if delta_cep > 0 */
1901 uint32_t delta = 0, safe_delta = 0;
1902 delta = (ace + TCP_ACE_DIV -
1903 (tp->t_aecn.t_snd_ce_packets & TCP_ACE_MASK)) & TCP_ACE_MASK;
1904 if (pkts_acked <= TCP_ACE_MASK) {
1905 return delta;
1906 }
1907
1908 uint64_t d_ceb = tp->t_aecn.t_snd_ce_bytes - old_sceb;
1909 safe_delta = pkts_acked - ((pkts_acked - delta) & TCP_ACE_MASK);
1910
1911 if (d_ceb == 0 || d_ceb < safe_delta * tp->t_maxseg >> 1) {
1912 return delta;
1913 }
1914
1915 return safe_delta;
1916 }
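
/*
 * Worked example of the mod-8 ACE arithmetic above, assuming
 * TCP_ACE_DIV == 8 and TCP_ACE_MASK == 7 for the 3-bit ACE field
 * (values are illustrative):
 *
 *   t_snd_ce_packets = 13        -> low 3 bits = 5
 *   received ace     = 1
 *   delta = (1 + 8 - 5) & 7 = 4  -> the peer's counter wrapped 5 -> 1
 *
 * When pkts_acked > 7 the true increment is ambiguous (delta, delta + 8,
 * delta + 16, ...). safe_delta picks the largest candidate that is
 * <= pkts_acked and congruent to delta mod 8, e.g. pkts_acked = 12 and
 * delta = 4 give safe_delta = 12 - ((12 - 4) & 7) = 12; the CE-byte
 * delta (d_ceb) then arbitrates between the two estimates.
 */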
1917
1918 /* Returns the number of CE marked bytes */
1919 static uint32_t
1920 tcp_process_accecn_options(struct tcpcb *tp, struct tcpopt *to)
1921 {
1922 int delta = 0;
1923 uint32_t ce_bytes = 0;
1924
1925 if (to->to_num_accecn >= 1) {
1926 delta = ntoh24(to->to_accecn + 0);
1927 if (to->to_accecn_order == 0) {
1928 delta = (delta + TCP_ACO_DIV -
1929 (tp->t_aecn.t_snd_ect0_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
1930 if (delta < 0) {
1931 os_log_error(OS_LOG_DEFAULT, "delta for AccECN0 options (ECT0 bytes) can't be negative");
1932 }
1933 tp->t_aecn.t_snd_ect0_bytes += delta;
1934 } else {
1935 delta = (delta + TCP_ACO_DIV -
1936 (tp->t_aecn.t_snd_ect1_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
1937 if (delta < 0) {
1938 os_log_error(OS_LOG_DEFAULT, "delta for AccECN1 options (ECT1 bytes) can't be negative");
1939 }
1940 tp->t_aecn.t_snd_ect1_bytes += delta;
1941 }
1942 }
1943 if (to->to_num_accecn >= 2) {
1944 delta = ntoh24(to->to_accecn + 1 * TCPOLEN_ACCECN_COUNTER);
1945 delta = (delta + TCP_ACO_DIV -
1946 (tp->t_aecn.t_snd_ce_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
1947 if (delta < 0) {
1948 os_log_error(OS_LOG_DEFAULT, "delta for AccECN options (CE bytes) can't be negative");
1949 }
1950 tp->t_aecn.t_snd_ce_bytes += delta;
1951 ce_bytes = delta;
1952 }
1953 if (to->to_num_accecn >= 3) {
1954 delta = ntoh24(to->to_accecn + 2 * TCPOLEN_ACCECN_COUNTER);
1955 if (to->to_accecn_order == 0) {
1956 delta = (delta + TCP_ACO_DIV -
1957 (tp->t_aecn.t_snd_ect1_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
1958 if (delta < 0) {
1959 os_log_error(OS_LOG_DEFAULT, "delta for AccECN0 options (ECT1 bytes) can't be negative");
1960 }
1961 tp->t_aecn.t_snd_ect1_bytes += delta;
1962 } else {
1963 delta = (delta + TCP_ACO_DIV -
1964 (tp->t_aecn.t_snd_ect0_bytes & TCP_ACO_MASK)) & TCP_ACO_MASK;
1965 if (delta < 0) {
1966 os_log_error(OS_LOG_DEFAULT, "delta for AccECN1 options (ECT0 bytes) can't be negative");
1967 }
1968 tp->t_aecn.t_snd_ect0_bytes += delta;
1969 }
1970 }
1971
1972 return ce_bytes;
1973 }
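
/*
 * Worked example of the 24-bit option-counter wrap handled above,
 * assuming TCP_ACO_DIV == (1 << 24) and TCP_ACO_MASK == 0xffffff
 * (values are illustrative):
 *
 *   t_snd_ect0_bytes & TCP_ACO_MASK = 0xfffff0  (local 24-bit view)
 *   ntoh24() value from the option  = 0x000010  (peer's 24-bit counter)
 *   delta = (0x000010 + 0x1000000 - 0xfffff0) & 0xffffff = 0x20
 *
 * i.e. the peer's counter wrapped past 2^24 and 32 newly received
 * ECT0-marked bytes are credited to t_snd_ect0_bytes.
 */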
1974
1975 static void
1976 tcp_process_accecn(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
1977 uint32_t pkts_acked, uint8_t ace)
1978 {
1979 if (tp->t_aecn.accecn_processed) {
1980 os_log(OS_LOG_DEFAULT, "already processed AccECN field/options for this ACK");
1981 return;
1982 }
1983
1984 uint64_t old_sceb = tp->t_aecn.t_snd_ce_bytes;
1985 uint32_t new_ce_bytes = tcp_process_accecn_options(tp, to);
1986 uint32_t delta = tcp_process_ace_field(tp, pkts_acked, old_sceb, ace);
1987 tp->t_aecn.t_snd_ce_packets += delta;
1988 tp->t_aecn.t_delta_ce_packets = delta;
1989
1990 /* Update the time for this newly acked data or control packet */
1991 if ((to->to_flags & TOF_TS) != 0 && (to->to_tsecr != 0) &&
1992 TSTMP_GEQ(to->to_tsecr, tp->t_last_ack_tsecr)) {
1993 tp->t_last_ack_tsecr = to->to_tsecr;
1994 }
1995
1996 if (delta > 0) {
1997 tp->ecn_flags |= (TE_INRECOVERY);
1998 tp->total_ect_packets_marked += delta;
1999
2000 /* update the stats */
2001 tcpstat.tcps_ecn_ace_recv_ce += tp->t_aecn.t_delta_ce_packets;
2002 /* CE packets counter starts at 5 */
2003 tp->t_ecn_capable_packets_marked = tp->t_aecn.t_snd_ce_packets - 5;
2004 tcp_ccdbg_trace(tp, th, TCP_CC_ECN_RCVD);
2005 }
2006
2007 if (CC_ALGO(tp)->process_ecn != NULL) {
2008 CC_ALGO(tp)->process_ecn(tp, th, new_ce_bytes, tp->total_ect_packets_marked,
2009 tp->total_ect_packets_acked);
2010 }
2011
2012 tp->t_aecn.accecn_processed = 1;
2013 }
2014
2015 void
2016 tcp_input(struct mbuf *m, int off0)
2017 {
2018 int exiting_fr = 0;
2019 struct tcphdr *th;
2020 struct ip *ip = NULL;
2021 struct inpcb *inp;
2022 u_char *optp = NULL;
2023 int optlen = 0;
2024 int tlen, off;
2025 int drop_hdrlen;
2026 struct tcpcb *tp = 0;
2027 int thflags;
2028 struct socket *so = 0;
2029 int todrop, acked = 0, ourfinisacked, needoutput = 0;
2030 int read_wakeup = 0;
2031 int write_wakeup = 0;
2032 struct in_addr laddr;
2033 struct in6_addr laddr6;
2034 int dropsocket = 0;
2035 int iss = 0, nosock = 0;
2036 uint32_t tiwin, sack_bytes_acked = 0;
2037 uint32_t highest_sacked_seq = 0;
2038 struct tcpopt to; /* options in this segment */
2039 u_char ip_ecn = IPTOS_ECN_NOTECT;
2040 unsigned int ifscope;
2041 uint8_t isconnected, isdisconnected;
2042 struct ifnet *ifp = m->m_pkthdr.rcvif;
2043 int segment_count = m->m_pkthdr.rx_seg_cnt ? : 1;
2044 int win;
2045 u_int16_t pf_tag = 0;
2046 #if MPTCP
2047 struct mptcb *mp_tp = NULL;
2048 #endif /* MPTCP */
2049 stats_functional_type ifnet_count_type = IFNET_COUNT_TYPE(ifp);
2050 boolean_t recvd_dsack = FALSE;
2051 boolean_t dsack_tlp = false;
2052 struct tcp_respond_args tra;
2053 int prev_t_state;
2054 boolean_t check_cfil = cfil_filter_present();
2055 bool findpcb_iterated = false;
2056 bool rack_loss_detected = false;
2057 bool is_th_swapped = false;
2058 /*
2059 * The mbuf may be freed after it has been added to the receive socket
2060 * buffer or the reassembly queue, so we reinitialize th to point to a
2061 * safe copy of the TCP header
2062 */
2063 struct tcphdr saved_tcphdr = {};
2064 /*
2065 * Save copy of the IPv4/IPv6 header.
2066 * Note: use array of uint32_t to silence compiler warning when casting
2067 * to a struct ip6_hdr pointer.
2068 */
2069 #define MAX_IPWORDS ((sizeof(struct ip) + MAX_IPOPTLEN) / sizeof(uint32_t))
2070 uint32_t saved_hdr[MAX_IPWORDS];
2071
2072 #define TCP_INC_VAR(stat, npkts) do { \
2073 stat += npkts; \
2074 } while (0)
2075 drop_reason_t drop_reason = DROP_REASON_UNSPECIFIED;
2076
2077 if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY) {
2078 segment_count = 1;
2079 }
2080 TCP_INC_VAR(tcpstat.tcps_rcvtotal, segment_count);
2081
2082 struct ip6_hdr *ip6 = NULL;
2083 int isipv6;
2084 struct proc *kernel_proc = current_proc();
2085
2086 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
2087
2088 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
2089 bzero((char *)&to, sizeof(to));
2090
2091 m_add_crumb(m, PKT_CRUMB_TCP_INPUT);
2092
2093 if (m->m_flags & M_PKTHDR) {
2094 pf_tag = m_pftag(m)->pftag_tag;
2095 }
2096
2097 if (isipv6) {
2098 /*
2099 * Expect 32-bit aligned data pointer on
2100 * strict-align platforms
2101 */
2102 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2103
2104 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
2105 ip6 = mtod(m, struct ip6_hdr *);
2106 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
2107 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
2108
2109 if (tcp_input_checksum(AF_INET6, m, th, off0, tlen)) {
2110 TCP_LOG_DROP_PKT(ip6, th, ifp, "IPv6 bad tcp checksum");
2111 drop_reason = DROP_REASON_TCP_CHECKSUM_INCORRECT;
2112 goto dropnosock;
2113 }
2114
2115 KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
2116 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
2117 th->th_seq, th->th_ack, th->th_win);
2118 /*
2119 * Be proactive about an unspecified IPv6 address in the source.
2120 * As we use all-zero to indicate an unbound/unconnected pcb,
2121 * an unspecified IPv6 address can be used to confuse us.
2122 *
2123 * Note that packets with an unspecified IPv6 destination are
2124 * already dropped in ip6_input.
2125 */
2126 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
2127 /* XXX stat */
2128 IF_TCP_STATINC(ifp, unspecv6);
2129 TCP_LOG_DROP_PKT(ip6, th, ifp, "src IPv6 address unspecified");
2130 drop_reason = DROP_REASON_TCP_SRC_ADDR_UNSPECIFIED;
2131 goto dropnosock;
2132 }
2133 DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
2134 struct ip6_hdr *, ip6, struct tcpcb *, NULL,
2135 struct tcphdr *, th);
2136
2137 ip_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
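/*
 * The ECN codepoint is the low two bits of the IPv6 Traffic Class,
 * i.e. bits 21:20 of the version/TC/flow-label word. For example
 * (illustrative value), ip6_flow = htonl(0x60300000) is version 6
 * with TC 0x03, so ip_ecn = (0x60300000 >> 20) & 0x03 = IPTOS_ECN_CE.
 */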
2138 } else {
2139 /*
2140 * Get IP and TCP header together in first mbuf.
2141 * Note: IP leaves IP header in first mbuf.
2142 */
2143 if (off0 > sizeof(struct ip)) {
2144 ip_stripoptions(m);
2145 off0 = sizeof(struct ip);
2146 }
2147 if (m->m_len < sizeof(struct tcpiphdr)) {
2148 if ((m = m_pullup(m, sizeof(struct tcpiphdr))) == 0) {
2149 tcpstat.tcps_rcvshort++;
2150 return;
2151 }
2152 }
2153
2154 /* Expect 32-bit aligned data pointer on strict-align platforms */
2155 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2156
2157 ip = mtod(m, struct ip *);
2158 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
2159 tlen = ip->ip_len;
2160
2161 if (tcp_input_checksum(AF_INET, m, th, off0, tlen)) {
2162 TCP_LOG_DROP_PKT(ip, th, ifp, "IPv4 bad tcp checksum");
2163 drop_reason = DROP_REASON_TCP_CHECKSUM_INCORRECT;
2164 goto dropnosock;
2165 }
2166
2167 /* Re-initialization for later version check */
2168 ip->ip_v = IPVERSION;
2169 ip_ecn = (ip->ip_tos & IPTOS_ECN_MASK);
2170
2171 DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
2172 struct ip *, ip, struct tcpcb *, NULL, struct tcphdr *, th);
2173
2174 KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
2175 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
2176 th->th_seq, th->th_ack, th->th_win);
2177 }
2178
2179 #define TCP_LOG_HDR (isipv6 ? (void *)ip6 : (void *)ip)
2180
2181 /*
2182 * Check that TCP offset makes sense,
2183 * pull out TCP options and adjust length.
2184 */
2185 off = th->th_off << 2;
2186 if (off < sizeof(struct tcphdr) || off > tlen) {
2187 tcpstat.tcps_rcvbadoff++;
2188 IF_TCP_STATINC(ifp, badformat);
2189 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "bad tcp offset");
2190 drop_reason = DROP_REASON_TCP_OFFSET_INCORRECT;
2191 goto dropnosock;
2192 }
2193 tlen -= off; /* tlen is used instead of ti->ti_len */
2194 if (off > sizeof(struct tcphdr)) {
2195 if (isipv6) {
2196 IP6_EXTHDR_CHECK(m, off0, off, return );
2197 ip6 = mtod(m, struct ip6_hdr *);
2198 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
2199 } else {
2200 if (m->m_len < sizeof(struct ip) + off) {
2201 if ((m = m_pullup(m, sizeof(struct ip) + off)) == 0) {
2202 tcpstat.tcps_rcvshort++;
2203 return;
2204 }
2205 ip = mtod(m, struct ip *);
2206 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
2207 }
2208 }
2209 optlen = off - sizeof(struct tcphdr);
2210 optp = (u_char *)(th + 1);
2211 /*
2212 * Do quick retrieval of timestamp options ("options
2213 * prediction?"). If timestamp is the only option and it's
2214 * formatted as recommended in RFC 1323 appendix A, we
2215 * quickly get the values now and not bother calling
2216 * tcp_dooptions(), etc.
2217 */
2218 if ((optlen == TCPOLEN_TSTAMP_APPA ||
2219 (optlen > TCPOLEN_TSTAMP_APPA &&
2220 optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
2221 *(u_int32_t *)(void *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
2222 (th->th_flags & TH_SYN) == 0) {
2223 to.to_flags |= TOF_TS;
2224 to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4));
2225 to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8));
2226 optp = NULL; /* we've parsed the options */
2227 }
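/*
 * The prediction above matches the 12-byte layout from RFC 1323
 * Appendix A (now RFC 7323):
 *
 *   0x01 0x01 0x08 0x0a    NOP, NOP, kind = 8 (timestamps), len = 10
 *   4 bytes                TSval
 *   4 bytes                TSecr
 *
 * i.e. the first option word equals htonl(TCPOPT_TSTAMP_HDR), with
 * TCPOPT_TSTAMP_HDR == 0x0101080a; anything else falls through to the
 * full tcp_dooptions() parse later.
 */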
2228 }
2229 thflags = th->th_flags;
2230
2231 /*
2232 * Drop all packets with both the SYN and FIN bits set.
2233 * This prevents e.g. nmap from identifying the TCP/IP stack.
2234 *
2235 * This is a violation of the TCP specification.
2236 */
2237 if ((thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) {
2238 IF_TCP_STATINC(ifp, synfin);
2239 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "drop SYN FIN");
2240 drop_reason = DROP_REASON_TCP_SYN_FIN;
2241 goto dropnosock;
2242 }
2243
2244 /*
2245 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
2246 * until after ip6_savecontrol() is called and before other functions
2247 * which don't want those proto headers.
2248 * Because ip6_savecontrol() is going to parse the mbuf to
2249 * search for data to be passed up to user-land, it wants mbuf
2250 * parameters to be unchanged.
2251 */
2252 drop_hdrlen = off0 + off;
2253
2254 /* Since this is an entry point for input processing of tcp packets, we
2255 * can update the tcp clock here.
2256 */
2257 calculate_tcp_clock();
2258
2259 /*
2260 * Record the interface where this segment arrived on; this does not
2261 * affect normal data output (for non-detached TCP) as it provides a
2262 * hint about which route and interface to use for sending in the
2263 * absence of a PCB, when scoped routing (and thus source interface
2264 * selection) are enabled.
2265 */
2266 if ((m->m_pkthdr.pkt_flags & PKTF_LOOP) || m->m_pkthdr.rcvif == NULL) {
2267 ifscope = IFSCOPE_NONE;
2268 } else {
2269 ifscope = m->m_pkthdr.rcvif->if_index;
2270 }
2271
2272 /*
2273 * Convert TCP protocol specific fields to host format.
2274 */
2275
2276 #if BYTE_ORDER != BIG_ENDIAN
2277 NTOHL(th->th_seq);
2278 NTOHL(th->th_ack);
2279 NTOHS(th->th_win);
2280 NTOHS(th->th_urp);
2281 is_th_swapped = true;
2282 #endif
2283
2284 /*
2285 * Locate pcb for segment.
2286 */
2287 findpcb:
2288
2289 isconnected = FALSE;
2290 isdisconnected = FALSE;
2291
2292 if (isipv6) {
2293 inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport, ip6_input_getsrcifscope(m),
2294 &ip6->ip6_dst, th->th_dport, ip6_input_getdstifscope(m), 1,
2295 m->m_pkthdr.rcvif);
2296 } else {
2297 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
2298 ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif);
2299 }
2300
2301 /*
2302 * Use the interface scope information from the PCB for outbound
2303 * segments. If the PCB isn't present and if scoped routing is
2304 * enabled, tcp_respond will use the scope of the interface where
2305 * the segment arrived on.
2306 */
2307 if (inp != NULL && (inp->inp_flags & INP_BOUND_IF)) {
2308 ifscope = inp->inp_boundifp->if_index;
2309 }
2310
2311 /*
2312 * If the state is CLOSED (i.e., TCB does not exist) then
2313 * all data in the incoming segment is discarded.
2314 * If the TCB exists but is in CLOSED state, it is embryonic,
2315 * but should either do a listen or a connect soon.
2316 */
2317 if (inp == NULL) {
2318 if (log_in_vain) {
2319 char dbuf[MAX_IPv6_STR_LEN], sbuf[MAX_IPv6_STR_LEN];
2320
2321 if (isipv6) {
2322 inet_ntop(AF_INET6, &ip6->ip6_dst, dbuf, sizeof(dbuf));
2323 inet_ntop(AF_INET6, &ip6->ip6_src, sbuf, sizeof(sbuf));
2324 } else {
2325 inet_ntop(AF_INET, &ip->ip_dst, dbuf, sizeof(dbuf));
2326 inet_ntop(AF_INET, &ip->ip_src, sbuf, sizeof(sbuf));
2327 }
2328 switch (log_in_vain) {
2329 case 1:
2330 if (thflags & TH_SYN) {
2331 log(LOG_INFO,
2332 "Connection attempt to TCP %s:%d from %s:%d\n",
2333 dbuf, ntohs(th->th_dport),
2334 sbuf,
2335 ntohs(th->th_sport));
2336 }
2337 break;
2338 case 2:
2339 log(LOG_INFO,
2340 "Connection attempt to TCP %s:%d from %s:%d flags:0x%x\n",
2341 dbuf, ntohs(th->th_dport), sbuf,
2342 ntohs(th->th_sport), thflags);
2343 break;
2344 case 3:
2345 case 4:
2346 if ((thflags & TH_SYN) && !(thflags & TH_ACK) &&
2347 !(m->m_flags & (M_BCAST | M_MCAST)) &&
2348 ((isipv6 && !in6_are_addr_equal_scoped(&ip6->ip6_dst, &ip6->ip6_src, ip6_input_getdstifscope(m), ip6_input_getsrcifscope(m))) ||
2349 (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr))) {
2350 log_in_vain_log((LOG_INFO,
2351 "Stealth Mode connection attempt to TCP %s:%d from %s:%d\n",
2352 dbuf, ntohs(th->th_dport),
2353 sbuf,
2354 ntohs(th->th_sport)));
2355 }
2356 break;
2357 default:
2358 break;
2359 }
2360 }
2361 if (blackhole) {
2362 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP) {
2363 switch (blackhole) {
2364 case 1:
2365 if (thflags & TH_SYN) {
2366 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "blackhole 1 syn for closed port");
2367 goto dropnosock;
2368 }
2369 break;
2370 case 2:
2371 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "blackhole 2 closed port");
2372 goto dropnosock;
2373 default:
2374 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "blackhole closed port");
2375 goto dropnosock;
2376 }
2377 }
2378 }
2379 if ((tcp_link_heuristics_flags & TCP_LINK_HEUR_STEALTH) != 0 &&
2380 if_link_heuristics_enabled(ifp)) {
2381 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "link heuristics");
2382 IF_TCP_STATINC(ifp, linkheur_stealthdrop);
2383 goto dropnosock;
2384 }
2385 IF_TCP_STATINC(ifp, noconnnolist);
2386 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "closed port");
2387 goto dropwithresetnosock;
2388 }
2389 so = inp->inp_socket;
2390 if (so == NULL) {
2391 /* This case shouldn't happen, as the socket shouldn't be NULL
2392 * if inp_state isn't set to INPCB_STATE_DEAD.
2393 * But just in case, we pretend we didn't find the socket if we hit this case,
2394 * as this isn't cause for a panic (the socket might be leaked, however)...
2395 */
2396 inp = NULL;
2397 #if TEMPDEBUG
2398 printf("tcp_input: no more socket for inp=%x. This shouldn't happen\n", inp);
2399 #endif
2400 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "inp_socket NULL");
2401 drop_reason = DROP_REASON_TCP_NO_SOCK;
2402 goto dropnosock;
2403 }
2404
2405 socket_lock(so, 1);
2406 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2407 socket_unlock(so, 1);
2408 inp = NULL; // pretend we didn't find it
2409 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "inp state WNT_STOPUSING");
2410 drop_reason = DROP_REASON_TCP_NO_SOCK;
2411 goto dropnosock;
2412 }
2413
2414 if (!isipv6 && inp->inp_faddr.s_addr != INADDR_ANY) {
2415 if (inp->inp_faddr.s_addr != ip->ip_src.s_addr ||
2416 inp->inp_laddr.s_addr != ip->ip_dst.s_addr ||
2417 inp->inp_fport != th->th_sport ||
2418 inp->inp_lport != th->th_dport) {
2419 os_log_error(OS_LOG_DEFAULT, "%s 5-tuple does not match: %u:%u %u:%u\n",
2420 __func__,
2421 ntohs(inp->inp_fport), ntohs(th->th_sport),
2422 ntohs(inp->inp_lport), ntohs(th->th_dport));
2423 if (findpcb_iterated) {
2424 drop_reason = DROP_REASON_TCP_PCB_MISMATCH;
2425 goto drop;
2426 }
2427 findpcb_iterated = true;
2428 socket_unlock(so, 1);
2429 inp = NULL;
2430 goto findpcb;
2431 }
2432 } else if (isipv6 && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
2433 if (!in6_are_addr_equal_scoped(&inp->in6p_faddr, &ip6->ip6_src, inp->inp_fifscope, ip6_input_getsrcifscope(m)) ||
2434 !in6_are_addr_equal_scoped(&inp->in6p_laddr, &ip6->ip6_dst, inp->inp_lifscope, ip6_input_getdstifscope(m)) ||
2435 inp->inp_fport != th->th_sport ||
2436 inp->inp_lport != th->th_dport) {
2437 os_log_error(OS_LOG_DEFAULT, "%s 5-tuple does not match: %u:%u %u:%u\n",
2438 __func__,
2439 ntohs(inp->inp_fport), ntohs(th->th_sport),
2440 ntohs(inp->inp_lport), ntohs(th->th_dport));
2441 if (findpcb_iterated) {
2442 drop_reason = DROP_REASON_TCP_PCB_MISMATCH;
2443 goto drop;
2444 }
2445 findpcb_iterated = true;
2446 socket_unlock(so, 1);
2447 inp = NULL;
2448 goto findpcb;
2449 }
2450 }
2451
2452 tp = intotcpcb(inp);
2453 if (tp == NULL) {
2454 IF_TCP_STATINC(ifp, noconnlist);
2455 TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "tp is NULL");
2456 drop_reason = DROP_REASON_TCP_NO_PCB;
2457 goto dropwithreset;
2458 }
2459
2460 /* Now that we found the tcpcb, we can adjust the TCP timestamp */
2461 if (to.to_flags & TOF_TS) {
2462 to.to_tsecr -= tp->t_ts_offset;
2463 }
2464
2465 if (tp->t_state == TCPS_CLOSED) {
2466 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "tp state TCPS_CLOSED");
2467 drop_reason = DROP_REASON_TCP_CLOSED;
2468 goto drop;
2469 }
2470
2471 #if NECP
2472 if (so->so_state & SS_ISCONNECTED) {
2473 // Connected TCP sockets have a fully-bound local and remote,
2474 // so the policy check doesn't need to override addresses
2475 if (!necp_socket_is_allowed_to_send_recv(inp, ifp, pf_tag, NULL, NULL, NULL, NULL)) {
2476 TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false);
2477 IF_TCP_STATINC(ifp, badformat);
2478 drop_reason = DROP_REASON_TCP_NECP;
2479 goto drop;
2480 }
2481 } else {
2482 /*
2483 * If the proc_uuid_policy table has been updated since the last use
2484 * of the listening socket (i.e., the proc_uuid_policy_table_gencount
2485 * has been updated), the flags in the socket may be out of date.
2486 * If INP2_WANT_APP_POLICY is stale, inbound packets may
2487 * be dropped by NECP if the socket should now match a per-app
2488 * exception policy.
2489 * In order to avoid this refresh the proc_uuid_policy state to
2490 * potentially recalculate the socket's flags before checking
2491 * with NECP.
2492 */
2493 (void) inp_update_policy(inp);
2494
2495 if (isipv6) {
2496 if (!necp_socket_is_allowed_to_send_recv_v6(inp,
2497 th->th_dport, th->th_sport, &ip6->ip6_dst,
2498 &ip6->ip6_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) {
2499 TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false);
2500 IF_TCP_STATINC(ifp, badformat);
2501 drop_reason = DROP_REASON_TCP_NECP;
2502 goto drop;
2503 }
2504 } else {
2505 if (!necp_socket_is_allowed_to_send_recv_v4(inp,
2506 th->th_dport, th->th_sport, &ip->ip_dst, &ip->ip_src,
2507 ifp, pf_tag, NULL, NULL, NULL, NULL)) {
2508 TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false);
2509 IF_TCP_STATINC(ifp, badformat);
2510 drop_reason = DROP_REASON_TCP_NECP;
2511 goto drop;
2512 }
2513 }
2514 }
2515 #endif /* NECP */
2516
2517 prev_t_state = tp->t_state;
2518
2519 /* If none of the FIN|SYN|RST|ACK flag is set, drop */
2520 if ((thflags & TH_ACCEPT) == 0) {
2521 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 TH_ACCEPT == 0");
2522 drop_reason = DROP_REASON_TCP_FLAGS_INCORRECT;
2523 goto drop;
2524 }
2525
2526 /* Initialize highest sacked seq to avoid using 0 as initial value */
2527 highest_sacked_seq = th->th_ack;
2528
2529 /* Unscale the window into a 32-bit value. */
2530 if ((thflags & TH_SYN) == 0) {
2531 tiwin = th->th_win << tp->snd_scale;
2532 } else {
2533 tiwin = th->th_win;
2534 }
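
/*
 * Window scaling (RFC 7323) never applies to segments carrying SYN,
 * so the raw 16-bit value is used there. As an illustrative example,
 * with snd_scale = 6 a received th_win of 512 advertises
 * 512 << 6 = 32768 bytes on a non-SYN segment.
 */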
2535
2536 /* Avoid processing packets while closing a listen socket */
2537 if (tp->t_state == TCPS_LISTEN &&
2538 (so->so_options & SO_ACCEPTCONN) == 0) {
2539 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "closing a listening socket");
2540 drop_reason = DROP_REASON_TCP_LISTENER_CLOSING;
2541 goto drop;
2542 }
2543
2544 if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
2545 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_WAKE_PKT);
2546 }
2547
2548 if (so->so_options & (SO_DEBUG | SO_ACCEPTCONN)) {
2549 if (so->so_options & SO_ACCEPTCONN) {
2550 struct tcpcb *tp0 = tp;
2551 struct socket *so2;
2552 struct socket *oso;
2553 struct sockaddr_storage from;
2554 struct sockaddr_storage to2;
2555 struct inpcb *oinp = sotoinpcb(so);
2556 struct ifnet *head_ifscope;
2557 bool head_nocell, head_recvanyif,
2558 head_noexpensive, head_awdl_unrestricted,
2559 head_intcoproc_allowed, head_external_port,
2560 head_noconstrained, head_management_allowed,
2561 head_ultra_constrained_allowed;
2562
2563 /* Get listener's bound-to-interface, if any */
2564 head_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
2565 inp->inp_boundifp : NULL;
2566 /* Get listener's no-cellular information, if any */
2567 head_nocell = INP_NO_CELLULAR(inp);
2568 /* Get listener's recv-any-interface, if any */
2569 head_recvanyif = (inp->inp_flags & INP_RECV_ANYIF);
2570 /* Get listener's no-expensive information, if any */
2571 head_noexpensive = INP_NO_EXPENSIVE(inp);
2572 head_noconstrained = INP_NO_CONSTRAINED(inp);
2573 head_awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
2574 head_intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp);
2575 head_external_port = (inp->inp_flags2 & INP2_EXTERNAL_PORT);
2576 head_management_allowed = INP_MANAGEMENT_ALLOWED(inp);
2577 head_ultra_constrained_allowed = INP_ULTRA_CONSTRAINED_ALLOWED(inp);
2578
2579 /*
2580 * If the state is LISTEN then ignore segment if it contains an RST.
2581 * If the segment contains an ACK then it is bad and send a RST.
2582 * If it does not contain a SYN then it is not interesting; drop it.
2583 * If it is from this socket, drop it; it must be forged.
2584 */
2585 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) {
2586 IF_TCP_STATINC(ifp, listbadsyn);
2587
2588 if (thflags & TH_RST) {
2589 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false,
2590 thflags & TH_SYN ? "ignore SYN with RST" : "ignore RST");
2591 drop_reason = DROP_REASON_TCP_SYN_RST;
2592 goto drop;
2593 }
2594 if (thflags & TH_ACK) {
2595 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false,
2596 thflags & TH_SYN ? "bad SYN with ACK" : "bad ACK");
2597 tp = NULL;
2598 tcpstat.tcps_badsyn++;
2599 drop_reason = DROP_REASON_TCP_SYN_ACK_LISTENER;
2600 goto dropwithreset;
2601 }
2602
2603 /* We come here if there is no SYN set */
2604 tcpstat.tcps_badsyn++;
2605 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad SYN");
2606 drop_reason = DROP_REASON_TCP_LISTENER_NO_SYN;
2607 goto drop;
2608 }
2609 KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START, 0, 0, 0, 0, 0);
2610 if (th->th_dport == th->th_sport) {
2611 if (isipv6) {
2612 if (in6_are_addr_equal_scoped(&ip6->ip6_dst, &ip6->ip6_src, ip6_input_getdstifscope(m), ip6_input_getsrcifscope(m))) {
2613 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad tuple same port");
2614 drop_reason = DROP_REASON_TCP_SAME_PORT;
2615 goto drop;
2616 }
2617 } else if (ip->ip_dst.s_addr == ip->ip_src.s_addr) {
2618 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad tuple same IPv4 address");
2619 drop_reason = DROP_REASON_TCP_SAME_PORT;
2620 goto drop;
2621 }
2622 }
2623 /*
2624 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
2625 * in_broadcast() should never return true on a received
2626 * packet with M_BCAST not set.
2627 *
2628 * Packets with a multicast source address should also
2629 * be discarded.
2630 */
2631 if (m->m_flags & (M_BCAST | M_MCAST)) {
2632 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "mbuf M_BCAST | M_MCAST");
2633 drop_reason = DROP_REASON_TCP_BCAST_MCAST;
2634 goto drop;
2635 }
2636 if (isipv6) {
2637 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2638 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
2639 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "IN6_IS_ADDR_MULTICAST");
2640 drop_reason = DROP_REASON_TCP_BCAST_MCAST;
2641 goto drop;
2642 }
2643 } else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2644 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2645 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2646 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
2647 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "multicast or broadcast address");
2648 drop_reason = DROP_REASON_TCP_BCAST_MCAST;
2649 goto drop;
2650 }
2651
2652
2653 /*
2654 * If deprecated addresses are forbidden,
2655 * we do not accept a SYN to a deprecated interface
2656 * address, to prevent any new inbound connection from
2657 * getting established.
2658 * When we do not accept the SYN, we send a TCP RST
2659 * with the deprecated source address (instead of dropping
2660 * it). We accept this compromise, as it is much better for the peer
2661 * to receive a RST, and the RST will be the final packet
2662 * of the exchange.
2663 *
2664 * If we do not forbid deprecated addresses, we accept
2665 * the SYN packet. RFC 4862 forbids dropping SYN in
2666 * this case.
2667 */
2668 if (isipv6 && !ip6_use_deprecated) {
2669 uint32_t ia6_flags;
2670
2671 if (ip6_getdstifaddr_info(m, NULL,
2672 &ia6_flags) == 0) {
2673 if (ia6_flags & IN6_IFF_DEPRECATED) {
2674 tp = NULL;
2675 IF_TCP_STATINC(ifp, deprecate6);
2676 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "deprecated IPv6 address");
2677 drop_reason = DROP_REASON_TCP_DEPRECATED_ADDR;
2678 goto dropwithreset;
2679 }
2680 }
2681 }
2682 if (so->so_filt || check_cfil) {
2683 if (isipv6) {
2684 struct sockaddr_in6 *sin6 = SIN6(&from);
2685
2686 sin6->sin6_len = sizeof(*sin6);
2687 sin6->sin6_family = AF_INET6;
2688 sin6->sin6_port = th->th_sport;
2689 sin6->sin6_flowinfo = 0;
2690 sin6->sin6_addr = ip6->ip6_src;
2691 sin6->sin6_scope_id = 0;
2692
2693 sin6 = SIN6(&to2);
2694
2695 sin6->sin6_len = sizeof(struct sockaddr_in6);
2696 sin6->sin6_family = AF_INET6;
2697 sin6->sin6_port = th->th_dport;
2698 sin6->sin6_flowinfo = 0;
2699 sin6->sin6_addr = ip6->ip6_dst;
2700 sin6->sin6_scope_id = 0;
2701 } else {
2702 struct sockaddr_in *sin = SIN(&from);
2703
2704 sin->sin_len = sizeof(*sin);
2705 sin->sin_family = AF_INET;
2706 sin->sin_port = th->th_sport;
2707 sin->sin_addr = ip->ip_src;
2708
2709 sin = SIN(&to2);
2710
2711 sin->sin_len = sizeof(struct sockaddr_in);
2712 sin->sin_family = AF_INET;
2713 sin->sin_port = th->th_dport;
2714 sin->sin_addr = ip->ip_dst;
2715 }
2716 }
2717
2718 if (so->so_filt) {
2719 so2 = sonewconn(so, 0, SA(&from));
2720 } else {
2721 so2 = sonewconn(so, 0, NULL);
2722 }
2723 if (so2 == 0) {
2724 tcpstat.tcps_listendrop++;
2725 if (tcp_dropdropablreq(so)) {
2726 if (so->so_filt) {
2727 so2 = sonewconn(so, 0, SA(&from));
2728 } else {
2729 so2 = sonewconn(so, 0, NULL);
2730 }
2731 }
2732 if (!so2) {
2733 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " listen drop");
2734 drop_reason = DROP_REASON_TCP_LISTENER_DROP;
2735 goto drop;
2736 }
2737 }
2738
2739 /* Point "inp" and "tp" in tandem to new socket */
2740 inp = (struct inpcb *)so2->so_pcb;
2741 tp = intotcpcb(inp);
2742
2743 oso = so;
2744 socket_unlock(so, 0); /* Unlock but keep a reference on listener for now */
2745
2746 so = so2;
2747 socket_lock(so, 1);
2748 /*
2749 * Mark socket as temporary until we're
2750 * committed to keeping it. The code at
2751 * ``drop'' and ``dropwithreset'' checks the
2752 * flag dropsocket to see if the temporary
2753 * socket created here should be discarded.
2754 * We mark the socket as discardable until
2755 * we're committed to it below in TCPS_LISTEN.
2756 * There are some error conditions in which we
2757 * have to drop the temporary socket.
2758 */
2759 dropsocket++;
2760 /*
2761 * Inherit INP_BOUND_IF from listener; testing if
2762 * head_ifscope is non-NULL is sufficient, since it
2763 * can only be set to a non-zero value earlier if
2764 * the listener has such a flag set.
2765 */
2766 if (head_ifscope != NULL) {
2767 inp->inp_flags |= INP_BOUND_IF;
2768 inp->inp_boundifp = head_ifscope;
2769 } else {
2770 inp->inp_flags &= ~INP_BOUND_IF;
2771 }
2772 /*
2773 * Inherit restrictions from listener.
2774 */
2775 if (head_nocell) {
2776 inp_set_nocellular(inp);
2777 }
2778 if (head_noexpensive) {
2779 inp_set_noexpensive(inp);
2780 }
2781 if (head_noconstrained) {
2782 inp_set_noconstrained(inp);
2783 }
2784 if (head_awdl_unrestricted) {
2785 inp_set_awdl_unrestricted(inp);
2786 }
2787 if (head_intcoproc_allowed) {
2788 inp_set_intcoproc_allowed(inp);
2789 }
2790 if (head_management_allowed) {
2791 inp_set_management_allowed(inp);
2792 }
2793 if (head_ultra_constrained_allowed) {
2794 inp_set_ultra_constrained_allowed(inp);
2795 }
2796 /*
2797 * Inherit {IN,IN6}_RECV_ANYIF from listener.
2798 */
2799 if (head_recvanyif) {
2800 inp->inp_flags |= INP_RECV_ANYIF;
2801 } else {
2802 inp->inp_flags &= ~INP_RECV_ANYIF;
2803 }
2804
2805 if (head_external_port) {
2806 inp->inp_flags2 |= INP2_EXTERNAL_PORT;
2807 }
2808 if (isipv6) {
2809 inp->in6p_laddr = ip6->ip6_dst;
2810 inp->inp_lifscope = in6_addr2scopeid(ifp, &inp->in6p_laddr);
2811 in6_verify_ifscope(&ip6->ip6_dst, inp->inp_lifscope);
2812 } else {
2813 inp->inp_vflag &= ~INP_IPV6;
2814 inp->inp_vflag |= INP_IPV4;
2815 inp->inp_laddr = ip->ip_dst;
2816 }
2817 inp->inp_lport = th->th_dport;
2818 if (in_pcbinshash(inp, SA(&from), 0) != 0) {
2819 /*
2820 * Undo the assignments above if we failed to
2821 * put the PCB on the hash lists.
2822 */
2823 if (isipv6) {
2824 inp->in6p_laddr = in6addr_any;
2825 inp->inp_lifscope = IFSCOPE_NONE;
2826 } else {
2827 inp->inp_laddr.s_addr = INADDR_ANY;
2828 }
2829 #if SKYWALK
2830 netns_release(&inp->inp_netns_token);
2831 #endif /* SKYWALK */
2832 inp->inp_lport = 0;
2833 socket_lock(oso, 0); /* release ref on parent */
2834 socket_unlock(oso, 1);
2835 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " in_pcbinshash failed");
2836 drop_reason = DROP_REASON_TCP_PCB_HASH_FAILED;
2837 goto drop;
2838 }
2839 socket_lock(oso, 0);
2840 if (isipv6) {
2841 /*
2842 * Inherit socket options from the listening
2843 * socket.
2844 * Note that in6p_inputopts is not (and even
2845 * should not be) copied, since it stores
2846 * previously received options and is used to
2847 * detect if each new option is different from
2848 * the previous one and hence should be passed
2849 * to a user.
2850 * If we copied in6p_inputopts, a user would
2851 * not be able to receive options just after
2852 * calling the accept system call.
2853 */
2854 inp->inp_flags |=
2855 oinp->inp_flags & INP_CONTROLOPTS;
2856 if (oinp->in6p_outputopts) {
2857 inp->in6p_outputopts =
2858 ip6_copypktopts(oinp->in6p_outputopts,
2859 Z_NOWAIT);
2860 }
2861 } else {
2862 inp->inp_options = ip_srcroute();
2863 inp->inp_ip_tos = oinp->inp_ip_tos;
2864 }
2865 #if IPSEC
2866 /* copy old policy into new socket's */
2867 if (sotoinpcb(oso)->inp_sp) {
2868 int error = 0;
2869 /* Is it a security hole here to silently fail to copy the policy? */
2870 if (inp->inp_sp == NULL) {
2871 error = ipsec_init_policy(so, &inp->inp_sp);
2872 }
2873 if (error != 0 || ipsec_copy_policy(sotoinpcb(oso)->inp_sp, inp->inp_sp)) {
2874 printf("tcp_input: could not copy policy\n");
2875 }
2876 }
2877 #endif
2878 /* inherit states from the listener */
2879 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2880 struct tcpcb *, tp, int32_t, TCPS_LISTEN);
2881 TCP_LOG_STATE(tp, TCPS_LISTEN);
2882 tp->t_state = TCPS_LISTEN;
2883 tp->t_flags |= tp0->t_flags & (TF_NOPUSH | TF_NOOPT | TF_NODELAY);
2884 tp->t_flagsext |= (tp0->t_flagsext & (TF_RXTFINDROP | TF_NOTIMEWAIT | TF_FASTOPEN | TF_L4S_ENABLED | TF_L4S_DISABLED));
2885 tp->t_keepinit = tp0->t_keepinit;
2886 tp->t_keepcnt = tp0->t_keepcnt;
2887 tp->t_keepintvl = tp0->t_keepintvl;
2888 tp->t_adaptive_wtimo = tp0->t_adaptive_wtimo;
2889 tp->t_adaptive_rtimo = tp0->t_adaptive_rtimo;
2890 tp->t_inpcb->inp_ip_ttl = tp0->t_inpcb->inp_ip_ttl;
2891 if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0) {
2892 tp->t_notsent_lowat = tp0->t_notsent_lowat;
2893 }
2894 if (tp->t_flagsext & (TF_L4S_ENABLED | TF_L4S_DISABLED)) {
2895 tcp_set_foreground_cc(so);
2896 }
2897 tp->t_inpcb->inp_flags2 |=
2898 tp0->t_inpcb->inp_flags2 & INP2_KEEPALIVE_OFFLOAD;
2899
2900 /* now drop the reference on the listener */
2901 socket_unlock(oso, 1);
2902
2903 tcp_set_max_rwinscale(tp, so);
2904
2905 #if CONTENT_FILTER
2906 if (check_cfil) {
2907 int error = cfil_sock_attach(so2, SA(&to2), SA(&from), CFS_CONNECTION_DIR_IN);
2908 if (error != 0) {
2909 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " cfil_sock_attach failed");
2910 drop_reason = DROP_REASON_TCP_CONTENT_FILTER_ATTACH;
2911 goto drop;
2912 }
2913 }
2914 #endif /* CONTENT_FILTER */
2915
2916 KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_END, 0, 0, 0, 0, 0);
2917 }
2918 }
2919 socket_lock_assert_owned(so);
2920
2921 /*
2922 * Packet accounting should not be done on listening socket
2923 */
2924 if (th->th_flags & TH_SYN) {
2925 (void) os_add_overflow(1, tp->t_syn_rcvd, &tp->t_syn_rcvd);
2926 }
2927 if (th->th_flags & TH_FIN) {
2928 (void) os_add_overflow(1, tp->t_fin_rcvd, &tp->t_fin_rcvd);
2929 }
2930 if (th->th_flags & TH_RST) {
2931 (void) os_add_overflow(1, tp->t_rst_rcvd, &tp->t_rst_rcvd);
2932 }
2933 TCP_LOG_TH_FLAGS(TCP_LOG_HDR, th, tp, false, ifp);
2934
2935 if (net_mpklog_enabled && (m->m_pkthdr.rcvif->if_xflags & IFXF_MPK_LOG)) {
2936 MPKL_TCP_INPUT(tcp_mpkl_log_object,
2937 ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
2938 th->th_seq, th->th_ack, tlen, thflags,
2939 so->last_pid, so->so_log_seqn++);
2940 }
2941
2942 if (tp->t_state == TCPS_ESTABLISHED && tlen > 0) {
2943 /*
2944 * Evaluate the rate of arrival of packets to see if the
2945 * receiver can reduce the ack traffic. The algorithm to
2946 * stretch acks will be enabled if the connection meets
2947 * certain criteria defined in the tcp_stretch_ack_enable() function.
2948 */
2949 if ((tp->t_flagsext & TF_RCVUNACK_WAITSS) != 0) {
2950 TCP_INC_VAR(tp->rcv_waitforss, segment_count);
2951 }
2952 if (tcp_stretch_ack_enable(tp, thflags)) {
2953 tp->t_flags |= TF_STRETCHACK;
2954 tp->t_flagsext &= ~(TF_RCVUNACK_WAITSS);
2955 tp->rcv_waitforss = 0;
2956 } else {
2957 tp->t_flags &= ~(TF_STRETCHACK);
2958 }
2959 if (TSTMP_GT(tp->rcv_unackwin - (tcp_rcvunackwin >> 1), tcp_now)) {
2960 tp->rcv_by_unackhalfwin += (tlen + off);
2961 tp->rcv_by_unackwin += (tlen + off);
2962 } else {
2963 tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;
2964 tp->rcv_by_unackwin = tp->rcv_by_unackhalfwin + tlen + off;
2965 tp->rcv_by_unackhalfwin = tlen + off;
2966 }
2967 }
2968
2969 if (TCP_L4S_ENABLED(tp) && TCP_ACC_ECN_ON(tp)) {
2970 /* Reset the state used for AccECN processing */
2971 tp->t_aecn.accecn_processed = 0;
2972 }
2973
2974 if (tp->t_state == TCPS_ESTABLISHED && BYTES_ACKED(th, tp) > 0) {
2975 if (CC_ALGO(tp)->set_bytes_acked != NULL) {
2976 CC_ALGO(tp)->set_bytes_acked(tp, BYTES_ACKED(th, tp));
2977 }
2978 if (tp->ecn_flags & TE_SENDIPECT) {
2979 /*
2980 * Data sent with ECT has been acknowledged, calculate
2981 * packets approx. by dividing by MSS. This is done to
2982 * count MSS sized packets in case packets are aggregated
2983 * by GRO/LRO.
2984 */
2985 uint32_t bytes_acked = tcp_round_to(BYTES_ACKED(th, tp), tp->t_maxseg);
2986 tp->t_ecn_capable_packets_acked += max(1, (bytes_acked / tp->t_maxseg));
2987 }
2988 }
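
/*
 * Illustrative arithmetic for the ECT-packet approximation above: with
 * t_maxseg = 1460 and BYTES_ACKED = 4380 (already a multiple, so the
 * rounding is a no-op), 4380 / 1460 = 3 MSS-sized packets are counted
 * as acked even if GRO/LRO aggregation obscured the original packet
 * boundaries.
 */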
2989
2990 /* Accurate ECN has different semantics for TH_CWR. */
2991 if (!TCP_ACC_ECN_ON(tp)) {
2992 /*
2993 * Clear TE_SENDECE if TH_CWR is set. This is harmless, so we don't
2994 * bother doing extensive checks for state and whatnot.
2995 */
2996 if (thflags & TH_CWR) {
2997 tp->ecn_flags &= ~TE_SENDECE;
2998 tp->t_ecn_recv_cwr++;
2999 }
3000 }
3001
3002 /*
3003 * Accurate ECN feedback for the data receiver:
3004 * process IP ECN bits and update r.cep for CE-marked pure ACKs
3005 * or valid data packets.
3006 */
3007 uint8_t ace = tcp_get_ace(th);
3008 if (TCP_ACC_ECN_ON(tp) && tp->t_state == TCPS_ESTABLISHED) {
3009 /* Update receive side counters */
3010 if (tlen == 0 || (tlen > 0 &&
3011 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
3012 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd))) {
3013 tcp_input_ip_ecn(tp, inp, (uint32_t)tlen, (uint32_t)segment_count, ip_ecn);
3014 }
3015
3016 /* Test for ACE bleaching, initial value of ace should be non-zero */
3017 if (th->th_seq == tp->iss + 1 && ace == 0) {
3018 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_ace_bleaching_detected;
3019 }
3020 } else {
3021 /*
3022 * Explicit Congestion Notification - Flag that we need to send ECE if
3023 * + The IP Congestion experienced flag was set.
3024 * + Socket is in established state
3025 * + We negotiated ECN in the TCP setup
3026 * + This isn't a pure ack (tlen > 0)
3027 * + The data is in the valid window
3028 *
3029 * TE_SENDECE will be cleared when we receive a packet with TH_CWR set.
3030 */
3031 if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED &&
3032 TCP_ECN_ENABLED(tp) && tlen > 0 &&
3033 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
3034 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
3035 tp->t_ecn_recv_ce++;
3036 tcpstat.tcps_ecn_recv_ce++;
3037 INP_INC_IFNET_STAT(inp, ecn_recv_ce);
3038 /* Mark this connection as it received CE from network */
3039 tp->ecn_flags |= TE_RECV_ECN_CE;
3040 tp->ecn_flags |= TE_SENDECE;
3041 }
3042 }
3043
3044 /*
3045 * If we received an explicit notification of congestion in
3046 * ip tos ecn bits or by the CWR bit in TCP header flags, reset
3047 * the ack-stretching state. We need to handle ECN notification if
3048 * an ECN setup SYN was sent even once.
3049 */
3050 if (tp->t_state == TCPS_ESTABLISHED &&
3051 (tp->ecn_flags & TE_SETUPSENT) &&
3052 (ip_ecn == IPTOS_ECN_CE || (thflags & TH_CWR))) {
3053 tcp_reset_stretch_ack(tp);
3054 tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
3055 CLEAR_IAJ_STATE(tp);
3056 }
3057
3058 if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED &&
3059 !TCP_ECN_ENABLED(tp) && !(tp->ecn_flags & TE_CEHEURI_SET)) {
3060 tcpstat.tcps_ecn_fallback_ce++;
3061 tcp_heuristic_ecn_aggressive(tp);
3062 tp->ecn_flags |= TE_CEHEURI_SET;
3063 }
3064
3065 if (tp->t_state == TCPS_ESTABLISHED && TCP_ECN_ENABLED(tp) &&
3066 ip_ecn == IPTOS_ECN_CE && !(tp->ecn_flags & TE_CEHEURI_SET)) {
3067 if (inp->inp_stat->rxpackets < ECN_MIN_CE_PROBES) {
3068 tp->t_ecn_recv_ce_pkt++;
3069 } else if (tp->t_ecn_recv_ce_pkt > ECN_MAX_CE_RATIO) {
3070 tcpstat.tcps_ecn_fallback_ce++;
3071 tcp_heuristic_ecn_aggressive(tp);
3072 tp->ecn_flags |= TE_CEHEURI_SET;
3073 INP_INC_IFNET_STAT(inp, ecn_fallback_ce);
3074 } else {
3075 /* We tracked the first ECN_MIN_CE_PROBES segments; we
3076 * now know that the path is good.
3077 */
3078 tp->ecn_flags |= TE_CEHEURI_SET;
3079 }
3080 }
3081
3082 /* Update rcvtime as a new segment was received on the connection */
3083 tp->t_rcvtime = tcp_now;
3084
3085 /*
3086 * Segment received on connection.
3087 * Reset idle time and keep-alive timer.
3088 */
3089 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3090 tcp_keepalive_reset(tp);
3091
3092 if (tp->t_mpsub) {
3093 mptcp_reset_keepalive(tp);
3094 }
3095 }
3096
3097 /*
3098 * Process options if not in LISTEN state,
3099 * else do it below (after getting remote address).
3100 */
3101 if (tp->t_state != TCPS_LISTEN && optp) {
3102 tcp_dooptions(tp, optp, optlen, th, &to);
3103 }
3104 #if MPTCP
3105 if (tp->t_state != TCPS_LISTEN && (so->so_flags & SOF_MP_SUBFLOW)) {
3106 mptcp_insert_rmap(tp, m, th);
3107 }
3108 #endif /* MPTCP */
3109 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
3110 if (!(thflags & TH_ACK) ||
3111 (SEQ_GT(th->th_ack, tp->iss) &&
3112 SEQ_LEQ(th->th_ack, tp->snd_max))) {
3113 tcp_finalize_options(tp, &to, ifscope);
3114 }
3115 }
3116
3117 #if TRAFFIC_MGT
3118 /*
3119 * Compute inter-packet arrival jitter. According to RFC 3550,
3120 * inter-packet arrival jitter is defined as the difference in
3121 * packet spacing at the receiver compared to the sender for a
3122 * pair of packets. When two packets of maximum segment size come
3123 * one after the other with consecutive sequence numbers, we
3124 * consider them as packets sent together at the sender and use
3125 * them as a pair to compute inter-packet arrival jitter. This
3126 * metric indicates the delay induced by the network components due
3127 * to queuing in edge/access routers.
3128 */
3129 if (tp->t_state == TCPS_ESTABLISHED &&
3130 (thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK | TH_ECE | TH_PUSH)) == TH_ACK &&
3131 ((tp->t_flags & TF_NEEDFIN) == 0) &&
3132 ((to.to_flags & TOF_TS) == 0 ||
3133 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
3134 th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) {
3135 int seg_size = tlen;
3136 if (tp->iaj_pktcnt <= IAJ_IGNORE_PKTCNT) {
3137 TCP_INC_VAR(tp->iaj_pktcnt, segment_count);
3138 }
3139
3140 if (tp->iaj_size == 0 || seg_size > tp->iaj_size ||
3141 (seg_size == tp->iaj_size && tp->iaj_rcv_ts == 0)) {
3142 /*
3143 * State related to inter-arrival jitter is
3144 * uninitialized or we are trying to find a good
3145 * first packet to start computing the metric
3146 */
3147 update_iaj_state(tp, seg_size, 0);
3148 } else {
3149 if (seg_size == tp->iaj_size) {
3150 /*
3151 * Compute inter-arrival jitter taking
3152 * this packet as the second packet
3153 */
3154 compute_iaj(tp);
3155 }
3156 if (seg_size < tp->iaj_size) {
3157 /*
3158 * There is a smaller packet in the stream.
3159 * Sometimes the maximum size supported
3160 * on a path can change if there is a new
3161 * link with smaller MTU. The receiver will
3162 * not know about this change. If there
3163 * are too many packets smaller than
3164 * iaj_size, we try to learn the iaj_size
3165 * again.
3166 */
3167 TCP_INC_VAR(tp->iaj_small_pkt, segment_count);
3168 if (tp->iaj_small_pkt > RESET_IAJ_SIZE_THRESH) {
3169 update_iaj_state(tp, seg_size, 1);
3170 } else {
3171 CLEAR_IAJ_STATE(tp);
3172 }
3173 } else {
3174 update_iaj_state(tp, seg_size, 0);
3175 }
3176 }
3177 } else {
3178 CLEAR_IAJ_STATE(tp);
3179 }
3180 #endif /* TRAFFIC_MGT */
3181
3182 /*
3183 * Header prediction: check for the two common cases
3184 * of a uni-directional data xfer. If the packet has
3185 * no control flags, is in-sequence, the window didn't
3186 * change and we're not retransmitting, it's a
3187 * candidate. If the length is zero and the ack moved
3188 * forward, we're the sender side of the xfer. Just
3189 * free the data acked & wake any higher level process
3190 * that was blocked waiting for space. If the length
3191 * is non-zero and the ack didn't move, we're the
3192 * receiver side. If we're getting packets in-order
3193 * (the reassembly queue is empty), add the data to
3194 * the socket buffer and note that we need a delayed ack.
3195 * Make sure that the hidden state-flags are also off.
3196 * Since we check for TCPS_ESTABLISHED above, it can only
3197 * be TH_NEEDSYN.
3198 */
3199 if (tp->t_state == TCPS_ESTABLISHED &&
3200 !(so->so_state & SS_CANTRCVMORE) &&
3201 (thflags & TH_FLAGS) == TH_ACK &&
3202 ((tp->t_flags & TF_NEEDFIN) == 0) &&
3203 ((to.to_flags & TOF_TS) == 0 ||
3204 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
3205 th->th_seq == tp->rcv_nxt &&
3206 tiwin && tiwin == tp->snd_wnd &&
3207 tp->snd_nxt == tp->snd_max) {
3208 /*
3209 * If last ACK falls within this segment's sequence numbers,
3210 * record the timestamp.
3211 * NOTE that the test is modified according to the latest
3212 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
3213 */
3214 if ((to.to_flags & TOF_TS) != 0 &&
3215 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
3216 tp->ts_recent_age = tcp_now;
3217 tp->ts_recent = to.to_tsval;
3218 }
3219
3220 /*
3221 * We increment t_unacksegs_ce for both data segments
3222 * and pure ACKs for Accurate ECN
3223 */
3224 if (TCP_ACC_ECN_ON(tp) && ip_ecn == IPTOS_ECN_CE) {
3225 TCP_INC_VAR(tp->t_unacksegs_ce, segment_count);
3226 }
3227
3228 if (tlen == 0) {
3229 if (SEQ_GT(th->th_ack, tp->snd_una) &&
3230 SEQ_LEQ(th->th_ack, tp->snd_max) &&
3231 tp->snd_cwnd >= tp->snd_ssthresh &&
3232 (!IN_FASTRECOVERY(tp) &&
3233 ((!(SACK_ENABLED(tp)) &&
3234 tp->t_dupacks < tp->t_rexmtthresh) ||
3235 (SACK_ENABLED(tp) && to.to_nsacks == 0 &&
3236 TAILQ_EMPTY(&tp->snd_holes))))) {
3237 /*
3238 * this is a pure ack for outstanding data.
3239 */
3240 ++tcpstat.tcps_predack;
3241
3242 tcp_bad_rexmt_check(tp, th, &to);
3243
3244 /* Recalculate the RTT */
3245 tcp_compute_rtt(tp, &to, th);
3246
3247 VERIFY(SEQ_GEQ(th->th_ack, tp->snd_una));
3248 acked = BYTES_ACKED(th, tp);
3249 tcpstat.tcps_rcvackpack++;
3250 tcpstat.tcps_rcvackbyte += acked;
3251
3252 /* TE_SENDIPECT is only set when L4S sysctl is enabled */
3253 if (TCP_ACC_ECN_ON(tp) && (tp->ecn_flags & TE_SENDIPECT)) {
3254 if (!TCP_L4S_ENABLED(tp)) {
3255 os_log_error(OS_LOG_DEFAULT, "TE_SENDIPECT flag is set but TCP_L4S_ENABLED is not");
3256 }
3257 uint32_t pkts_acked = tcp_packets_this_ack(tp, acked);
3258 tp->total_ect_packets_acked += pkts_acked;
3259
3260 bool newly_acked_time = false;
3261 if (acked == 0 && (to.to_flags & TOF_TS) != 0 && to.to_tsecr != 0 &&
3262 TSTMP_GT(to.to_tsecr, tp->t_last_ack_tsecr)) {
3263 newly_acked_time = true;
3264 }
3265 if (acked > 0 || newly_acked_time) {
3266 tcp_process_accecn(tp, &to, th, pkts_acked, ace);
3267 }
3268 }
3269
3270 /*
3271 * Process sent segments used for RACK, called after RTT is computed
3272 * RACK reordering window doesn't need to be updated until we process
3273 * DSACK.
3274 */
3275 if (TCP_RACK_ENABLED(tp)) {
3276 tcp_segs_doack(tp, th->th_ack, &to);
3277 if (SEQ_LT(tp->snd_fack, th->th_ack)) {
3278 /*
3279 * We update snd_fack here for RACK only as it is updated
3280 * and used differently for SACK. This should be done after
3281 * ACK processing of segments which checks for reordering.
3282 * Also, we don't compare with highest_sacked_seq here as this
3283 * is the fast path with no SACK blocks.
3284 */
3285 tp->snd_fack = th->th_ack;
3286 }
3287 }
3288
3289 /*
3290 * Handle an ack that is in sequence during
3291 * congestion avoidance phase. The
3292 * calculations in this function
3293 * assume that snd_una is not updated yet.
3294 */
3295 if (CC_ALGO(tp)->congestion_avd != NULL) {
3296 CC_ALGO(tp)->congestion_avd(tp, th);
3297 }
3298 tcp_ccdbg_trace(tp, th, TCP_CC_INSEQ_ACK_RCVD);
3299 sbdrop(&so->so_snd, acked);
3300 tcp_sbsnd_trim(&so->so_snd);
3301
3302 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
3303 SEQ_LEQ(th->th_ack, tp->snd_recover)) {
3304 tp->snd_recover = th->th_ack - 1;
3305 }
3306
3307 tcp_update_snd_una(tp, th->th_ack);
3308
3309 TCP_RESET_REXMT_STATE(tp);
3310
3311 /*
3312 * pull snd_wl2 up to prevent seq wrap relative
3313 * to th_ack.
3314 */
3315 tp->snd_wl2 = th->th_ack;
3316
3317 if (tp->t_dupacks > 0) {
3318 tp->t_dupacks = 0;
3319 tp->t_rexmtthresh = tcprexmtthresh;
3320 }
3321
3322 tp->sackhint.sack_bytes_acked = 0;
3323
3324 /*
3325 * If all outstanding data are acked, stop
3326 * retransmit timer, otherwise restart timer
3327 * using current (possibly backed-off) value.
3328 * If process is waiting for space,
3329 * wakeup/selwakeup/signal. If data
3330 * are ready to send, let tcp_output
3331 * decide between more output or persist.
3332 */
3333 if (tp->snd_una == tp->snd_max) {
3334 tp->t_timer[TCPT_REXMT] = 0;
3335 tp->t_timer[TCPT_PTO] = 0;
3336 tp->t_timer[TCPT_REORDER] = 0;
3337 tcp_rack_reset_segs_retransmitted(tp);
3338 } else if (tp->t_timer[TCPT_PERSIST] == 0) {
3339 tcp_set_link_heur_rtomin(tp, inp->inp_last_outifp);
3340 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
3341 }
3342 if (!SLIST_EMPTY(&tp->t_rxt_segments) &&
3343 !TCP_DSACK_SEQ_IN_WINDOW(tp,
3344 tp->t_dsack_lastuna, tp->snd_una)) {
3345 tcp_rxtseg_clean(tp);
3346 }
3347
3348 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
3349 tp->t_bwmeas != NULL) {
3350 tcp_bwmeas_check(tp);
3351 }
3352
3353 write_wakeup = 1;
3354 if (!SLIST_EMPTY(&tp->t_notify_ack)) {
3355 tcp_notify_acknowledgement(tp, so);
3356 }
3357
3358 if ((so->so_snd.sb_cc) || (tp->t_flags & TF_ACKNOW)) {
3359 (void) tcp_output(tp);
3360 }
3361
3362 tcp_tfo_rcv_ack(tp, th);
3363
3364 m_freem(m);
3365
3366 tcp_check_timer_state(tp);
3367
3368 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
3369
3370 socket_unlock(so, 1);
3371 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3372 return;
3373 }
3374 } else if (th->th_ack == tp->snd_una && LIST_EMPTY(&tp->t_segq) &&
3375 tlen <= tcp_sbspace(tp)) {
3376 /*
3377 * this is a pure, in-sequence data packet
3378 * with nothing on the reassembly queue and
3379 * we have enough buffer space to take it.
3380 */
3381
3382 /* Clean receiver SACK report if present */
3383 if (SACK_ENABLED(tp) && tp->rcv_numsacks) {
3384 tcp_clean_sackreport(tp);
3385 }
3386 ++tcpstat.tcps_preddat;
3387 tp->rcv_nxt += tlen;
3388 /* Update highest received sequence and its timestamp */
3389 if (SEQ_LT(tp->rcv_high, tp->rcv_nxt)) {
3390 tp->rcv_high = tp->rcv_nxt;
3391 if (to.to_flags & TOF_TS) {
3392 tp->tsv_high = to.to_tsval;
3393 }
3394 }
3395
3396 /*
3397 * Pull snd_wl1 up to prevent seq wrap relative to
3398 * th_seq.
3399 */
3400 tp->snd_wl1 = th->th_seq;
3401 /*
3402 * Pull rcv_up up to prevent seq wrap relative to
3403 * rcv_nxt.
3404 */
3405 tp->rcv_up = tp->rcv_nxt;
3406 TCP_INC_VAR(tcpstat.tcps_rcvpack, segment_count);
3407 tcpstat.tcps_rcvbyte += tlen;
3408 if (nstat_collect) {
3409 INP_ADD_STAT(inp, ifnet_count_type,
3410 rxpackets, 1);
3411 INP_ADD_STAT(inp, ifnet_count_type, rxbytes,
3412 tlen);
3413 inp_set_activity_bitmap(inp);
3414 }
3415
3416 /* Calculate the RTT on the receiver */
3417 tcp_compute_rcv_rtt(tp, &to, th);
3418
3419 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
3420 if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.data_rcvd != NULL) {
3421 tcp_cc_rledbat.data_rcvd(tp, th, &to, tlen);
3422 }
3423
3424 /*
3425 * Add data to socket buffer.
3426 */
3427 so_recv_data_stat(so, m, 0);
3428 m_adj(m, drop_hdrlen); /* delayed header drop */
3429
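/*
* Note: copies of the IP and TCP headers are made below because
* sbappendstream_rcvdemux() consumes the mbuf chain (which also
* holds the headers); `th' is re-pointed at the saved copy after
* the append so the trace points below stay valid.
*/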
3430 if (isipv6) {
3431 memcpy(&saved_hdr, ip6, sizeof(struct ip6_hdr));
3432 ip6 = (struct ip6_hdr *)&saved_hdr[0];
3433 } else {
3434 memcpy(&saved_hdr, ip, ip->ip_hl << 2);
3435 ip = (struct ip *)&saved_hdr[0];
3436 }
3437 memcpy(&saved_tcphdr, th, sizeof(struct tcphdr));
3438
3439 if (th->th_flags & TH_PUSH) {
3440 tp->t_flagsext |= TF_LAST_IS_PSH;
3441 } else {
3442 tp->t_flagsext &= ~TF_LAST_IS_PSH;
3443 }
3444
3445 if (sbappendstream_rcvdemux(so, m)) {
3446 mptcp_handle_input(so);
3447 read_wakeup = 1;
3448 }
3449 th = &saved_tcphdr;
3450
3451 if (isipv6) {
3452 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
3453 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
3454 th->th_seq, th->th_ack, th->th_win);
3455 } else {
3456 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
3457 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
3458 th->th_seq, th->th_ack, th->th_win);
3459 }
3460 TCP_INC_VAR(tp->t_unacksegs, segment_count);
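/*
* DELAY_ACK() decides whether this pure data segment may be
* acknowledged lazily by the delayed-ACK timer; the classic
* RFC 1122 guidance is to ACK at least every second full-sized
* segment. Otherwise we force an immediate ACK below.
*/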
3461 if (DELAY_ACK(tp, th)) {
3462 if ((tp->t_flags & TF_DELACK) == 0) {
3463 tp->t_flags |= TF_DELACK;
3464 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
3465 }
3466 } else {
3467 tp->t_flags |= TF_ACKNOW;
3468 tcp_output(tp);
3469 }
3470
3471 tcp_adaptive_rwtimo_check(tp, tlen);
3472
3473 if (tlen > 0) {
3474 tcp_tfo_rcv_data(tp);
3475 }
3476
3477 tcp_check_timer_state(tp);
3478
3479 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
3480
3481 socket_unlock(so, 1);
3482 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3483 return;
3484 }
3485 }
3486
3487 /*
3488 * Calculate amount of space in receive window,
3489 * and then do TCP input processing.
3490 * Receive window is amount of space in rcv queue,
3491 * but not less than advertised window.
3492 */
3493 socket_lock_assert_owned(so);
3494 win = tcp_sbspace(tp);
3495 if (win < 0) {
3496 win = 0;
3497 } else { /* clip rcv window to 4K for modems */
3498 if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) {
3499 win = min(win, slowlink_wsize);
3500 }
3501 }
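/*
* Example: if only 4096 bytes of buffer space remain but we have
* already advertised up to rcv_adv (say rcv_adv - rcv_nxt = 8192),
* rcv_wnd stays at 8192; the advertised right edge must never move
* left, i.e. the window may not shrink (RFC 1122, Section 4.2.2.16).
*/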
3502 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
3503 #if MPTCP
3504 /*
3505 * Ensure that the subflow receive window isn't greater
3506 * than the connection level receive window.
3507 */
3508 if ((tp->t_mpflags & TMPF_MPTCP_TRUE) && (mp_tp = tptomptp(tp))) {
3509 socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte));
3510 int64_t recwin_conn = (int64_t)(mp_tp->mpt_rcvadv - mp_tp->mpt_rcvnxt);
3511
3512 VERIFY(recwin_conn < INT32_MAX && recwin_conn > INT32_MIN);
3513 if (recwin_conn > 0 && tp->rcv_wnd > (uint32_t)recwin_conn) {
3514 tp->rcv_wnd = (uint32_t)recwin_conn;
3515 tcpstat.tcps_mp_reducedwin++;
3516 }
3517 }
3518 #endif /* MPTCP */
3519
3520 switch (tp->t_state) {
3521 /*
3522 * Initialize tp->rcv_nxt, and tp->irs, select an initial
3523 * tp->iss, and send a segment:
3524 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
3525 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
3526 * Fill in remote peer address fields if not previously specified.
3527 * Enter SYN_RECEIVED state, and process any other fields of this
3528 * segment in this state.
3529 */
3530 case TCPS_LISTEN: {
3531 struct sockaddr_in *sin;
3532 struct sockaddr_in6 *sin6;
3533 int error = 0;
3534
3535 socket_lock_assert_owned(so);
3536
3537 /* Clear the logging flags inherited from the listening socket */
3538 inp->inp_log_flags = 0;
3539 inp->inp_flags2 &= ~INP2_LOGGING_ENABLED;
3540
3541 if (__improbable(inp->inp_flags2 & INP2_BIND_IN_PROGRESS)) {
3542 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "LISTEN bind in progress");
3543 drop_reason = DROP_REASON_TCP_BIND_IN_PROGRESS;
3544 goto drop;
3545 }
3546 inp_enter_bind_in_progress(so);
3547
3548 if (isipv6) {
3549 sin6 = kalloc_type(struct sockaddr_in6, Z_NOWAIT | Z_ZERO);
3550 if (sin6 == NULL) {
3551 error = ENOMEM;
3552 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "LISTEN kalloc_type failed");
3553 drop_reason = DROP_REASON_TCP_MEM_ALLOC;
3554 goto pcbconnect_done;
3555 }
3556 sin6->sin6_family = AF_INET6;
3557 sin6->sin6_len = sizeof(*sin6);
3558 sin6->sin6_addr = ip6->ip6_src;
3559 sin6->sin6_port = th->th_sport;
3560 if (!in6_embedded_scope && IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
3561 sin6->sin6_scope_id = ip6_input_getsrcifscope(m);
3562 }
3563 laddr6 = inp->in6p_laddr;
3564 uint32_t lifscope = inp->inp_lifscope;
3565 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
3566 inp->in6p_laddr = ip6->ip6_dst;
3567 inp->inp_lifscope = in6_addr2scopeid(ifp, &inp->in6p_laddr);
3568 in6_verify_ifscope(&inp->in6p_laddr, inp->inp_lifscope);
3569 }
3570 if ((error = in6_pcbconnect(inp, SA(sin6), kernel_proc)) != 0) {
3571 inp->in6p_laddr = laddr6;
3572 kfree_type(struct sockaddr_in6, sin6);
3573 inp->inp_lifscope = lifscope;
3574 in6_verify_ifscope(&inp->in6p_laddr, inp->inp_lifscope);
3575 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " LISTEN in6_pcbconnect failed");
3576 drop_reason = DROP_REASON_TCP_PCB_CONNECT;
3577 goto pcbconnect_done;
3578 }
3579 kfree_type(struct sockaddr_in6, sin6);
3580 } else {
3581 socket_lock_assert_owned(so);
3582 sin = kalloc_type(struct sockaddr_in, Z_NOWAIT);
3583 if (sin == NULL) {
3584 error = ENOMEM;
3585 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "LISTEN kalloc_type failed");
3586 drop_reason = DROP_REASON_TCP_MEM_ALLOC;
3587 goto pcbconnect_done;
3588 }
3589 sin->sin_family = AF_INET;
3590 sin->sin_len = sizeof(*sin);
3591 sin->sin_addr = ip->ip_src;
3592 sin->sin_port = th->th_sport;
3593 bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));
3594 laddr = inp->inp_laddr;
3595 if (inp->inp_laddr.s_addr == INADDR_ANY) {
3596 inp->inp_laddr = ip->ip_dst;
3597 }
3598 if ((error = in_pcbconnect(inp, SA(sin), kernel_proc, IFSCOPE_NONE, NULL)) != 0) {
3599 inp->inp_laddr = laddr;
3600 kfree_type(struct sockaddr_in, sin);
3601 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " LISTEN in_pcbconnect failed");
3602 drop_reason = DROP_REASON_TCP_PCB_CONNECT;
3603 goto pcbconnect_done;
3604 }
3605 kfree_type(struct sockaddr_in, sin);
3606 }
3607 pcbconnect_done:
3608 inp_exit_bind_in_progress(so);
3609 if (error != 0) {
3610 goto drop;
3611 }
3612
3613 tcp_dooptions(tp, optp, optlen, th, &to);
3614 tcp_finalize_options(tp, &to, ifscope);
3615
3616 if (TFO_ENABLED(tp) && tcp_tfo_syn(tp, &to)) {
3617 isconnected = TRUE;
3618 }
3619
3620 if (iss) {
3621 tp->iss = iss;
3622 } else {
3623 tp->iss = tcp_new_isn(tp);
3624 }
3625 tp->irs = th->th_seq;
3626 tcp_sendseqinit(tp);
3627 tcp_rcvseqinit(tp);
3628 tp->snd_recover = tp->snd_una;
3629 /*
3630 * Initialization of the tcpcb for transaction;
3631 * set SND.WND = SEG.WND,
3632 * initialize CCsend and CCrecv.
3633 */
3634 tp->snd_wnd = tiwin; /* initial send-window */
3635 tp->max_sndwnd = tp->snd_wnd;
3636 tp->t_flags |= TF_ACKNOW;
3637 tp->t_unacksegs = 0;
3638 tp->t_unacksegs_ce = 0;
3639 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3640 struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
3641 TCP_LOG_STATE(tp, TCPS_SYN_RECEIVED);
3642 tp->t_state = TCPS_SYN_RECEIVED;
3643 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
3644 TCP_CONN_KEEPINIT(tp));
3645 tp->t_connect_time = tcp_now;
3646 dropsocket = 0; /* committed to socket */
3647
3648 if (inp->inp_flowhash == 0) {
3649 inp_calc_flowhash(inp);
3650 ASSERT(inp->inp_flowhash != 0);
3651 }
3652 /* update flowinfo - RFC 6437 */
3653 if (inp->inp_flow == 0 &&
3654 inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
3655 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
3656 inp->inp_flow |=
3657 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
3658 }
3659
3660 /* reset the incomp processing flag */
3661 so->so_flags &= ~(SOF_INCOMP_INPROGRESS);
3662 tcpstat.tcps_accepts++;
3663
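/*
* The 3-bit ACE field of AccECN consists of the AE flag, which
* lives in th_x2 above the eight classic flag bits, together with
* CWR and ECE; reassembling th_x2 and thflags before masking with
* TH_ACE recovers the complete encoding.
*/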
3664 int ace_flags = ((th->th_x2 << 8) | thflags) & TH_ACE;
3665 tcp_input_process_accecn_syn(tp, ace_flags, ip_ecn);
3666
3667 /*
3668 * The address and connection state are finalized
3669 */
3670 TCP_LOG_CONNECT(tp, false, 0);
3671
3672 tcp_add_fsw_flow(tp, ifp);
3673
3674 goto trimthenstep6;
3675 }
3676
3677 /*
3678 * If the state is SYN_RECEIVED and the seg contains an ACK,
3679 * but not for our SYN/ACK, send a RST.
3680 */
3681 case TCPS_SYN_RECEIVED:
3682 if ((thflags & TH_ACK) &&
3683 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
3684 SEQ_GT(th->th_ack, tp->snd_max))) {
3685 IF_TCP_STATINC(ifp, ooopacket);
3686 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_RECEIVED bad ACK");
3687 drop_reason = DROP_REASON_TCP_SYN_RECEIVED_BAD_ACK;
3688 goto dropwithreset;
3689 }
3690
/*
* In SYN_RECEIVED state, if we receive some SYNs with
* window scale and others without, window scaling should
* be disabled. Otherwise the window advertised will be
* lower than intended if we assume scaling and the other
* end does not.
*/
3697 if ((thflags & TH_SYN) &&
3698 (tp->irs == th->th_seq) &&
3699 !(to.to_flags & TOF_SCALE)) {
3700 tp->t_flags &= ~TF_RCVD_SCALE;
3701 }
3702 break;
3703
3704 /*
3705 * If the state is SYN_SENT:
3706 * if seg contains an ACK, but not for our SYN, drop the input.
3707 * if seg contains a RST, then drop the connection.
3708 * if seg does not contain SYN, then drop it.
3709 * Otherwise this is an acceptable SYN segment
3710 * initialize tp->rcv_nxt and tp->irs
3711 * if seg contains ack then advance tp->snd_una
3712 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
3713 * arrange for segment to be acked (eventually)
3714 * continue processing rest of data/controls, beginning with URG
3715 */
3716 case TCPS_SYN_SENT:
3717 if ((thflags & TH_ACK) &&
3718 (SEQ_LEQ(th->th_ack, tp->iss) ||
3719 SEQ_GT(th->th_ack, tp->snd_max))) {
3720 IF_TCP_STATINC(ifp, ooopacket);
3721 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_SENT bad ACK");
3722 drop_reason = DROP_REASON_TCP_SYN_SENT_BAD_ACK;
3723 goto dropwithreset;
3724 }
3725 if (thflags & TH_RST) {
3726 if ((thflags & TH_ACK) != 0) {
3727 if (TFO_ENABLED(tp) &&
3728 !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) {
3729 tcp_heuristic_tfo_rst(tp);
3730 }
3731 if ((tp->ecn_flags & (TE_SETUPSENT | TE_RCVD_SYN_RST)) == TE_SETUPSENT ||
3732 (tp->ecn_flags & (TE_ACE_SETUPSENT | TE_RCVD_SYN_RST)) == TE_ACE_SETUPSENT) {
/*
* On local connections, send a
* non-ECN SYN once before
* dropping the connection.
*/
3738 if (tp->t_flags & TF_LOCAL) {
3739 tp->ecn_flags |= TE_RCVD_SYN_RST;
3740 drop_reason = DROP_REASON_TCP_RST;
3741 goto drop;
3742 } else {
3743 tcp_heuristic_ecn_synrst(tp);
3744 }
3745 }
3746 soevent(so,
3747 (SO_FILT_HINT_LOCKED |
3748 SO_FILT_HINT_CONNRESET));
3749 tp = tcp_drop(tp, ECONNREFUSED);
3750 }
3751 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_SENT got RST");
3752 drop_reason = DROP_REASON_TCP_RST;
3753 goto drop;
3754 }
3755 if ((thflags & TH_SYN) == 0) {
3756 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_SENT no SYN");
3757 drop_reason = DROP_REASON_TCP_SYN_SENT_NO_SYN;
3758 goto drop;
3759 }
3760 tp->snd_wnd = th->th_win; /* initial send window */
3761 tp->max_sndwnd = tp->snd_wnd;
3762
3763 tp->irs = th->th_seq;
3764 tcp_rcvseqinit(tp);
3765 if (thflags & TH_ACK) {
3766 /* Client processes SYN-ACK */
3767 tcpstat.tcps_connects++;
3768
3769 const uint32_t ace_flags = ((th->th_x2 << 8) | thflags) & TH_ACE;
3770
3771 if ((thflags & (TH_ECE | TH_CWR)) == (TH_ECE)) {
3772 /* Receiving Any|0|1 is classic ECN-setup SYN-ACK */
3773 tp->ecn_flags |= TE_SETUPRECEIVED;
3774 if (TCP_ECN_ENABLED(tp)) {
3775 tcp_heuristic_ecn_success(tp);
3776 tcpstat.tcps_ecn_client_success++;
3777 }
3778
3779 if (tp->ecn_flags & TE_ACE_SETUPSENT) {
/*
* Sent an AccECN SYN but received a classic ECN SYN-ACK.
* Set the classic-ECN-related flags.
*/
3784 tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT);
3785 tp->ecn_flags &= ~TE_ACE_SETUPSENT;
3786 if (tp->t_client_accecn_state == tcp_connection_client_accurate_ecn_feature_enabled) {
3787 tp->t_client_accecn_state = tcp_connection_client_classic_ecn_available;
3788 }
3789 }
3790 } else if (TCP_L4S_ENABLED(tp) && ace_flags != 0 &&
3791 ace_flags != TH_ACE) {
3792 /* Initialize sender side packet & byte counters */
3793 tp->t_aecn.t_snd_ce_packets = 5;
3794 tp->t_aecn.t_snd_ect1_bytes = tp->t_aecn.t_snd_ect0_bytes = 1;
3795 tp->t_aecn.t_snd_ce_bytes = 0;
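/*
* The packet counter starts at 5, the initial ACE offset defined
* by the AccECN draft, so that a field zeroed by a middlebox can
* be told apart from a genuine counter value.
*/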
3796 tp->ecn_flags |= TE_ACE_FINAL_ACK_3WHS;
/*
* Client received an AccECN SYN-ACK that reflects the state (ECN)
* in which the SYN packet was delivered. This helps to detect if
* there was mangling of the SYN packet on the path. Currently, we
* only send Not-ECT on SYN packets. So, we should set Not-ECT in
* all packets if we receive any encoding other than 0|TH_CWR|0.
* If 0|0|0 or 1|1|1 was received, fail Accurate ECN negotiation
* by not setting TE_ACE_SETUPRECEIVED.
*/
3806 uint32_t ecn_flags = TE_ACE_SETUPRECEIVED;
3807 if (TCP_L4S_ENABLED(tp)) {
3808 ecn_flags |= TE_SENDIPECT;
3809 }
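/*
* Summary of the reflected encodings handled below, written as
* AE|CWR|ECE as received on the SYN-ACK => how our SYN was
* delivered to the server:
* 0|1|0 => Not-ECT 0|1|1 => ECT(1)
* 1|0|0 => ECT(0) 1|1|0 => CE
* Any other encoding falls through without changing state.
*/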
3810 switch (ace_flags) {
3811 case (0 | TH_CWR | 0):
3812 /* Non-ECT SYN was delivered */
3813 tp->ecn_flags |= ecn_flags;
3814 tcpstat.tcps_ecn_ace_syn_not_ect++;
3815 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_success;
3816 break;
3817 case (0 | TH_CWR | TH_ECE):
3818 /* ECT1 SYN was delivered */
3819 tp->ecn_flags |= ecn_flags;
3820 /* Mangling detected, set Non-ECT on outgoing packets */
3821 tp->ecn_flags &= ~TE_SENDIPECT;
3822 tcpstat.tcps_ecn_ace_syn_ect1++;
3823 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_success_ect_mangling_detected;
3824 break;
3825 case (TH_AE | 0 | 0):
3826 /* ECT0 SYN was delivered */
3827 tp->ecn_flags |= ecn_flags;
3828 /* Mangling detected, set Non-ECT on outgoing packets */
3829 tp->ecn_flags &= ~TE_SENDIPECT;
3830 tcpstat.tcps_ecn_ace_syn_ect0++;
3831 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_success_ect_mangling_detected;
3832 break;
3833 case (TH_AE | TH_CWR | 0):
3834 /* CE SYN was delivered */
3835 tp->ecn_flags |= ecn_flags;
3836 /* Mangling detected, set Non-ECT on outgoing packets */
3837 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_success_ect_mangling_detected;
3838 tp->ecn_flags &= ~TE_SENDIPECT;
3839 /*
3840 * Although we don't send ECT SYN yet, it is possible that
3841 * a network element changed Not-ECT to ECT and later there
3842 * was congestion at another network element that set it to CE.
3843 * To keep it simple, we will consider this as a congestion event
3844 * for the congestion controller.
3845 * If a TCP client in AccECN mode receives CE feedback in the TCP
3846 * flags of a SYN/ACK, it MUST NOT increment s.cep.
3847 */
3848 tp->snd_cwnd = 2 * tp->t_maxseg;
3849 tcpstat.tcps_ecn_ace_syn_ce++;
3850 break;
3851 default:
3852 break;
3853 }
3854 if (TCP_ECN_ENABLED(tp)) {
3855 tcp_heuristic_ecn_success(tp);
3856 tcpstat.tcps_ecn_client_success++;
3857 }
/*
* A TCP client in AccECN mode MUST feed back which of the 4
* possible values of the IP-ECN field was received in the
* SYN/ACK. Set the setup flag for the final ACK accordingly.
* We will initialize r.cep, r.e1b, r.e0b first and then increment
* if CE was set on the IP-ECN field of the SYN-ACK.
*/
3865 tp->t_aecn.t_rcv_ce_packets = 5;
3866 tp->t_aecn.t_rcv_ect0_bytes = tp->t_aecn.t_rcv_ect1_bytes = 1;
3867 tp->t_aecn.t_rcv_ce_bytes = 0;
3868
3869 /* Increment packet & byte counters based on IP-ECN */
3870 tcp_input_ip_ecn(tp, inp, (uint32_t)tlen, (uint32_t)segment_count, ip_ecn);
3871
3872 switch (ip_ecn) {
3873 case IPTOS_ECN_NOTECT:
3874 /* Not-ECT SYN-ACK was received */
3875 tp->ecn_flags |= TE_ACE_SETUP_NON_ECT;
3876 break;
3877 case IPTOS_ECN_ECT1:
3878 /* ECT1 SYN-ACK was received */
3879 tp->ecn_flags |= TE_ACE_SETUP_ECT1;
3880 break;
3881 case IPTOS_ECN_ECT0:
3882 /* ECT0 SYN-ACK was received */
3883 tp->ecn_flags |= TE_ACE_SETUP_ECT0;
3884 break;
3885 case IPTOS_ECN_CE:
3886 tp->ecn_flags |= TE_ACE_SETUP_CE;
3887 break;
3888 }
/* Update the time for this newly received SYN-ACK packet */
3890 if ((to.to_flags & TOF_TS) != 0 && (to.to_tsecr != 0) &&
3891 (tp->t_last_ack_tsecr == 0 || TSTMP_GEQ(to.to_tsecr, tp->t_last_ack_tsecr))) {
3892 tp->t_last_ack_tsecr = to.to_tsecr;
3893 }
3894 } else {
3895 if ((tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT)) &&
3896 tp->t_rxtshift == 0) {
3897 tcp_heuristic_ecn_success(tp);
3898 tcpstat.tcps_ecn_not_supported++;
3899 }
3900 if ((tp->ecn_flags & (TE_SETUPSENT | TE_ACE_SETUPSENT)) &&
3901 tp->t_rxtshift > 0) {
3902 tcp_heuristic_ecn_loss(tp);
3903 }
3904
3905 /* non-ECN-setup SYN-ACK */
3906 tp->ecn_flags &= ~TE_SENDIPECT;
3907 /*
3908 * If Accurate ECN SYN was retransmitted twice and non-ECN SYN-ACK
3909 * was received, then we consider it as Accurate ECN blackholing
3910 */
3911 if ((tp->ecn_flags & TE_LOST_SYN) && tp->t_rxtshift <= 2 &&
3912 tp->t_client_accecn_state == tcp_connection_client_accurate_ecn_feature_enabled) {
3913 tp->t_client_accecn_state = tcp_connection_client_accurate_ecn_negotiation_blackholed;
3914 }
3915 /*
3916 * If SYN wasn't retransmitted twice yet, the server supports neither classic nor
3917 * accurate ECN SYN-ACK. Accurate ECN should already be disabled for both half connections
3918 * as TE_ACE_SETUPRECEIVED flag is not set.
3919 */
3920 if (tp->t_client_accecn_state == tcp_connection_client_accurate_ecn_feature_enabled) {
3921 tp->t_client_accecn_state = tcp_connection_client_ecn_not_available;
3922 }
3923 }
3924
3925 /* Do window scaling on this connection? */
3926 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
3927 tp->snd_scale = tp->requested_s_scale;
3928 tp->rcv_scale = tp->request_r_scale;
3929 }
3930
3931 uint32_t recwin = min(tp->rcv_wnd, TCP_MAXWIN << tp->rcv_scale);
3932 if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.get_rlwin != NULL) {
3933 /* For a LBE receiver, also use rledbat_win */
3934 uint32_t rledbat_win = tcp_cc_rledbat.get_rlwin(tp);
3935 if (rledbat_win > 0) {
3936 recwin = min(recwin, rledbat_win);
3937 }
3938 }
3939 tp->rcv_adv += recwin;
3940
3941 tp->snd_una++; /* SYN is acked */
3942 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
3943 tp->snd_nxt = tp->snd_una;
3944 }
3945
/*
* We have sent more in the SYN than what is being
* acked (e.g., TFO). We should immediately restart
* sending from what the receiver has acknowledged.
*/
3952 if (SEQ_GT(tp->snd_nxt, th->th_ack)) {
3953 /*
3954 * rdar://problem/33214601
3955 * There is a middlebox that acks all but one
3956 * byte and still drops the data.
3957 */
3958 if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
3959 (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
3960 tp->snd_max == th->th_ack + 1 &&
3961 tp->snd_max > tp->snd_una + 1) {
3962 tcp_heuristic_tfo_middlebox(tp);
3963
3964 so->so_error = ENODATA;
3965 soevent(so,
3966 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
3967
3968 tp->t_tfo_stats |= TFO_S_ONE_BYTE_PROXY;
3969 }
3970
3971 tp->snd_max = tp->snd_nxt = th->th_ack;
3972 }
3973
/*
* If there's data, delay the ACK; if there's also a FIN,
* ACKNOW will be turned on later.
*/
3978 TCP_INC_VAR(tp->t_unacksegs, segment_count);
3979 if (TCP_ACC_ECN_ON(tp) && ip_ecn == IPTOS_ECN_CE) {
3980 TCP_INC_VAR(tp->t_unacksegs_ce, segment_count);
3981 }
3982 if (DELAY_ACK(tp, th) && tlen != 0) {
3983 if ((tp->t_flags & TF_DELACK) == 0) {
3984 tp->t_flags |= TF_DELACK;
3985 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
3986 }
3987 } else {
3988 tp->t_flags |= TF_ACKNOW;
3989 }
3990 /*
3991 * Received <SYN,ACK> in SYN_SENT[*] state.
3992 * Transitions:
3993 * SYN_SENT --> ESTABLISHED
3994 * SYN_SENT* --> FIN_WAIT_1
3995 */
3996 tp->t_starttime = tcp_now;
3997 tcp_sbrcv_tstmp_check(tp);
3998 if (tp->t_flags & TF_NEEDFIN) {
3999 DTRACE_TCP4(state__change, void, NULL,
4000 struct inpcb *, inp,
4001 struct tcpcb *, tp, int32_t,
4002 TCPS_FIN_WAIT_1);
4003 TCP_LOG_STATE(tp, TCPS_FIN_WAIT_1);
4004 tp->t_state = TCPS_FIN_WAIT_1;
4005 tp->t_flags &= ~TF_NEEDFIN;
4006 thflags &= ~TH_SYN;
4007
4008 TCP_LOG_CONNECTION_SUMMARY(tp);
4009 } else {
4010 DTRACE_TCP4(state__change, void, NULL,
4011 struct inpcb *, inp, struct tcpcb *,
4012 tp, int32_t, TCPS_ESTABLISHED);
4013 TCP_LOG_STATE(tp, TCPS_ESTABLISHED);
4014 tp->t_state = TCPS_ESTABLISHED;
4015 tp->t_timer[TCPT_KEEP] =
4016 OFFSET_FROM_START(tp,
4017 TCP_CONN_KEEPIDLE(tp));
4018 if (nstat_collect) {
4019 nstat_route_connect_success(
4020 inp->inp_route.ro_rt);
4021 }
4022 TCP_LOG_CONNECTED(tp, 0);
4023 /*
4024 * The SYN is acknowledged but una is not
4025 * updated yet. So pass the value of
4026 * ack to compute sndbytes correctly
4027 */
4028 inp_count_sndbytes(inp, th->th_ack);
4029 }
4030 tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
4031 #if MPTCP
4032 /*
4033 * Do not send the connect notification for additional
4034 * subflows until ACK for 3-way handshake arrives.
4035 */
4036 if ((!(tp->t_mpflags & TMPF_MPTCP_TRUE)) &&
4037 (tp->t_mpflags & TMPF_SENT_JOIN)) {
4038 isconnected = FALSE;
4039 } else
4040 #endif /* MPTCP */
4041 isconnected = TRUE;
4042
4043 if ((tp->t_tfo_flags & (TFO_F_COOKIE_REQ | TFO_F_COOKIE_SENT)) ||
4044 (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT)) {
4045 tcp_tfo_synack(tp, &to);
4046
4047 if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
4048 SEQ_LT(tp->snd_una, th->th_ack)) {
4049 tp->t_tfo_stats |= TFO_S_SYN_DATA_ACKED;
4050 tcpstat.tcps_tfo_syn_data_acked++;
4051 #if MPTCP
4052 if (so->so_flags & SOF_MP_SUBFLOW) {
4053 so->so_flags1 |= SOF1_TFO_REWIND;
4054 }
4055 #endif
4056 tcp_tfo_rcv_probe(tp, tlen);
4057 }
4058 }
4059 } else {
/*
* Received initial SYN in SYN-SENT[*] state =>
* simultaneous open.
* Do 3-way handshake:
* SYN-SENT -> SYN-RECEIVED
* SYN-SENT* -> SYN-RECEIVED*
*/
4067 tp->t_flags |= TF_ACKNOW;
4068 tp->t_timer[TCPT_REXMT] = 0;
4069 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
4070 struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
4071 TCP_LOG_STATE(tp, TCPS_SYN_RECEIVED);
4072 tp->t_state = TCPS_SYN_RECEIVED;
4073
/*
* During simultaneous open, TFO should not be used.
* So, we disable it here to prevent data from being
* sent on the SYN/ACK.
*/
4079 tcp_disable_tfo(tp);
4080 }
4081
4082 trimthenstep6:
4083 /*
4084 * Advance th->th_seq to correspond to first data byte.
4085 * If data, trim to stay within window,
4086 * dropping FIN if necessary.
4087 */
4088 th->th_seq++;
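/*
* m_adj() with a negative length trims bytes from the tail of the
* mbuf chain, so the block below drops whatever part of the
* segment lies beyond the receive window (and with it any FIN).
*/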
4089 if (tlen > tp->rcv_wnd) {
4090 todrop = tlen - tp->rcv_wnd;
4091 m_adj(m, -todrop);
4092 tlen = tp->rcv_wnd;
4093 thflags &= ~TH_FIN;
4094 tcpstat.tcps_rcvpackafterwin++;
4095 tcpstat.tcps_rcvbyteafterwin += todrop;
4096 }
4097 tp->snd_wl1 = th->th_seq - 1;
4098 tp->rcv_up = th->th_seq;
4099 /*
4100 * Client side of transaction: already sent SYN and data.
4101 * If the remote host used T/TCP to validate the SYN,
4102 * our data will be ACK'd; if so, enter normal data segment
4103 * processing in the middle of step 5, ack processing.
4104 * Otherwise, goto step 6.
4105 */
4106 if (thflags & TH_ACK) {
4107 goto process_ACK;
4108 }
4109 goto step6;
4110 /*
4111 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
4112 * do normal processing.
4113 *
4114 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
4115 */
4116 case TCPS_LAST_ACK:
4117 case TCPS_CLOSING:
4118 case TCPS_TIME_WAIT:
4119 break; /* continue normal processing */
4120
/* Received a SYN while the connection is already established.
* This is the "half-open connections and other anomalies" case
* described in RFC 793 page 34: send an ACK so that the remote end
* resets the connection or recovers by adjusting its sequence
* numbering. Sending an ACK is in accordance with RFC 5961
* Section 4.2.
*
* For Accurate ECN, if we receive a packet with SYN in ESTABLISHED
* state, we don't send the handshake encoding.
*/
4130 case TCPS_ESTABLISHED:
4131 if (thflags & TH_SYN && tlen <= 0) {
4132 /* Drop the packet silently if we have reached the limit */
4133 if (tcp_is_ack_ratelimited(tp)) {
4134 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN in ESTABLISHED state");
4135 goto drop;
4136 } else {
4137 /* Send challenge ACK */
4138 tcpstat.tcps_synchallenge++;
4139 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN in ESTABLISHED state");
4140 goto dropafterack;
4141 }
4142 }
4143 break;
4144 }
4145
4146 /*
4147 * States other than LISTEN or SYN_SENT.
4148 * First check the RST flag and sequence number since reset segments
4149 * are exempt from the timestamp and connection count tests. This
4150 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
4151 * below which allowed reset segments in half the sequence space
* to fall through and be processed (which gives forged reset
4153 * segments with a random sequence number a 50 percent chance of
4154 * killing a connection).
4155 * Then check timestamp, if present.
4156 * Then check the connection count, if present.
4157 * Then check that at least some bytes of segment are within
4158 * receive window. If segment begins before rcv_nxt,
4159 * drop leading data (and SYN); if nothing left, just ack.
4160 *
4161 *
4162 * If the RST bit is set, check the sequence number to see
4163 * if this is a valid reset segment.
4164 * RFC 793 page 37:
4165 * In all states except SYN-SENT, all reset (RST) segments
4166 * are validated by checking their SEQ-fields. A reset is
4167 * valid if its sequence number is in the window.
4168 * Note: this does not take into account delayed ACKs, so
4169 * we should test against last_ack_sent instead of rcv_nxt.
4170 * The sequence number in the reset segment is normally an
* echo of our outgoing acknowledgement numbers, but some hosts
4172 * send a reset with the sequence number at the rightmost edge
4173 * of our receive window, and we have to handle this case.
4174 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
4175 * that brute force RST attacks are possible. To combat this,
4176 * we use a much stricter check while in the ESTABLISHED state,
4177 * only accepting RSTs where the sequence number is equal to
4178 * last_ack_sent. In all other states (the states in which a
4179 * RST is more likely), the more permissive check is used.
4180 * RFC 5961 Section 3.2: if the RST bit is set, sequence # is
4181 * within the receive window and last_ack_sent == seq,
4182 * then reset the connection. Otherwise if the seq doesn't
4183 * match last_ack_sent, TCP must send challenge ACK. Perform
4184 * rate limitation when sending the challenge ACK.
* If we have multiple segments in flight, the initial reset
4186 * segment sequence numbers will be to the left of last_ack_sent,
4187 * but they will eventually catch up.
4188 * In any case, it never made sense to trim reset segments to
4189 * fit the receive window since RFC 1122 says:
4190 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
4191 *
4192 * A TCP SHOULD allow a received RST segment to include data.
4193 *
4194 * DISCUSSION
4195 * It has been suggested that a RST segment could contain
4196 * ASCII text that encoded and explained the cause of the
4197 * RST. No standard has yet been established for such
4198 * data.
4199 *
4200 * If the reset segment passes the sequence number test examine
4201 * the state:
4202 * SYN_RECEIVED STATE:
4203 * If passive open, return to LISTEN state.
4204 * If active open, inform user that connection was refused.
4205 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
4206 * Inform user that connection was reset, and close tcb.
4207 * CLOSING, LAST_ACK STATES:
4208 * Close the tcb.
4209 * TIME_WAIT STATE:
4210 * Drop the segment - see Stevens, vol. 2, p. 964 and
4211 * RFC 1337.
4212 *
4213 * Radar 4803931: Allows for the case where we ACKed the FIN but
4214 * there is already a RST in flight from the peer.
4215 * In that case, accept the RST for non-established
4216 * state if it's one off from last_ack_sent.
4217 *
4218 * Also be lenient in closing states to allow last_ack_sent and also
4219 * last_ack_sent - 1 in case there is a lot of delay upstream
4220 * and it is an older segment that is triggering the RST
4221 */
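/*
* In short, the validation below implements:
* seq == last_ack_sent (or one off) => process the RST
* seq otherwise within the window => rate-limited challenge ACK
* seq outside the window => drop silently
*/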
4222 if (thflags & TH_RST) {
4223 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
4224 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
4225 ((tp->rcv_wnd == 0 || tp->t_state >= TCPS_CLOSE_WAIT) &&
4226 ((tp->last_ack_sent == th->th_seq) ||
4227 (tp->last_ack_sent - 1 == th->th_seq)))) {
4228 if (tp->last_ack_sent == th->th_seq || tp->last_ack_sent - 1 == th->th_seq) {
4229 switch (tp->t_state) {
4230 case TCPS_SYN_RECEIVED:
4231 IF_TCP_STATINC(ifp, rstinsynrcv);
4232 so->so_error = ECONNREFUSED;
4233 goto close;
4234
4235 case TCPS_ESTABLISHED:
4236 if ((TCP_ECN_ENABLED(tp) || TCP_ACC_ECN_ON(tp)) &&
4237 tp->snd_una == tp->iss + 1 &&
4238 SEQ_GT(tp->snd_max, tp->snd_una)) {
4239 /*
4240 * If the first data packet on an
4241 * ECN connection, receives a RST
4242 * increment the heuristic
4243 */
4244 tcp_heuristic_ecn_droprst(tp);
4245 }
4246 OS_FALLTHROUGH;
4247 case TCPS_FIN_WAIT_1:
4248 case TCPS_CLOSE_WAIT:
4249 case TCPS_FIN_WAIT_2:
4250 so->so_error = ECONNRESET;
4251 close:
4252 soevent(so,
4253 (SO_FILT_HINT_LOCKED |
4254 SO_FILT_HINT_CONNRESET));
4255
4256 tcpstat.tcps_drops++;
4257 tp = tcp_close(tp);
4258 break;
4259
4260 case TCPS_CLOSING:
4261 case TCPS_LAST_ACK:
4262 tp = tcp_close(tp);
4263 break;
4264
4265 case TCPS_TIME_WAIT:
4266 break;
4267 }
4268 } else {
4269 tcpstat.tcps_badrst++;
4270 /* Drop if we have reached the ACK limit */
4271 if (tcp_is_ack_ratelimited(tp)) {
4272 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad RST in ESTABLISHED state");
4273 goto drop;
4274 } else {
4275 /* Send challenge ACK */
4276 tcpstat.tcps_rstchallenge++;
4277 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad RST in ESTABLISHED state");
4278 goto dropafterack;
4279 }
4280 }
4281 }
4282 drop_reason = DROP_REASON_TCP_BAD_RST;
4283 goto drop;
4284 }
4285
4286 /*
4287 * RFC 1323 PAWS: If we have a timestamp reply on this segment
4288 * and it's less than ts_recent, drop it.
4289 */
4290 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
4291 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
4292 /* Check to see if ts_recent is over 24 days old. */
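/*
* Roughly 24 days is how long a 1ms-granularity timestamp clock
* takes to cover half of its 32-bit space (2^31 ms is about 24.8
* days); beyond that, ts_recent can no longer be compared against
* new timestamps reliably.
*/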
4293 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
4294 /*
4295 * Invalidate ts_recent. If this segment updates
4296 * ts_recent, the age will be reset later and ts_recent
4297 * will get a valid value. If it does not, setting
4298 * ts_recent to zero will at least satisfy the
4299 * requirement that zero be placed in the timestamp
4300 * echo reply when ts_recent isn't valid. The
4301 * age isn't reset until we get a valid ts_recent
4302 * because we don't want out-of-order segments to be
4303 * dropped when ts_recent is old.
4304 */
4305 tp->ts_recent = 0;
4306 } else {
4307 tcpstat.tcps_rcvduppack++;
4308 tcpstat.tcps_rcvdupbyte += tlen;
4309 tp->t_pawsdrop++;
4310 tcpstat.tcps_pawsdrop++;
4311
/*
* A PAWS-drop when ECN is being used? That indicates
* that ECT-marked packets take a different path, with
* different congestion characteristics.
*
* Only fall back when fewer than 2GB have been received,
* as PAWS really has no reason to kick in earlier.
*/
4320 if ((TCP_ECN_ENABLED(tp) || TCP_ACC_ECN_ON(tp)) &&
4321 inp->inp_stat->rxbytes < 2147483648) {
4322 INP_INC_IFNET_STAT(inp, ecn_fallback_reorder);
4323 tcpstat.tcps_ecn_fallback_reorder++;
4324 tcp_heuristic_ecn_aggressive(tp);
4325 }
4326
4327 if (nstat_collect) {
4328 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt,
4329 1, tlen, NSTAT_RX_FLAG_DUPLICATE);
4330 INP_ADD_STAT(inp, ifnet_count_type,
4331 rxpackets, 1);
4332 INP_ADD_STAT(inp, ifnet_count_type,
4333 rxbytes, tlen);
4334 tp->t_stat.rxduplicatebytes += tlen;
4335 inp_set_activity_bitmap(inp);
4336 }
4337 if (tlen > 0) {
4338 goto dropafterack;
4339 }
4340 drop_reason = DROP_REASON_TCP_PAWS;
4341 goto drop;
4342 }
4343 }
4344
4345 /*
4346 * In the SYN-RECEIVED state, validate that the packet belongs to
4347 * this connection before trimming the data to fit the receive
4348 * window. Check the sequence number versus IRS since we know
4349 * the sequence numbers haven't wrapped. This is a partial fix
4350 * for the "LAND" DoS attack.
4351 */
4352 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
4353 IF_TCP_STATINC(ifp, dospacket);
4354 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_RECEIVED bad SEQ");
4355 drop_reason = DROP_REASON_TCP_SYN_RECEIVED_BAD_SEQ;
4356 goto dropwithreset;
4357 }
4358
/*
* Check if there is old data at the beginning of the window,
* i.e. the sequence number is before rcv_nxt.
*/
4363 todrop = tp->rcv_nxt - th->th_seq;
4364 if (todrop > 0) {
4365 boolean_t is_syn_set = FALSE;
4366
4367 if (thflags & TH_SYN) {
4368 is_syn_set = TRUE;
4369 thflags &= ~TH_SYN;
4370 th->th_seq++;
4371 if (th->th_urp > 1) {
4372 th->th_urp--;
4373 } else {
4374 thflags &= ~TH_URG;
4375 }
4376 todrop--;
4377 }
/*
* The following if statement is from Stevens, vol. 2, p. 960.
* The amount of duplicate data is greater than or equal to
* the size of the segment - the entire segment is a duplicate.
*/
4383 if (todrop > tlen
4384 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
4385 /*
4386 * Any valid FIN must be to the left of the window.
4387 * At this point the FIN must be a duplicate or out
4388 * of sequence; drop it.
4389 */
4390 thflags &= ~TH_FIN;
4391
4392 /*
4393 * Send an ACK to resynchronize and drop any data.
4394 * But keep on processing for RST or ACK.
4395 *
4396 * If the SYN bit was originally set, then only send
4397 * an ACK if we are not rate-limiting this connection.
4398 */
4399 if (is_syn_set) {
4400 if (!tcp_is_ack_ratelimited(tp)) {
4401 tcpstat.tcps_synchallenge++;
4402 tp->t_flags |= TF_ACKNOW;
4403 }
4404 } else {
4405 tp->t_flags |= TF_ACKNOW;
4406 }
4407
4408 if (todrop == 1) {
4409 /* This could be a keepalive */
4410 soevent(so, SO_FILT_HINT_LOCKED |
4411 SO_FILT_HINT_KEEPALIVE);
4412 }
4413 todrop = tlen;
4414 tcpstat.tcps_rcvduppack++;
4415 tcpstat.tcps_rcvdupbyte += todrop;
4416 } else {
4417 tcpstat.tcps_rcvpartduppack++;
4418 tcpstat.tcps_rcvpartdupbyte += todrop;
4419 }
4420
4421 if (todrop > 1) {
4422 /*
4423 * Note the duplicate data sequence space so that
4424 * it can be reported in DSACK option.
4425 */
4426 tp->t_dsack_lseq = th->th_seq;
4427 tp->t_dsack_rseq = th->th_seq + todrop;
4428 tp->t_flags |= TF_ACKNOW;
4429 }
4430 if (nstat_collect) {
4431 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1,
4432 todrop, NSTAT_RX_FLAG_DUPLICATE);
4433 INP_ADD_STAT(inp, ifnet_count_type, rxpackets, 1);
4434 INP_ADD_STAT(inp, ifnet_count_type, rxbytes, todrop);
4435 tp->t_stat.rxduplicatebytes += todrop;
4436 inp_set_activity_bitmap(inp);
4437 }
4438 drop_hdrlen += todrop; /* drop from the top afterwards */
4439 th->th_seq += todrop;
4440 tlen -= todrop;
4441 if (th->th_urp > todrop) {
4442 th->th_urp -= todrop;
4443 } else {
4444 thflags &= ~TH_URG;
4445 th->th_urp = 0;
4446 }
4447 }
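/*
* Worked example: with rcv_nxt = 1000, th_seq = 900 and tlen = 300,
* todrop is 100; the 100 duplicate bytes are skipped by advancing
* drop_hdrlen and th_seq, leaving tlen = 200 bytes of new data.
*/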
4448
/*
* If new data are received on a connection after the user
* processes are gone, then RST the other end.
* Also send a RST when we receive a data segment after we've
* sent our FIN and the socket is defunct.
* Note that an MPTCP subflow socket would have SS_NOFDREF set
* by default. So, if it's an MPTCP subflow, we check the
* MPTCP-level socket state for SS_NOFDREF instead.
*/
4458 if (tlen) {
4459 boolean_t close_it = FALSE;
4460
4461 if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF) &&
4462 tp->t_state > TCPS_CLOSE_WAIT) {
4463 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SS_NOFDREF");
4464 close_it = TRUE;
4465 }
4466
4467 if ((so->so_flags & SOF_MP_SUBFLOW) && (mptetoso(tptomptp(tp)->mpt_mpte)->so_state & SS_NOFDREF) &&
4468 tp->t_state > TCPS_CLOSE_WAIT) {
4469 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SOF_MP_SUBFLOW SS_NOFDREF");
4470 close_it = TRUE;
4471 }
4472
4473 if ((so->so_flags & SOF_DEFUNCT) && tp->t_state > TCPS_FIN_WAIT_1) {
4474 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SOF_DEFUNCT");
4475 close_it = TRUE;
4476 }
4477
4478 if (so->so_state & SS_CANTRCVMORE) {
4479 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SS_CANTRCVMORE");
4480 close_it = TRUE;
4481 }
4482
4483 if (close_it) {
4484 tp = tcp_close(tp);
4485 tcpstat.tcps_rcvafterclose++;
4486 IF_TCP_STATINC(ifp, cleanup);
4487 drop_reason = DROP_REASON_TCP_RECV_AFTER_CLOSE;
4488 goto dropwithreset;
4489 }
4490 }
4491
4492 /*
4493 * If segment ends after window, drop trailing data
4494 * (and PUSH and FIN); if nothing left, just ACK.
4495 */
4496 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
4497 if (todrop > 0) {
4498 tcpstat.tcps_rcvpackafterwin++;
4499 if (todrop >= tlen) {
4500 tcpstat.tcps_rcvbyteafterwin += tlen;
4501 /*
4502 * If a new connection request is received
4503 * while in TIME_WAIT, drop the old connection
4504 * and start over if the sequence numbers
4505 * are above the previous ones.
4506 */
4507 if (thflags & TH_SYN &&
4508 tp->t_state == TCPS_TIME_WAIT &&
4509 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
4510 iss = tcp_new_isn(tp);
4511 tp = tcp_close(tp);
4512 socket_unlock(so, 1);
4513 goto findpcb;
4514 }
4515 /*
4516 * If window is closed can only take segments at
4517 * window edge, and have to drop data and PUSH from
4518 * incoming segments. Continue processing, but
4519 * remember to ack. Otherwise, drop segment
4520 * and ack.
4521 */
4522 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
4523 tp->t_flags |= TF_ACKNOW;
4524 tcpstat.tcps_rcvwinprobe++;
4525 } else {
4526 goto dropafterack;
4527 }
4528 } else {
4529 tcpstat.tcps_rcvbyteafterwin += todrop;
4530 }
4531 m_adj(m, -todrop);
4532 tlen -= todrop;
4533 thflags &= ~(TH_PUSH | TH_FIN);
4534 }
4535
4536 /*
4537 * If last ACK falls within this segment's sequence numbers,
4538 * record its timestamp.
4539 * NOTE:
4540 * 1) That the test incorporates suggestions from the latest
* proposal of the tcplw@cray.com list (Braden 1993/04/26).
4542 * 2) That updating only on newer timestamps interferes with
4543 * our earlier PAWS tests, so this check should be solely
4544 * predicated on the sequence space of this segment.
4545 * 3) That we modify the segment boundary check to be
4546 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
4547 * instead of RFC1323's
4548 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
4549 * This modified check allows us to overcome RFC1323's
4550 * limitations as described in Stevens TCP/IP Illustrated
4551 * Vol. 2 p.869. In such cases, we can still calculate the
4552 * RTT correctly when RCV.NXT == Last.ACK.Sent.
4553 */
4554 if ((to.to_flags & TOF_TS) != 0 &&
4555 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
4556 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
4557 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
4558 tp->ts_recent_age = tcp_now;
4559 tp->ts_recent = to.to_tsval;
4560 }
4561
4562 /*
4563 * Stevens: If a SYN is in the window, then this is an
4564 * error and we send an RST and drop the connection.
4565 *
4566 * RFC 5961 Section 4.2
4567 * Send challenge ACK for any SYN in synchronized state
4568 * Perform rate limitation in doing so.
4569 */
4570 if (thflags & TH_SYN) {
4571 if (!tcp_syn_data_valid(tp, th, tlen)) {
4572 tcpstat.tcps_badsyn++;
4573 /* Drop if we have reached ACK limit */
4574 if (tcp_is_ack_ratelimited(tp)) {
4575 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN data invalid");
4576 drop_reason = DROP_REASON_TCP_SYN_DATA_INVALID;
4577 goto drop;
4578 } else {
4579 /* Send challenge ACK */
4580 tcpstat.tcps_synchallenge++;
4581 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN data invalid");
4582 drop_reason = DROP_REASON_TCP_SYN_DATA_INVALID;
4583 goto dropafterack;
4584 }
4585 } else {
4586 /*
4587 * Received SYN (/ACK) with data.
4588 * Move sequence number along to process the data.
4589 */
4590 th->th_seq++;
4591 thflags &= ~TH_SYN;
4592 }
4593 }
4594
4595 /*
4596 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
4597 * flag is on (half-synchronized state), then queue data for
4598 * later processing; else drop segment and return.
4599 */
4600 if ((thflags & TH_ACK) == 0) {
4601 if (tp->t_state == TCPS_SYN_RECEIVED) {
4602 if ((TFO_ENABLED(tp))) {
/*
* So, we received a valid segment while in
* SYN-RECEIVED.
* As this cannot be an RST (see the check a bit
* higher up), and it does not have the ACK-flag
* set, we want to retransmit the SYN/ACK.
* Thus, we have to reset snd_nxt to snd_una to
* trigger going back to sending the SYN/ACK.
* This is more consistent with the behavior of
* tcp_output(), which expects to send the
* segment that is pointed to by snd_nxt.
*/
4616 tp->snd_nxt = tp->snd_una;
4617
/*
* We need to make absolutely sure that we are
* going to reply to a duplicate SYN segment.
*/
4622 if (th->th_flags & TH_SYN) {
4623 needoutput = 1;
4624 }
4625 }
/* Process this the same as a newly received Accurate ECN SYN */
4627 int ace_flags = ((th->th_x2 << 8) | thflags) & TH_ACE;
4628 tcp_input_process_accecn_syn(tp, ace_flags, ip_ecn);
4629
4630 goto step6;
4631 } else if (tp->t_flags & TF_ACKNOW) {
4632 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad ACK");
4633 drop_reason = DROP_REASON_TCP_BAD_ACK;
4634 goto dropafterack;
4635 } else {
4636 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad ACK");
4637 drop_reason = DROP_REASON_TCP_BAD_ACK;
4638 goto drop;
4639 }
4640 }
4641
4642 /*
4643 * Ack processing.
4644 */
4645
4646 switch (tp->t_state) {
4647 /*
4648 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
4649 * ESTABLISHED state and continue processing.
4650 * The ACK was checked above.
4651 */
4652 case TCPS_SYN_RECEIVED:
4653
4654 tcpstat.tcps_connects++;
4655
4656 /* Do window scaling? */
4657 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
4658 tp->snd_scale = tp->requested_s_scale;
4659 tp->rcv_scale = tp->request_r_scale;
4660 tp->snd_wnd = th->th_win << tp->snd_scale;
4661 tp->max_sndwnd = tp->snd_wnd;
4662 tiwin = tp->snd_wnd;
4663 }
4664 /*
4665 * Make transitions:
4666 * SYN-RECEIVED -> ESTABLISHED
4667 * SYN-RECEIVED* -> FIN-WAIT-1
4668 */
4669 tp->t_starttime = tcp_now;
4670 tcp_sbrcv_tstmp_check(tp);
4671 if (tp->t_flags & TF_NEEDFIN) {
4672 DTRACE_TCP4(state__change, void, NULL,
4673 struct inpcb *, inp,
4674 struct tcpcb *, tp, int32_t, TCPS_FIN_WAIT_1);
4675 TCP_LOG_STATE(tp, TCPS_FIN_WAIT_1);
4676 tp->t_state = TCPS_FIN_WAIT_1;
4677 tp->t_flags &= ~TF_NEEDFIN;
4678
4679 TCP_LOG_CONNECTION_SUMMARY(tp);
4680 } else {
4681 DTRACE_TCP4(state__change, void, NULL,
4682 struct inpcb *, inp,
4683 struct tcpcb *, tp, int32_t, TCPS_ESTABLISHED);
4684 TCP_LOG_STATE(tp, TCPS_ESTABLISHED);
4685 tp->t_state = TCPS_ESTABLISHED;
4686 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
4687 TCP_CONN_KEEPIDLE(tp));
4688 if (nstat_collect) {
4689 nstat_route_connect_success(
4690 tp->t_inpcb->inp_route.ro_rt);
4691 }
4692 TCP_LOG_CONNECTED(tp, 0);
4693 /*
4694 * The SYN is acknowledged but una is not updated
4695 * yet. So pass the value of ack to compute
4696 * sndbytes correctly
4697 */
4698 inp_count_sndbytes(inp, th->th_ack);
4699 }
4700 tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
4701
4702 VERIFY(LIST_EMPTY(&tp->t_segq));
4703 tp->snd_wl1 = th->th_seq - 1;
4704
/*
* An AccECN server in SYN-RCVD state received an ACK with
* SYN=0. Process the handshake encoding present in the ACK
* for the SYN-ACK and update the receive-side counters.
*/
4710 if (TCP_ACC_ECN_ON(tp) && (thflags & (TH_SYN | TH_ACK)) == TH_ACK) {
4711 const uint32_t ace_flags = ((th->th_x2 << 8) | thflags) & TH_ACE;
4712 if (tlen == 0 && to.to_nsacks == 0) {
/*
* The ACK for the SYN-ACK reflects the state (ECN) in which the
* SYN-ACK packet was delivered. Use Table 4 of the Accurate ECN
* draft to decode it, but only when a pure ACK with no SACK
* block is received.
* 0|0|0 will fail Accurate ECN negotiation and disable ECN.
*/
4719 switch (ace_flags) {
4720 case (0 | TH_CWR | 0):
4721 /* Non-ECT SYN-ACK was delivered */
4722 tp->t_aecn.t_snd_ce_packets = 5;
4723 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested) {
4724 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_success;
4725 }
4726 break;
4727 case (0 | TH_CWR | TH_ECE):
4728 /* ECT1 SYN-ACK was delivered, mangling detected */
4729 OS_FALLTHROUGH;
4730 case (TH_AE | 0 | 0):
4731 /* ECT0 SYN-ACK was delivered, mangling detected */
4732 tp->t_aecn.t_snd_ce_packets = 5;
4733 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested) {
4734 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_success_ect_mangling_detected;
4735 }
4736 break;
4737 case (TH_AE | TH_CWR | 0):
/*
* CE SYN-ACK was delivered. Even though mangling happened,
* CE could indicate congestion at a node after the mangling
* occurred, so set cwnd to 2 segments.
*/
4743 tp->t_aecn.t_snd_ce_packets = 6;
4744 tp->snd_cwnd = 2 * tp->t_maxseg;
4745 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested) {
4746 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_success_ect_mangling_detected;
4747 }
4748 break;
4749 case (0 | 0 | 0):
4750 /* Disable ECN, as ACE fields were zeroed */
4751 tp->ecn_flags &= ~(TE_SETUPRECEIVED | TE_SENDIPECT |
4752 TE_SENDCWR | TE_ACE_SETUPRECEIVED);
4753 /*
4754 * Since last ACK has no ECN flag set and TE_LOST_SYNACK is set, this is in response
4755 * to the second (non-ECN setup) SYN-ACK retransmission. In such a case, we assume
4756 * that AccECN SYN-ACK was blackholed.
4757 */
4758 if ((tp->ecn_flags & TE_LOST_SYNACK) && tp->t_rxtshift <= 2 &&
4759 (tp->t_server_accecn_state == tcp_connection_server_classic_ecn_requested ||
4760 tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested)) {
4761 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_blackholed;
4762 }
/*
* The SYN-ACK hasn't been retransmitted twice yet, so this most
* likely means bleaching of the ACE field on the path from
* client to server on the last ACK.
*/
4767 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested) {
4768 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_ace_bleaching_detected;
4769 }
4770 break;
4771 default:
4772 /* Unused values for forward compatibility */
4773 tp->t_aecn.t_snd_ce_packets = 5;
4774 break;
4775 }
4776 /* Update the time for this newly received last ACK */
4777 if ((to.to_flags & TOF_TS) != 0 && (to.to_tsecr != 0) &&
4778 (tp->t_last_ack_tsecr == 0 || TSTMP_GEQ(to.to_tsecr, tp->t_last_ack_tsecr))) {
4779 tp->t_last_ack_tsecr = to.to_tsecr;
4780 }
4781 } else if (to.to_nsacks == 0) {
/*
* If the 3rd ACK is lost, we won't receive the last-ACK
* encoding. We will move the server to AccECN mode
* regardless.
*/
4787 tp->t_aecn.t_snd_ce_packets = 5;
4788 if (tp->t_server_accecn_state == tcp_connection_server_accurate_ecn_requested) {
4789 tp->t_server_accecn_state = tcp_connection_server_accurate_ecn_negotiation_success;
4790 }
4791 }
4792 /* Increment receive side counters based on IP-ECN */
4793 tcp_input_ip_ecn(tp, inp, (uint32_t)tlen, (uint32_t)segment_count, ip_ecn);
4794 }
4795
4796 #if MPTCP
4797 /*
4798 * Do not send the connect notification for additional subflows
4799 * until ACK for 3-way handshake arrives.
4800 */
4801 if ((!(tp->t_mpflags & TMPF_MPTCP_TRUE)) &&
4802 (tp->t_mpflags & TMPF_SENT_JOIN)) {
4803 isconnected = FALSE;
4804 } else
4805 #endif /* MPTCP */
4806 isconnected = TRUE;
4807 if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
/* We already did this when receiving the SYN */
4809 isconnected = FALSE;
4810
4811 OSDecrementAtomic(&tcp_tfo_halfcnt);
4812
4813 /* Panic if something has gone terribly wrong. */
4814 VERIFY(tcp_tfo_halfcnt >= 0);
4815
4816 tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
4817 }
4818
/*
* In case there is data in the send-queue (e.g., TFO is being
* used, or connectx+data has been done), then if we would
* "FALLTHROUGH", we would handle this ACK as if data had been
* acknowledged. We prevent this by increasing snd_una by 1, so
* that the SYN is not considered as data (snd_una++ is actually
* also done in SYN_SENT-state as part of the regular TCP stack).
*
* In case there is data on this ack as well, the data will be
* handled by the label "dodata" right after step6.
*/
4831 if (so->so_snd.sb_cc) {
4832 tp->snd_una++; /* SYN is acked */
4833 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
4834 tp->snd_nxt = tp->snd_una;
4835 }
4836
4837 /*
4838 * No duplicate-ACK handling is needed. So, we
4839 * directly advance to processing the ACK (aka,
4840 * updating the RTT estimation,...)
4841 *
4842 * But, we first need to handle eventual SACKs,
4843 * because TFO will start sending data with the
4844 * SYN/ACK, so it might be that the client
4845 * includes a SACK with its ACK.
4846 */
4847 if (SACK_ENABLED(tp) &&
4848 (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes))) {
4849 tcp_sack_doack(tp, &to, th, &sack_bytes_acked, &highest_sacked_seq);
4850 }
4851
4852 goto process_ACK;
4853 }
4854
4855 OS_FALLTHROUGH;
4856
/*
* In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range
* ACKs. If the ack is in the range
* tp->snd_una < th->th_ack <= tp->snd_max
* then advance tp->snd_una to th->th_ack and drop
* data from the retransmission queue. If this ACK reflects
* more up-to-date window information, we update our window
* information.
*/
4865 case TCPS_ESTABLISHED:
4866 case TCPS_FIN_WAIT_1:
4867 case TCPS_FIN_WAIT_2:
4868 case TCPS_CLOSE_WAIT:
4869 case TCPS_CLOSING:
4870 case TCPS_LAST_ACK:
4871 case TCPS_TIME_WAIT:
4872 if (SEQ_GT(th->th_ack, tp->snd_max)) {
4873 tcpstat.tcps_rcvacktoomuch++;
4874 if (tcp_is_ack_ratelimited(tp)) {
4875 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 rcvacktoomuch");
4876 drop_reason = DROP_REASON_TCP_ACK_TOOMUCH;
4877 goto drop;
4878 } else {
4879 drop_reason = DROP_REASON_TCP_ACK_TOOMUCH;
4880 goto dropafterack;
4881 }
4882 }
4883 if (SEQ_LT(th->th_ack, tp->snd_una - tp->max_sndwnd)) {
4884 if (tcp_is_ack_ratelimited(tp)) {
4885 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 bad ACK");
4886 drop_reason = DROP_REASON_TCP_OLD_ACK;
4887 goto drop;
4888 } else {
4889 drop_reason = DROP_REASON_TCP_OLD_ACK;
4890 goto dropafterack;
4891 }
4892 }
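/*
* RFC 5961 Section 5.2: an ACK below SND.UNA - MAX.SND.WND is too
* old to be legitimate, so it is answered with at most a
* rate-limited challenge ACK rather than being processed.
*/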
4893 if (SACK_ENABLED(tp) && to.to_nsacks > 0) {
4894 recvd_dsack = tcp_sack_process_dsack(tp, &to, th, &dsack_tlp);
4895 if (TCP_RACK_ENABLED(tp)) {
4896 /* If DSACK was received (not due to TLP), then increase the reordering window */
4897 if (recvd_dsack && !dsack_tlp) {
4898 tp->rack.dsack_round_seen = 1;
4899 }
4900 tcp_rack_update_reordering_window(tp, highest_sacked_seq);
4901 }
4902 /*
4903 * If DSACK is received and this packet has no
4904 * other SACK information, it can be dropped.
4905 * We do not want to treat it as a duplicate ack.
4906 */
4907 if (recvd_dsack &&
4908 SEQ_LEQ(th->th_ack, tp->snd_una) &&
4909 to.to_nsacks == 0) {
4910 tcp_bad_rexmt_check(tp, th, &to);
4911 goto drop;
4912 }
4913 }
4914
4915 if (SACK_ENABLED(tp) &&
4916 (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes))) {
4917 tcp_sack_doack(tp, &to, th, &sack_bytes_acked, &highest_sacked_seq);
4918 }
4919
4920 #if MPTCP
4921 if (tp->t_mpuna && SEQ_GEQ(th->th_ack, tp->t_mpuna)) {
4922 if (tp->t_mpflags & TMPF_PREESTABLISHED) {
4923 /* MP TCP establishment succeeded */
4924 tp->t_mpuna = 0;
4925 if (tp->t_mpflags & TMPF_JOINED_FLOW) {
4926 if (tp->t_mpflags & TMPF_SENT_JOIN) {
4927 tp->t_mpflags &=
4928 ~TMPF_PREESTABLISHED;
4929 tp->t_mpflags |=
4930 TMPF_MPTCP_TRUE;
4931
4932 tp->t_timer[TCPT_JACK_RXMT] = 0;
4933 tp->t_mprxtshift = 0;
4934 isconnected = TRUE;
4935 } else {
4936 isconnected = FALSE;
4937 }
4938 } else {
4939 isconnected = TRUE;
4940 }
4941 }
4942 }
4943 #endif /* MPTCP */
4944
4945 tcp_tfo_rcv_ack(tp, th);
4946
/*
* If we have outstanding data (other than
* a window probe), this is a completely
* duplicate ack and the ack is the biggest we've seen.
*
* We need to accommodate a change in window on duplicate acks
* to allow operating systems that update the window during
* recovery with SACK.
*/
4956 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
4957 /*
4958 * Update snd_fack when new SACK blocks are received
4959 * without advancing the ACK
4960 */
4961 if (TCP_RACK_ENABLED(tp) && sack_bytes_acked > 0 &&
4962 SEQ_LT(tp->snd_fack, highest_sacked_seq)) {
4963 tp->snd_fack = highest_sacked_seq;
4964 }
4965
/*
* Process AccECN feedback here for control packets
* that don't carry any newly acked or SACKed bytes.
*/
4970 if (TCP_ACC_ECN_ON(tp) && (tp->ecn_flags & TE_SENDIPECT) &&
4971 (sack_bytes_acked == 0)) {
4972 tp->total_ect_packets_acked += 1;
4973
4974 bool newly_acked_time = false;
4975 if (acked == 0 && (to.to_flags & TOF_TS) != 0 && to.to_tsecr != 0 &&
4976 TSTMP_GT(to.to_tsecr, tp->t_last_ack_tsecr)) {
4977 newly_acked_time = true;
4978 }
4979 if (newly_acked_time) {
4980 tcp_process_accecn(tp, &to, th, 1, ace);
4981 }
4982 }
4983
4984 if (tlen == 0 && (tiwin == tp->snd_wnd ||
4985 (to.to_nsacks > 0 && sack_bytes_acked > 0))) {
4986 uint32_t old_dupacks;
4987 /*
4988 * If both ends send FIN at the same time,
4989 * then the ack will be a duplicate ack
4990 * but we have to process the FIN. Check
4991 * for this condition and process the FIN
4992 * instead of the dupack
4993 */
4994 if ((thflags & TH_FIN) &&
4995 !TCPS_HAVERCVDFIN(tp->t_state)) {
4996 break;
4997 }
4998 process_dupack:
4999 old_dupacks = tp->t_dupacks;
5000 #if MPTCP
5001 /*
5002 * MPTCP options that are ignored must
5003 * not be treated as duplicate ACKs.
5004 */
5005 if (to.to_flags & TOF_MPTCP) {
5006 goto drop;
5007 }
5008
5009 if ((isconnected) && (tp->t_mpflags & TMPF_JOINED_FLOW)) {
5010 break;
5011 }
5012 #endif /* MPTCP */
5013 /*
5014 * If a duplicate acknowledgement was seen
5015 * after ECN, it indicates packet loss in
5016 * addition to ECN. Reset INRECOVERY flag
5017 * so that we can process partial acks
5018 * correctly
5019 */
5020 if (tp->ecn_flags & TE_INRECOVERY) {
5021 tp->ecn_flags &= ~TE_INRECOVERY;
5022 }
5023
5024 tcpstat.tcps_rcvdupack++;
5025 if (SACK_ENABLED(tp)) {
5026 tp->t_dupacks += max(1, sack_bytes_acked / tp->t_maxseg);
5027 } else {
5028 ++tp->t_dupacks;
5029 }
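/*
* With SACK, each t_maxseg bytes of newly SACKed data counts as
* one duplicate ACK, so a single ACK carrying large SACK blocks
* can advance t_dupacks by more than one step at a time.
*/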
5030
5031 tp->sackhint.sack_bytes_acked += sack_bytes_acked;
5032
5033 if (sack_bytes_acked > 0 && TCP_ACC_ECN_ON(tp) &&
5034 (tp->ecn_flags & TE_SENDIPECT) && tp->t_state == TCPS_ESTABLISHED) {
5035 uint32_t pkts_sacked = tcp_packets_this_ack(tp, sack_bytes_acked);
5036 tp->total_ect_packets_acked += pkts_sacked;
5037 tcp_process_accecn(tp, &to, th, pkts_sacked, ace);
5038 }
5039 /*
5040 * Check if we need to reset the limit on
5041 * early retransmit
5042 */
5043 if (tp->t_early_rexmt_count > 0 &&
5044 TSTMP_GEQ(tcp_now,
5045 (tp->t_early_rexmt_win +
5046 TCP_EARLY_REXMT_WIN))) {
5047 tp->t_early_rexmt_count = 0;
5048 }
5049
5050 /*
5051 * Is early retransmit needed? We check for
5052 * this when the connection is waiting for
5053 * duplicate acks to enter fast recovery.
5054 */
5055 if (!IN_FASTRECOVERY(tp)) {
5056 tcp_early_rexmt_check(tp, th);
5057 }
5058
5059 /*
5060 * Detect loss based on RACK during dupACK processing to mark lost
5061 * segments before tcp_output is called for retransmission
5062 */
5063 if (TCP_RACK_ENABLED(tp) && tcp_rack_detect_loss_and_arm_timer(tp, tp->t_dupacks)) {
5064 rack_loss_detected = true;
5065 }
5066 /*
5067 * Below are four different ways of processing a (dup) ACK:
5068 * 1. Not a valid dup ACK
5069 * 2. More than 3 dup ACKs but already in Fast Recovery
5070 * 3. Entered Fast Recovery for the first time
5071 * 4. Received fewer than 3 dup ACKs, evaluate if we can do Limited Transmit
5072 */
5073 if (tp->t_timer[TCPT_REXMT] == 0 ||
5074 (th->th_ack != tp->snd_una && sack_bytes_acked == 0)) {
5075 /*
5076 * Either the retransmit timer is not running (no outstanding
5077 * data), or the ACK is below snd_una with no new SACK
5078 * information: not a valid duplicate ACK.
5079 tp->t_dupacks = 0;
5080 tp->t_rexmtthresh = tcprexmtthresh;
5081 } else if ((!TCP_RACK_ENABLED(tp) && tp->t_dupacks > tp->t_rexmtthresh && old_dupacks >= tp->t_rexmtthresh) ||
5082 IN_FASTRECOVERY(tp)) {
5083 /*
5084 * We are already in Fast Recovery and t_dupacks is greater than retransmit threshold.
5085 * Increase the cwnd by 1MSS if allowed
5086 */
5087
5088 /*
5089 * If this connection was seeing packet
5090 * reordering, then recovery might be
5091 * delayed to disambiguate between
5092 * reordering and loss
5093 */
5094 if (SACK_ENABLED(tp) && !IN_FASTRECOVERY(tp) &&
5095 (tp->t_flagsext &
5096 (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) ==
5097 (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) {
5098 /*
5099 * Since the SACK information is already
5100 * updated, this ACK will be dropped
5101 */
5102 break;
5103 }
5104
5105 /*
5106 * Dup acks mean that packets have left the
5107 * network (they're now cached at the receiver)
5108 * so bump cwnd by the amount in the receiver
5109 * to keep a constant cwnd packets in the
5110 * network.
5111 */
5112 if (SACK_ENABLED(tp) && IN_FASTRECOVERY(tp)) {
5113 int awnd;
5114
5115 /*
5116 * Compute the amount of data in flight first.
5117 * We can inject new data into the pipe iff
5118 * we have less than snd_ssthresh worth of data in
5119 * flight.
5120 */
5121 awnd = (tp->snd_nxt - tp->snd_fack) + tp->sackhint.sack_bytes_rexmit;
5122 if (awnd < tp->snd_ssthresh) {
5123 tp->snd_cwnd += tp->t_maxseg;
5124 if (tp->snd_cwnd > tp->snd_ssthresh) {
5125 tp->snd_cwnd = tp->snd_ssthresh;
5126 }
5127 }
5128 } else {
5129 tp->snd_cwnd += tp->t_maxseg;
5130 }
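/*
 * Worked example (added commentary): suppose snd_nxt - snd_fack =
 * 20000 bytes and sack_bytes_rexmit = 2896, so awnd = 22896. With
 * snd_ssthresh = 30000 the pipe is below ssthresh, so cwnd grows by
 * one t_maxseg (clamped to ssthresh); otherwise cwnd is left alone.
 * Only the non-SACK path above inflates cwnd unconditionally.
 */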
5131
5132 /* Process any window updates */
5133 if (tiwin > tp->snd_wnd) {
5134 tcp_update_window(tp, thflags,
5135 th, tiwin, tlen);
5136 }
5137 tcp_ccdbg_trace(tp, th,
5138 TCP_CC_IN_FASTRECOVERY);
5139
5140 (void) tcp_output(tp);
5141
5142 goto drop;
5143 } else if (rack_loss_detected || (!TCP_RACK_ENABLED(tp) && tp->t_dupacks >= tp->t_rexmtthresh)) {
5144 /*
5145 * Currently not in Fast Recovery and received 3 or more dupacks.
5146 * Enter Fast Recovery, retransmit segment and set
5147 * cwnd to ssthresh if SACK is enabled.
5148 */
5149 tcp_seq onxt = tp->snd_nxt;
5150
5151 /*
5152 * If we're doing sack, check to
5153 * see if we're already in sack
5154 * recovery. If we're not doing sack,
5155 * check to see if we're in newreno
5156 * recovery.
5157 */
5158 if (SACK_ENABLED(tp)) {
5159 if (IN_FASTRECOVERY(tp)) {
5160 tp->t_dupacks = 0;
5161 break;
5162 } else if (tp->t_flagsext & TF_DELAY_RECOVERY) {
5163 break;
5164 }
5165 } else {
5166 if (SEQ_LEQ(th->th_ack, tp->snd_recover)) {
5167 tp->t_dupacks = 0;
5168 break;
5169 }
5170 }
5171 if (tp->t_flags & TF_SENTFIN) {
5172 tp->snd_recover = tp->snd_max - 1;
5173 } else {
5174 tp->snd_recover = tp->snd_max;
5175 }
5176 tp->t_timer[TCPT_PTO] = 0;
5177 tp->t_rtttime = 0;
5178
5179 /*
5180 * If the connection has seen pkt
5181 * reordering, delay recovery until
5182 * it is clear that the packet
5183 * was lost.
5184 */
5185 if (SACK_ENABLED(tp) &&
5186 (tp->t_flagsext &
5187 (TF_PKTS_REORDERED | TF_DELAY_RECOVERY))
5188 == TF_PKTS_REORDERED &&
5189 !IN_FASTRECOVERY(tp) &&
5190 tp->t_reorderwin > 0 &&
5191 (tp->t_state == TCPS_ESTABLISHED ||
5192 tp->t_state == TCPS_FIN_WAIT_1)) {
5193 tp->t_timer[TCPT_DELAYFR] =
5194 OFFSET_FROM_START(tp,
5195 tp->t_reorderwin);
5196 tp->t_flagsext |= TF_DELAY_RECOVERY;
5197 tcpstat.tcps_delay_recovery++;
5198 tcp_ccdbg_trace(tp, th,
5199 TCP_CC_DELAY_FASTRECOVERY);
5200 break;
5201 }
5202
5203 tcp_rexmt_save_state(tp);
5204 /*
5205 * If the current tcp cc module has
5206 * defined a hook for tasks to run
5207 * before entering FR, call it
5208 */
5209 if (CC_ALGO(tp)->pre_fr != NULL) {
5210 CC_ALGO(tp)->pre_fr(tp);
5211 }
5212 ENTER_FASTRECOVERY(tp);
5213 tp->t_timer[TCPT_REXMT] = 0;
5214 if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
5215 tp->ecn_flags |= TE_SENDCWR;
5216 }
5217
5218 if (SACK_ENABLED(tp)) {
5219 if (TCP_RACK_ENABLED(tp)) {
5220 tcpstat.tcps_rack_recovery_episode++;
5221 tp->t_rack_recovery_episode++;
5222 } else {
5223 tcpstat.tcps_sack_recovery_episode++;
5224 tp->t_sack_recovery_episode++;
5225 }
5226
5227 tp->snd_cwnd = tp->snd_ssthresh;
5228 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
5229
5230 /* Process any window updates */
5231 if (tiwin > tp->snd_wnd) {
5232 tcp_update_window(tp, thflags, th, tiwin, tlen);
5233 }
5234
5235 tcp_ccdbg_trace(tp, th, TCP_CC_ENTER_FASTRECOVERY);
5236 (void) tcp_output(tp);
5237 goto drop;
5238 }
5239 tp->snd_nxt = th->th_ack;
5240 tp->snd_cwnd = tp->t_maxseg;
5241
5242 /* cwnd is validated after pre_fr() */
5243 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
5244
5245 /* Process any window updates */
5246 if (tiwin > tp->snd_wnd) {
5247 tcp_update_window(tp, thflags, th, tiwin, tlen);
5248 }
5249
5250 (void) tcp_output(tp);
5251 if (tp->t_flagsext & TF_CWND_NONVALIDATED) {
5252 tcp_cc_adjust_nonvalidated_cwnd(tp);
5253 } else {
5254 tp->snd_cwnd = tp->snd_ssthresh + tp->t_maxseg * tp->t_dupacks;
5255 }
5256 if (SEQ_GT(onxt, tp->snd_nxt)) {
5257 tp->snd_nxt = onxt;
5258 }
5259
5260 tcp_ccdbg_trace(tp, th, TCP_CC_ENTER_FASTRECOVERY);
5261 goto drop;
5262 } else if (ALLOW_LIMITED_TRANSMIT(tp) &&
5263 (!(SACK_ENABLED(tp)) || sack_bytes_acked > 0) &&
5264 (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)) > 0) {
5265 u_int32_t incr = (tp->t_maxseg * tp->t_dupacks);
5266
5267 /* Use Limited Transmit algorithm on the first two
5268 * duplicate acks when there is new data to transmit
5269 */
5270 tp->snd_cwnd += incr;
5271 tcpstat.tcps_limited_txt++;
5272 (void) tcp_output(tp);
5273
5274 tcp_ccdbg_trace(tp, th, TCP_CC_LIMITED_TRANSMIT);
5275
5276 /* Reset snd_cwnd back to normal */
5277 tp->snd_cwnd -= incr;
5278 }
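/*
 * Worked example (added commentary) of Limited Transmit (RFC 3042):
 * on the 1st and 2nd dup ACK, cwnd is inflated by t_dupacks * t_maxseg
 * (e.g. 1448 then 2896 bytes for a 1448-byte MSS) just long enough for
 * tcp_output() to send that much previously unsent data, then deflated
 * back so the inflation never persists in cwnd.
 */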
5279 }
5280 break;
5281 }
5282 /*
5283 * If the congestion window was inflated to account
5284 * for the other side's cached packets, retract it.
5285 */
5286 if (IN_FASTRECOVERY(tp)) {
5287 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
5288 /*
5289 * If we received an ECE and entered
5290 * recovery, the subsequent ACKs should
5291 * not be treated as partial acks.
5292 */
5293 if (tp->ecn_flags & TE_INRECOVERY) {
5294 goto process_ACK;
5295 }
5296 /* RACK doesn't require inflating cwnd */
5297 if (!TCP_RACK_ENABLED(tp)) {
5298 if (SACK_ENABLED(tp)) {
5299 tcp_sack_partialack(tp, th);
5300 } else {
5301 tcp_newreno_partial_ack(tp, th);
5302 }
5303 tcp_ccdbg_trace(tp, th, TCP_CC_PARTIAL_ACK);
5304 }
5305 } else {
5306 if (tcp_cubic_minor_fixes) {
5307 exiting_fr = 1;
5308 }
5309 EXIT_FASTRECOVERY(tp);
5310 if (CC_ALGO(tp)->post_fr != NULL) {
5311 CC_ALGO(tp)->post_fr(tp, th);
5312 }
5313
5314 if (TCP_RACK_ENABLED(tp)) {
5315 tcp_rack_update_reordering_win_persist(tp);
5316 }
5317
5318 tp->t_pipeack = 0;
5319 tcp_clear_pipeack_state(tp);
5320 tcp_ccdbg_trace(tp, th,
5321 TCP_CC_EXIT_FASTRECOVERY);
5322 }
5323 } else if ((tp->t_flagsext &
5324 (TF_PKTS_REORDERED | TF_DELAY_RECOVERY))
5325 == (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) {
5326 /*
5327 * If the ack acknowledges up to snd_recover or if
5328 * it acknowledges all the snd holes, exit
5329 * recovery and cancel the timer. Otherwise,
5330 * this is a partial ack. Wait for the recovery
5331 * timer to fire before entering recovery. The
5332 * snd_holes have already been updated.
5333 */
5334 if (SEQ_GEQ(th->th_ack, tp->snd_recover) ||
5335 TAILQ_EMPTY(&tp->snd_holes)) {
5336 tp->t_timer[TCPT_DELAYFR] = 0;
5337 tp->t_flagsext &= ~TF_DELAY_RECOVERY;
5338 EXIT_FASTRECOVERY(tp);
5339 tcp_ccdbg_trace(tp, th,
5340 TCP_CC_EXIT_FASTRECOVERY);
5341 }
5342 } else {
5343 /*
5344 * We were not in fast recovery. Reset the
5345 * duplicate ack counter.
5346 */
5347 tp->t_dupacks = 0;
5348 tp->t_rexmtthresh = tcprexmtthresh;
5349 }
5350
5351 process_ACK:
5352 VERIFY(SEQ_GEQ(th->th_ack, tp->snd_una));
5353 acked = BYTES_ACKED(th, tp);
5354 tcpstat.tcps_rcvackpack++;
5355 tcpstat.tcps_rcvackbyte += acked;
5356
5357 /*
5358 * If the last packet was a retransmit, make sure
5359 * it was not spurious.
5360 *
5361 * This will also take care of congestion window
5362 * adjustment if a last packet was recovered due to a
5363 * tail loss probe.
5364 */
5365 tcp_bad_rexmt_check(tp, th, &to);
5366
5367 /* Recalculate the RTT */
5368 tcp_compute_rtt(tp, &to, th);
5369
5370 /*
5371 * If all outstanding data is acked, stop retransmit
5372 * timer and remember to restart (more output or persist).
5373 * If there is more data to be acked, restart retransmit
5374 * timer, using current (possibly backed-off) value.
5375 */
5376 TCP_RESET_REXMT_STATE(tp);
5377 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
5378 tp->t_rttmin, TCPTV_REXMTMAX,
5379 TCP_ADD_REXMTSLOP(tp));
5380 if (th->th_ack == tp->snd_max) {
5381 tp->t_timer[TCPT_REXMT] = 0;
5382 tp->t_timer[TCPT_PTO] = 0;
5383 tp->t_timer[TCPT_REORDER] = 0;
5384 tcp_rack_reset_segs_retransmitted(tp);
5385 needoutput = 1;
5386 } else if (tp->t_timer[TCPT_PERSIST] == 0) {
5387 tcp_set_link_heur_rtomin(tp, inp->inp_last_outifp);
5388 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
5389 }
5390
5391 if ((prev_t_state == TCPS_SYN_SENT ||
5392 prev_t_state == TCPS_SYN_RECEIVED) &&
5393 tp->t_state == TCPS_ESTABLISHED) {
5394 TCP_LOG_RTT_INFO(tp);
5395 }
5396
5397 /*
5398 * If no data (only SYN) was ACK'd, skip rest of ACK
5399 * processing.
5400 */
5401 if (acked == 0) {
5402 goto step6;
5403 }
5404
5405 /*
5406 * Process sent segments used for RACK as we need to update
5407 * RACK state before loss detection. Update snd_fack only
5408 * after ACK processing which performs reordering detection.
5409 */
5410 if (TCP_RACK_ENABLED(tp)) {
5411 tcp_segs_doack(tp, th->th_ack, &to);
5412 if (SEQ_LT(tp->snd_fack, highest_sacked_seq)) {
5413 tp->snd_fack = highest_sacked_seq;
5414 }
5415 if (SEQ_LT(tp->snd_fack, th->th_ack)) {
5416 tp->snd_fack = th->th_ack;
5417 }
5418 }
5419 /*
5420 * When outgoing data has been acked (except the SYN+data), we
5421 * mark this connection as "sending good" for TFO.
5422 */
5423 if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
5424 !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
5425 !(th->th_flags & TH_SYN)) {
5426 tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;
5427 }
5428
5429 if ((tp->ecn_flags & TE_SENDIPECT)) {
5430 /*
5431 * draft-ietf-tcpm-accurate-ecn-28
5432 * Accurate ECN feedback processing for data sender,
5433 * Process peer's feedback in received TCP thflags and update s.cep
5434 * Since SYN-ACK has a special encoding, exclude it from below.
5435 * Only perform it before CC is called and snd_una is updated.
5436 */
5437 if (TCP_ACC_ECN_ON(tp) && !(thflags & TH_SYN)) {
5438 /*
5439 * For a server in SYN_RECEIVED state (that switched to
5440 * ESTABLISHED on this ACK), exclude the last handshake ACK from this processing
5441 */
5442 if (th->th_ack == tp->iss + 1) {
5443 acked = 0;
5444 }
5445 uint32_t pkts_acked = tcp_packets_this_ack(tp, acked);
5446 tp->total_ect_packets_acked += pkts_acked;
5447 /*
5448 * Calculate newly_acked_time, used for AccECN feedback parsing
5449 * by the data sender when an ACK acknowledges no new data,
5450 * e.g. when reordering happens and certain packets have the same TS.
5451 * Right now, we consider that new time was ACKed if the TS
5452 * was GT the previous value, but we need to think about how to
5453 * differentiate between reordering and wrapping when the TS is the
5454 * same as the previous value.
5455 */
5456 bool newly_acked_time = false;
5457 if (acked == 0 && sack_bytes_acked == 0 &&
5458 (to.to_flags & TOF_TS) != 0 && to.to_tsecr != 0 &&
5459 (tp->t_last_ack_tsecr == 0 || TSTMP_GT(to.to_tsecr, tp->t_last_ack_tsecr))) {
5460 newly_acked_time = true;
5461 }
5462 /*
5463 * Update s.cep if bytes have been newly S/ACKed
5464 * otherwise, this ACK has already been superseded.
5465 */
5466 if (acked > 0 || sack_bytes_acked > 0 || newly_acked_time) {
5467 tcp_process_accecn(tp, &to, th, pkts_acked, ace);
5468 }
5469 } else if (TCP_ECN_ENABLED(tp) && (thflags & TH_ECE)) {
5470 /*
5471 * For classic ECN, congestion event is receiving TH_ECE.
5472 * Reduce the congestion window if we haven't
5473 * done so.
5474 */
5475 if (!IN_FASTRECOVERY(tp)) {
5476 /*
5477 * Although we enter Fast Recovery in the below function
5478 * we exit it immediately below as th_ack >= snd_recover
5479 */
5480 tcp_enter_fast_recovery(tp);
5481 tp->ecn_flags |= (TE_INRECOVERY | TE_SENDCWR);
5482 /*
5483 * Also note that the connection received
5484 * ECE at least once. We increment
5485 * t_ecn_capable_packets_marked when we first
5486 * enter fast recovery.
5487 */
5488 tp->ecn_flags |= TE_RECV_ECN_ECE;
5489 INP_INC_IFNET_STAT(inp, ecn_recv_ece);
5490 tcpstat.tcps_ecn_recv_ece++;
5491 tp->t_ecn_capable_packets_marked++;
5492 tcp_ccdbg_trace(tp, th, TCP_CC_ECN_RCVD);
5493 }
5494 }
5495 }
5496
5497 /*
5498 * When new data is acked, open the congestion window.
5499 * The specifics of how this is achieved are up to the
5500 * congestion control algorithm in use for this connection.
5501 *
5502 * The calculations in this function assume that snd_una is
5503 * not updated yet.
5504 */
5505 if (!IN_FASTRECOVERY(tp) && !exiting_fr) {
5506 if (CC_ALGO(tp)->ack_rcvd != NULL) {
5507 CC_ALGO(tp)->ack_rcvd(tp, th);
5508 }
5509 tcp_ccdbg_trace(tp, th, TCP_CC_ACK_RCVD);
5510 }
5511 if (acked > so->so_snd.sb_cc) {
5512 tp->snd_wnd -= so->so_snd.sb_cc;
5513 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc);
5514 ourfinisacked = 1;
5515 } else {
5516 sbdrop(&so->so_snd, acked);
5517 tcp_sbsnd_trim(&so->so_snd);
5518 tp->snd_wnd -= acked;
5519 ourfinisacked = 0;
5520 }
5521 /* detect una wraparound */
5522 if (!IN_FASTRECOVERY(tp) &&
5523 SEQ_GT(tp->snd_una, tp->snd_recover) &&
5524 SEQ_LEQ(th->th_ack, tp->snd_recover)) {
5525 tp->snd_recover = th->th_ack - 1;
5526 }
5527
5528 if (IN_FASTRECOVERY(tp) &&
5529 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
5530 EXIT_FASTRECOVERY(tp);
5531 if (TCP_RACK_ENABLED(tp)) {
5532 tcp_rack_update_reordering_win_persist(tp);
5533 }
5534 }
5535
5536 tcp_update_snd_una(tp, th->th_ack);
5537
5538 if (SACK_ENABLED(tp)) {
5539 if (SEQ_GT(tp->snd_una, tp->snd_recover)) {
5540 tp->snd_recover = tp->snd_una;
5541 }
5542 }
5543 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
5544 tp->snd_nxt = tp->snd_una;
5545 }
5546
5547 /*
5548 * Detect loss based on RACK during ACK processing to mark lost
5549 * segments and call tcp_output. Rest of the ACK processing can
5550 * continue after that.
5551 */
5552 if (TCP_RACK_ENABLED(tp) && tcp_rack_detect_loss_and_arm_timer(tp, 0)) {
5553 if (!IN_FASTRECOVERY(tp)) {
5554 tcp_enter_fast_recovery(tp);
5555 tcpstat.tcps_rack_recovery_episode++;
5556 tp->t_rack_recovery_episode++;
5557 }
5558 tcp_output(tp);
5559 }
5560
5561 if (!SLIST_EMPTY(&tp->t_rxt_segments) &&
5562 !TCP_DSACK_SEQ_IN_WINDOW(tp, tp->t_dsack_lastuna,
5563 tp->snd_una)) {
5564 tcp_rxtseg_clean(tp);
5565 }
5566 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
5567 tp->t_bwmeas != NULL) {
5568 tcp_bwmeas_check(tp);
5569 }
5570
5571 write_wakeup = 1;
5572
5573 if (!SLIST_EMPTY(&tp->t_notify_ack)) {
5574 tcp_notify_acknowledgement(tp, so);
5575 }
5576
5577 switch (tp->t_state) {
5578 /*
5579 * In FIN_WAIT_1 STATE in addition to the processing
5580 * for the ESTABLISHED state if our FIN is now acknowledged
5581 * then enter FIN_WAIT_2.
5582 */
5583 case TCPS_FIN_WAIT_1:
5584 if (ourfinisacked) {
5585 /*
5586 * If we can't receive any more
5587 * data, then closing user can proceed.
5588 * Starting the TCPT_2MSL timer is contrary to the
5589 * specification, but if we don't get a FIN
5590 * we'll hang forever.
5591 */
5592 DTRACE_TCP4(state__change, void, NULL,
5593 struct inpcb *, inp,
5594 struct tcpcb *, tp,
5595 int32_t, TCPS_FIN_WAIT_2);
5596 TCP_LOG_STATE(tp, TCPS_FIN_WAIT_2);
5597 tp->t_state = TCPS_FIN_WAIT_2;
5598 if (so->so_state & SS_CANTRCVMORE) {
5599 isconnected = FALSE;
5600 isdisconnected = TRUE;
5601 tcp_set_finwait_timeout(tp);
5602 }
5603 /*
5604 * fall through and make sure we also recognize
5605 * data ACKed with the FIN
5606 */
5607 }
5608 break;
5609
5610 /*
5611 * In CLOSING STATE in addition to the processing for
5612 * the ESTABLISHED state if the ACK acknowledges our FIN
5613 * then enter the TIME-WAIT state, otherwise ignore
5614 * the segment.
5615 */
5616 case TCPS_CLOSING:
5617 if (ourfinisacked) {
5618 DTRACE_TCP4(state__change, void, NULL,
5619 struct inpcb *, inp,
5620 struct tcpcb *, tp,
5621 int32_t, TCPS_TIME_WAIT);
5622 TCP_LOG_STATE(tp, TCPS_TIME_WAIT);
5623 tp->t_state = TCPS_TIME_WAIT;
5624 tcp_canceltimers(tp);
5625 if (tp->t_flagsext & TF_NOTIMEWAIT) {
5626 tp->t_flags |= TF_CLOSING;
5627 } else {
5628 add_to_time_wait(tp, 2 * tcp_msl);
5629 }
5630 isconnected = FALSE;
5631 isdisconnected = TRUE;
5632 }
5633 break;
5634
5635 /*
5636 * In LAST_ACK, we may still be waiting for data to drain
5637 * and/or to be acked, as well as for the ack of our FIN.
5638 * If our FIN is now acknowledged, delete the TCB,
5639 * enter the closed state and return.
5640 */
5641 case TCPS_LAST_ACK:
5642 if (ourfinisacked) {
5643 tp = tcp_close(tp);
5644 goto drop;
5645 }
5646 break;
5647
5648 /*
5649 * In TIME_WAIT state the only thing that should arrive
5650 * is a retransmission of the remote FIN. Acknowledge
5651 * it and restart the finack timer.
5652 */
5653 case TCPS_TIME_WAIT:
5654 add_to_time_wait(tp, 2 * tcp_msl);
5655 goto dropafterack;
5656 }
5657
5658 /*
5659 * If there is a SACK option on the ACK and we
5660 * haven't seen any duplicate acks before, count
5661 * it as a duplicate ack even if the cumulative
5662 * ack is advanced. If the receiver delayed an
5663 * ack and detected loss afterwards, then the ack
5664 * will advance cumulative ack and will also have
5665 * a SACK option. So counting it as one duplicate
5666 * ack is ok.
5667 */
5668 if (tp->t_state == TCPS_ESTABLISHED &&
5669 SACK_ENABLED(tp) && sack_bytes_acked > 0 &&
5670 to.to_nsacks > 0 && tp->t_dupacks == 0 &&
5671 SEQ_LEQ(th->th_ack, tp->snd_una) && tlen == 0 &&
5672 !(tp->t_flagsext & TF_PKTS_REORDERED)) {
5673 tcpstat.tcps_sack_ackadv++;
5674 goto process_dupack;
5675 }
5676 }
5677
5678 step6:
5679 /*
5680 * Update window information.
5681 */
5682 if (tcp_update_window(tp, thflags, th, tiwin, tlen)) {
5683 needoutput = 1;
5684 }
5685
5686 /*
5687 * Process segments with URG.
5688 */
5689 if ((thflags & TH_URG) && th->th_urp &&
5690 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
5691 /*
5692 * This is a kludge, but if we receive and accept
5693 * random urgent pointers, we'll crash in
5694 * soreceive. It's hard to imagine someone
5695 * actually wanting to send this much urgent data.
5696 */
5697 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
5698 th->th_urp = 0; /* XXX */
5699 thflags &= ~TH_URG; /* XXX */
5700 goto dodata; /* XXX */
5701 }
5702 /*
5703 * If this segment advances the known urgent pointer,
5704 * then mark the data stream. This should not happen
5705 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
5706 * a FIN has been received from the remote side.
5707 * In these states we ignore the URG.
5708 *
5709 * According to RFC961 (Assigned Protocols),
5710 * the urgent pointer points to the last octet
5711 * of urgent data. We continue, however,
5712 * to consider it to indicate the first octet
5713 * of data past the urgent section as the original
5714 * spec states (in one of two places).
5715 */
5716 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
5717 tp->rcv_up = th->th_seq + th->th_urp;
5718 so->so_oobmark = so->so_rcv.sb_cc +
5719 (tp->rcv_up - tp->rcv_nxt) - 1;
5720 if (so->so_oobmark == 0) {
5721 so->so_state |= SS_RCVATMARK;
5722 }
5723 sohasoutofband(so);
5724 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
5725 }
5726 /*
5727 * Remove out-of-band data so it doesn't get presented to the user.
5728 * This can happen independent of advancing the URG pointer,
5729 * but if two URG's are pending at once, some out-of-band
5730 * data may creep in... ick.
5731 */
5732 if (th->th_urp <= (u_int32_t)tlen
5733 #if SO_OOBINLINE
5734 && (so->so_options & SO_OOBINLINE) == 0
5735 #endif
5736 ) {
5737 tcp_pulloutofband(so, th, m,
5738 drop_hdrlen); /* hdr drop is delayed */
5739 }
5740 } else {
5741 /*
5742 * If no out of band data is expected,
5743 * pull receive urgent pointer along
5744 * with the receive window.
5745 */
5746 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) {
5747 tp->rcv_up = tp->rcv_nxt;
5748 }
5749 }
5750 dodata:
5751
5752 /* Set socket's connect or disconnect state correctly before doing data.
5753 * The following might unlock the socket if there is an upcall or a socket
5754 * filter.
5755 */
5756 if (isconnected) {
5757 soisconnected(so);
5758 } else if (isdisconnected) {
5759 soisdisconnected(so);
5760 }
5761
5762 /* Let's check the state of the pcb just to make sure that it did not get closed
5763 * when we unlocked above
5764 */
5765 if (inp->inp_state == INPCB_STATE_DEAD) {
5766 /* Just drop the packet that we are processing and return */
5767 TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "INPCB_STATE_DEAD");
5768 drop_reason = DROP_REASON_TCP_NO_SOCK;
5769 goto drop;
5770 }
5771
5772 /*
5773 * Process the segment text, merging it into the TCP sequencing queue,
5774 * and arranging for acknowledgment of receipt if necessary.
5775 * This process logically involves adjusting tp->rcv_wnd as data
5776 * is presented to the user (this happens in tcp_usrreq.c,
5777 * case PRU_RCVD). If a FIN has already been received on this
5778 * connection then we just ignore the text.
5779 *
5780 * If we are in SYN-received state and got a valid TFO cookie, we want
5781 * to process the data.
5782 */
5783 if ((tlen || (thflags & TH_FIN)) &&
5784 TCPS_HAVERCVDFIN(tp->t_state) == 0 &&
5785 (TCPS_HAVEESTABLISHED(tp->t_state) ||
5786 (tp->t_state == TCPS_SYN_RECEIVED &&
5787 (tp->t_tfo_flags & TFO_F_COOKIE_VALID)))) {
5788 tcp_seq save_start = th->th_seq;
5789 tcp_seq save_end = th->th_seq + tlen;
5790 m_adj(m, drop_hdrlen); /* delayed header drop */
5791 /*
5792 * Insert segment which includes th into TCP reassembly queue
5793 * with control block tp. Set thflags to whether reassembly now
5794 * includes a segment with FIN. This handles the common case
5795 * inline (segment is the next to be received on an established
5796 * connection, and the queue is empty), avoiding linkage into
5797 * and removal from the queue and repetition of various
5798 * conversions.
5799 * Set DELACK for segments received in order, but ack
5800 * immediately when segments are out of order (so
5801 * fast retransmit can work).
5802 */
5803 if (th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) {
5804 TCP_INC_VAR(tp->t_unacksegs, segment_count);
5805
5806 /* Calculate the RTT on the receiver */
5807 tcp_compute_rcv_rtt(tp, &to, th);
5808
5809 if (DELAY_ACK(tp, th) &&
5810 ((tp->t_flags & TF_ACKNOW) == 0)) {
5811 if ((tp->t_flags & TF_DELACK) == 0) {
5812 tp->t_flags |= TF_DELACK;
5813 tp->t_timer[TCPT_DELACK] =
5814 OFFSET_FROM_START(tp, tcp_delack);
5815 }
5816 } else {
5817 tp->t_flags |= TF_ACKNOW;
5818 }
5819 tp->rcv_nxt += tlen;
5820 /* Update highest received sequence and its timestamp */
5821 if (SEQ_LT(tp->rcv_high, tp->rcv_nxt)) {
5822 tp->rcv_high = tp->rcv_nxt;
5823 if (to.to_flags & TOF_TS) {
5824 tp->tsv_high = to.to_tsval;
5825 }
5826 }
5827
5828 thflags = th->th_flags & TH_FIN;
5829 TCP_INC_VAR(tcpstat.tcps_rcvpack, segment_count);
5830 tcpstat.tcps_rcvbyte += tlen;
5831 if (nstat_collect) {
5832 INP_ADD_STAT(inp, ifnet_count_type,
5833 rxpackets, 1);
5834 INP_ADD_STAT(inp, ifnet_count_type,
5835 rxbytes, tlen);
5836 inp_set_activity_bitmap(inp);
5837 }
5838 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
5839 if (TCP_USE_RLEDBAT(tp, so) &&
5840 tcp_cc_rledbat.data_rcvd != NULL) {
5841 tcp_cc_rledbat.data_rcvd(tp, th, &to, tlen);
5842 }
5843
5844 so_recv_data_stat(so, m, drop_hdrlen);
5845
5846 if (isipv6) {
5847 memcpy(&saved_hdr, ip6, sizeof(struct ip6_hdr));
5848 ip6 = (struct ip6_hdr *)&saved_hdr[0];
5849 } else {
5850 memcpy(&saved_hdr, ip, ip->ip_hl << 2);
5851 ip = (struct ip *)&saved_hdr[0];
5852 }
5853 memcpy(&saved_tcphdr, th, sizeof(struct tcphdr));
5854
5855 if (th->th_flags & TH_PUSH) {
5856 tp->t_flagsext |= TF_LAST_IS_PSH;
5857 } else {
5858 tp->t_flagsext &= ~TF_LAST_IS_PSH;
5859 }
5860
5861 if (sbappendstream_rcvdemux(so, m)) {
5862 read_wakeup = 1;
5863 }
5864 th = &saved_tcphdr;
5865 } else {
5866 if (isipv6) {
5867 memcpy(&saved_hdr, ip6, sizeof(struct ip6_hdr));
5868 ip6 = (struct ip6_hdr *)&saved_hdr[0];
5869 } else {
5870 memcpy(&saved_hdr, ip, ip->ip_hl << 2);
5871 ip = (struct ip *)&saved_hdr[0];
5872 }
5873
5874 /* Update highest received sequence and its timestamp */
5875 if (SEQ_LT(tp->rcv_high, th->th_seq + tlen)) {
5876 tp->rcv_high = th->th_seq + tlen;
5877 if (to.to_flags & TOF_TS) {
5878 tp->tsv_high = to.to_tsval;
5879 }
5880 }
5881
5882 /*
5883 * Calculate the RTT on the receiver,
5884 * even if OOO segment is received.
5885 */
5886 tcp_compute_rcv_rtt(tp, &to, th);
5887
5888 if (tcp_autotune_reorder) {
5889 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
5890 }
5891 if (TCP_USE_RLEDBAT(tp, so) &&
5892 tcp_cc_rledbat.data_rcvd != NULL) {
5893 tcp_cc_rledbat.data_rcvd(tp, th, &to, tlen);
5894 }
5895
5896 memcpy(&saved_tcphdr, th, sizeof(struct tcphdr));
5897 thflags = tcp_reass(tp, th, &tlen, m, ifp, &read_wakeup);
5898 th = &saved_tcphdr;
5899 tp->t_flags |= TF_ACKNOW;
5900 }
5901
5902 if ((tlen > 0 || (th->th_flags & TH_FIN)) && SACK_ENABLED(tp)) {
5903 if (th->th_flags & TH_FIN) {
5904 save_end++;
5905 }
5906 tcp_update_sack_list(tp, save_start, save_end);
5907 }
5908
5909 tcp_adaptive_rwtimo_check(tp, tlen);
5910
5911 if (tlen > 0) {
5912 tcp_tfo_rcv_data(tp);
5913 }
5914
5915 if (tp->t_flags & TF_DELACK) {
5916 if (isipv6) {
5917 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
5918 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
5919 th->th_seq, th->th_ack, th->th_win);
5920 } else {
5921 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
5922 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
5923 th->th_seq, th->th_ack, th->th_win);
5924 }
5925 }
5926 } else {
5927 if ((so->so_flags & SOF_MP_SUBFLOW) && tlen == 0 &&
5928 (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) &&
5929 (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
5930 m_adj(m, drop_hdrlen); /* delayed header drop */
5931 /*
5932 * 0-length DATA_FIN. The rlen is actually 0. We special-case the
5933 * byte consumed by the dfin in mptcp_input and mptcp_reass_present
5934 */
5935 m->m_pkthdr.mp_rlen = 0;
5936 mptcp_input(tptomptp(tp)->mpt_mpte, m);
5937 tp->t_flags |= TF_ACKNOW;
5938 } else {
5939 m_freem(m);
5940 }
5941 thflags &= ~TH_FIN;
5942 }
5943 /*
5944 * We increment t_unacksegs_ce for both data segments and pure ACKs.
5945 * No need to increment if a FIN has already been received.
5946 */
5947 if (TCP_ACC_ECN_ON(tp) && TCPS_HAVEESTABLISHED(tp->t_state) &&
5948 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
5949 if (ip_ecn == IPTOS_ECN_CE) {
5950 TCP_INC_VAR(tp->t_unacksegs_ce, segment_count);
5951 }
5952 /*
5953 * Send an ACK immediately if there is a change in IP ECN
5954 * from non-CE to CE.
5955 * If new data is delivered, then ACK for every 2 CE marks,
5956 * otherwise ACK for every 3 CE marks
5957 */
5958 if ((ip_ecn == IPTOS_ECN_CE && ip_ecn != tp->t_prev_ip_ecn) ||
5959 (tp->t_unacksegs_ce >= 2 && tp->last_ack_sent != tp->rcv_nxt) ||
5960 tp->t_unacksegs_ce >= 3) {
5961 tp->t_flags |= TF_ACKNOW;
5962 }
5963 tp->t_prev_ip_ecn = ip_ecn;
5964 }
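/*
 * Example (added commentary) of the AccECN CE-mark ACK policy above,
 * for a stream of arriving segments:
 *   ECT(0), then CE                 -> immediate ACK (non-CE to CE change)
 *   2 unacked CE marks, new data    -> ACK at the 2nd CE mark
 *   3 unacked CE marks, no new data -> ACK at the 3rd CE mark
 */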
5965 /*
5966 * If FIN is received ACK the FIN and let the user know
5967 * that the connection is closing.
5968 */
5969 if (thflags & TH_FIN) {
5970 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
5971 socantrcvmore(so);
5972 /*
5973 * If connection is half-synchronized
5974 * (ie NEEDSYN flag on) then delay ACK,
5975 * so it may be piggybacked when SYN is sent.
5976 * Otherwise, since we received a FIN then no
5977 * more input can be expected, send ACK now.
5978 */
5979 TCP_INC_VAR(tp->t_unacksegs, segment_count);
5980 tp->t_flags |= TF_ACKNOW;
5981 tp->rcv_nxt++;
5982 }
5983 switch (tp->t_state) {
5984 /*
5985 * In SYN_RECEIVED and ESTABLISHED STATES
5986 * enter the CLOSE_WAIT state.
5987 */
5988 case TCPS_SYN_RECEIVED:
5989 tp->t_starttime = tcp_now;
5990 OS_FALLTHROUGH;
5991 case TCPS_ESTABLISHED:
5992 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
5993 struct tcpcb *, tp, int32_t, TCPS_CLOSE_WAIT);
5994 TCP_LOG_STATE(tp, TCPS_CLOSE_WAIT);
5995 tp->t_state = TCPS_CLOSE_WAIT;
5996 break;
5997
5998 /*
5999 * If still in FIN_WAIT_1 STATE FIN has not been acked so
6000 * enter the CLOSING state.
6001 */
6002 case TCPS_FIN_WAIT_1:
6003 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
6004 struct tcpcb *, tp, int32_t, TCPS_CLOSING);
6005 TCP_LOG_STATE(tp, TCPS_CLOSING);
6006 tp->t_state = TCPS_CLOSING;
6007 break;
6008
6009 /*
6010 * In FIN_WAIT_2 state enter the TIME_WAIT state,
6011 * starting the time-wait timer, turning off the other
6012 * standard timers.
6013 */
6014 case TCPS_FIN_WAIT_2:
6015 DTRACE_TCP4(state__change, void, NULL,
6016 struct inpcb *, inp,
6017 struct tcpcb *, tp,
6018 int32_t, TCPS_TIME_WAIT);
6019 TCP_LOG_STATE(tp, TCPS_TIME_WAIT);
6020 tp->t_state = TCPS_TIME_WAIT;
6021 tcp_canceltimers(tp);
6022 tp->t_flags |= TF_ACKNOW;
6023 if (tp->t_flagsext & TF_NOTIMEWAIT) {
6024 tp->t_flags |= TF_CLOSING;
6025 } else {
6026 add_to_time_wait(tp, 2 * tcp_msl);
6027 }
6028 soisdisconnected(so);
6029 break;
6030
6031 /*
6032 * In TIME_WAIT state restart the 2 MSL time_wait timer.
6033 */
6034 case TCPS_TIME_WAIT:
6035 add_to_time_wait(tp, 2 * tcp_msl);
6036 break;
6037 }
6038 }
6039 if (read_wakeup) {
6040 mptcp_handle_input(so);
6041 }
6042
6043 /*
6044 * Return any desired output.
6045 */
6046 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
6047 (void) tcp_output(tp);
6048 }
6049
6050 tcp_check_timer_state(tp);
6051
6052 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
6053
6054 socket_unlock(so, 1);
6055 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
6056 return;
6057
6058 dropafterack:
6059 /*
6060 * Generate an ACK dropping incoming segment if it occupies
6061 * sequence space, where the ACK reflects our state.
6062 *
6063 * We can now skip the test for the RST flag since all
6064 * paths to this code happen after packets containing
6065 * RST have been dropped.
6066 *
6067 * In the SYN-RECEIVED state, don't send an ACK unless the
6068 * segment we received passes the SYN-RECEIVED ACK test.
6069 * If it fails send a RST. This breaks the loop in the
6070 * "LAND" DoS attack, and also prevents an ACK storm
6071 * between two listening ports that have been sent forged
6072 * SYN segments, each with the source address of the other.
6073 */
6074 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
6075 (SEQ_GT(tp->snd_una, th->th_ack) ||
6076 SEQ_GT(th->th_ack, tp->snd_max))) {
6077 IF_TCP_STATINC(ifp, dospacket);
6078 goto dropwithreset;
6079 }
6080 m_freem(m);
6081 tp->t_flags |= TF_ACKNOW;
6082
6083 (void) tcp_output(tp);
6084
6085 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
6086
6087 /* Don't need to check timer state as we should have done it during tcp_output */
6088 socket_unlock(so, 1);
6089 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
6090 return;
6091 dropwithresetnosock:
6092 nosock = 1;
6093 dropwithreset:
6094 /*
6095 * Generate a RST, dropping incoming segment.
6096 * Make ACK acceptable to originator of segment.
6097 * Don't bother to respond if destination was broadcast/multicast.
6098 */
6099 if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST)) {
6100 goto drop;
6101 }
6102 if (isipv6) {
6103 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
6104 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
6105 goto drop;
6106 }
6107 } else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
6108 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
6109 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
6110 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
6111 goto drop;
6112 }
6113 /* IPv6 anycast check is done at tcp6_input() */
6114
6115 bzero(&tra, sizeof(tra));
6116 tra.ifscope = ifscope;
6117 tra.awdl_unrestricted = 1;
6118 tra.intcoproc_allowed = 1;
6119 tra.management_allowed = 1;
6120 if (thflags & TH_ACK) {
6121 /* mtod() below is safe as long as hdr dropping is delayed */
6122 tcp_respond(tp, mtod(m, void *), m->m_len, th, m, (tcp_seq)0, th->th_ack,
6123 TH_RST, &tra);
6124 } else {
6125 if (thflags & TH_SYN) {
6126 tlen++;
6127 }
6128 /* mtod() below is safe as long as hdr dropping is delayed */
6129 tcp_respond(tp, mtod(m, void *), m->m_len, th, m, th->th_seq + tlen,
6130 (tcp_seq)0, TH_RST | TH_ACK, &tra);
6131 }
6132 /* destroy temporarily created socket */
6133 if (dropsocket) {
6134 (void) soabort(so);
6135 socket_unlock(so, 1);
6136 } else if ((inp != NULL) && (nosock == 0)) {
6137 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
6138
6139 socket_unlock(so, 1);
6140 }
6141 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
6142 return;
6143 dropnosock:
6144 nosock = 1;
6145 drop:
6146 /*
6147 * Drop space held by incoming segment and return.
6148 */
6149 if (isipv6 == 0) {
6150 if (ip == NULL) {
6151 ip = mtod(m, struct ip *);
6152 }
6153 /* add back the header length */
6154 ip->ip_len += (ip->ip_hl << 2);
6155 HTONS(ip->ip_len);
6156 HTONS(ip->ip_off);
6157
6158 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
6159 } else if (ip6 == NULL) {
6160 ip6 = mtod(m, struct ip6_hdr *);
6161
6162 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
6163 }
6164 if (is_th_swapped) {
6165 HTONL(th->th_seq);
6166 HTONL(th->th_ack);
6167 HTONS(th->th_win);
6168 HTONS(th->th_urp);
6169 }
6170 if (drop_reason != DROP_REASON_UNSPECIFIED || droptap_verbose > 0) {
6171 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, drop_reason, NULL, 0);
6172 } else {
6173 m_freem(m);
6174 }
6175 /* destroy temporarily created socket */
6176 if (dropsocket) {
6177 (void) soabort(so);
6178 socket_unlock(so, 1);
6179 } else if (nosock == 0) {
6180 tcp_handle_wakeup(so, read_wakeup, write_wakeup);
6181
6182 socket_unlock(so, 1);
6183 }
6184 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
6185 return;
6186 }
6187
6188 /*
6189 * Parse TCP options and place in tcpopt.
6190 */
6191 static void
6192 tcp_dooptions(struct tcpcb *tp, u_char *cp0 __counted_by(cnt0), int cnt0, struct tcphdr *th,
6193 struct tcpopt *to)
6194 {
6195 u_short mss = 0;
6196 uint8_t opt, optlen;
6197 u_char *cp = cp0;
6198 u_char * const cpend = cp0 + cnt0;
6199 int cnt = cnt0;
6200
6201 for (; cnt > 0; cnt -= optlen, cp += optlen) {
6202 opt = cp[0];
6203 if (opt == TCPOPT_EOL) {
6204 break;
6205 }
6206 if (opt == TCPOPT_NOP) {
6207 optlen = 1;
6208 } else {
6209 if (cnt < 2) {
6210 break;
6211 }
6212 optlen = cp[1];
6213 if (optlen < 2 || optlen > cnt) {
6214 break;
6215 }
6216 }
6217 switch (opt) {
6218 default:
6219 continue;
6220
6221 case TCPOPT_MAXSEG:
6222 if (optlen != TCPOLEN_MAXSEG) {
6223 continue;
6224 }
6225 if (!(th->th_flags & TH_SYN)) {
6226 continue;
6227 }
6228 bcopy((char *) cp + 2, (char *) &mss, sizeof(mss));
6229 NTOHS(mss);
6230 to->to_mss = mss;
6231 to->to_flags |= TOF_MSS;
6232 break;
6233
6234 case TCPOPT_WINDOW:
6235 if (optlen != TCPOLEN_WINDOW) {
6236 continue;
6237 }
6238 if (!(th->th_flags & TH_SYN)) {
6239 continue;
6240 }
6241 to->to_flags |= TOF_SCALE;
6242 to->to_requested_s_scale = MIN(cp[2], TCP_MAX_WINSHIFT);
6243 break;
6244
6245 case TCPOPT_TIMESTAMP:
6246 if (optlen != TCPOLEN_TIMESTAMP) {
6247 continue;
6248 }
6249 to->to_flags |= TOF_TS;
6250 bcopy((char *)cp + 2,
6251 (char *)&to->to_tsval, sizeof(to->to_tsval));
6252 NTOHL(to->to_tsval);
6253 bcopy((char *)cp + 6,
6254 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
6255 NTOHL(to->to_tsecr);
6256 to->to_tsecr -= tp->t_ts_offset;
6257 /* Re-enable sending Timestamps if we received them */
6258 if (!(tp->t_flags & TF_REQ_TSTMP) && tcp_do_timestamps) {
6259 tp->t_flags |= TF_REQ_TSTMP;
6260 }
6261 break;
6262 case TCPOPT_SACK_PERMITTED:
6263 if (optlen != TCPOLEN_SACK_PERMITTED) {
6264 continue;
6265 }
6266 if (th->th_flags & TH_SYN) {
6267 to->to_flags |= TOF_SACK;
6268 }
6269 break;
6270 case TCPOPT_SACK:
6271 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) {
6272 continue;
6273 }
6274 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
6275 to->to_sacks_size = optlen - 2;
6276 to->to_sacks = cp + 2;
6277 tcpstat.tcps_sack_rcv_blocks++;
6278
6279 break;
6280 case TCPOPT_FASTOPEN:
6281 if (optlen == TCPOLEN_FASTOPEN_REQ) {
6282 if (tp->t_state != TCPS_LISTEN) {
6283 continue;
6284 }
6285
6286 to->to_flags |= TOF_TFOREQ;
6287 } else {
6288 if (optlen < TCPOLEN_FASTOPEN_REQ ||
6289 (optlen - TCPOLEN_FASTOPEN_REQ) > TFO_COOKIE_LEN_MAX ||
6290 (optlen - TCPOLEN_FASTOPEN_REQ) < TFO_COOKIE_LEN_MIN) {
6291 continue;
6292 }
6293 if (tp->t_state != TCPS_LISTEN &&
6294 tp->t_state != TCPS_SYN_SENT) {
6295 continue;
6296 }
6297
6298 to->to_flags |= TOF_TFO;
6299 to->to_tfo = cp + 1;
6300 to->to_tfo_size = optlen - 1;
6301 }
6302
6303 break;
6304 case TCPOPT_ACCECN0:
6305 case TCPOPT_ACCECN1:
6306 if (optlen < (TCPOLEN_ACCECN_EMPTY + 1 * TCPOLEN_ACCECN_COUNTER) ||
6307 (optlen - 2) % TCPOLEN_ACCECN_COUNTER != 0) {
6308 continue;
6309 }
6310 to->to_num_accecn = (optlen - 2) / TCPOLEN_ACCECN_COUNTER;
6311 to->to_accecn = cp + 2;
6312 to->to_accecn_size = optlen - 2;
6313 if (opt == TCPOPT_ACCECN0) {
6314 to->to_accecn_order = 0;
6315 } else if (opt == TCPOPT_ACCECN1) {
6316 to->to_accecn_order = 1;
6317 }
6318 break;
6319
6320 #if MPTCP
6321 case TCPOPT_MULTIPATH:
6322 tcp_do_mptcp_options(tp, cp, cpend, th, to, optlen);
6323 break;
6324 #endif /* MPTCP */
6325 }
6326 }
6327 }
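/*
 * Illustrative sketch (added commentary, not part of the original
 * sources): tcp_dooptions() above is a classic kind/length TLV walk.
 * The skeleton below models just that loop in isolation, with
 * hypothetical example_* names, to make the bounds checks easier to
 * see: NOP advances by one byte, and every other option must carry a
 * length octet of at least 2 that does not overrun the remaining
 * option space.
 */
#if 0   /* standalone userland sketch, never compiled into the kernel */
#include <stdint.h>
#include <stdio.h>

#define EX_TCPOPT_EOL 0
#define EX_TCPOPT_NOP 1

static void
example_walk_options(const uint8_t *cp, int cnt)
{
	uint8_t opt, optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == EX_TCPOPT_EOL) {
			break;          /* end-of-list: stop parsing */
		}
		if (opt == EX_TCPOPT_NOP) {
			optlen = 1;     /* NOP is a single padding byte */
			continue;
		}
		if (cnt < 2) {
			break;          /* no room for a length octet */
		}
		optlen = cp[1];
		if (optlen < 2 || optlen > cnt) {
			break;          /* malformed or truncated length */
		}
		printf("kind %u len %u\n", opt, optlen);
	}
}
#endif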
6328
6329 static void
6330 tcp_finalize_options(struct tcpcb *tp, struct tcpopt *to, unsigned int ifscope)
6331 {
6332 if (to->to_flags & TOF_TS) {
6333 tp->t_flags |= TF_RCVD_TSTMP;
6334 tp->ts_recent = to->to_tsval;
6335 tp->ts_recent_age = tcp_now;
6336 }
6337 if (to->to_flags & TOF_MSS) {
6338 tcp_mss(tp, to->to_mss, ifscope);
6339 }
6340 if (SACK_ENABLED(tp)) {
6341 if (!(to->to_flags & TOF_SACK)) {
6342 tp->t_flagsext &= ~(TF_SACK_ENABLE);
6343 } else {
6344 tp->t_flags |= TF_SACK_PERMIT;
6345 }
6346 }
6347 if (to->to_flags & TOF_SCALE) {
6348 tp->t_flags |= TF_RCVD_SCALE;
6349 tp->requested_s_scale = to->to_requested_s_scale;
6350
6351 /* Re-enable window scaling, if the option is received */
6352 if (tp->request_r_scale > 0) {
6353 tp->t_flags |= TF_REQ_SCALE;
6354 }
6355 }
6356 }
6357
6358 /*
6359 * Pull out of band byte out of a segment so
6360 * it doesn't appear in the user's data queue.
6361 * It is still reflected in the segment length for
6362 * sequencing purposes.
6363 *
6364 * @param off delayed-to-be-dropped header length (drop_hdrlen)
6365 */
6366 static void
6367 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
6368 {
6369 int cnt = off + th->th_urp - 1;
6370
6371 while (cnt >= 0) {
6372 if (m->m_len > cnt) {
6373 char *cp = mtod(m, caddr_t) + cnt;
6374 struct tcpcb *tp = sototcpcb(so);
6375
6376 tp->t_iobc = *cp;
6377 tp->t_oobflags |= TCPOOB_HAVEDATA;
6378 bcopy(cp + 1, cp, (unsigned)(m->m_len - cnt - 1));
6379 m->m_len--;
6380 if (m->m_flags & M_PKTHDR) {
6381 m->m_pkthdr.len--;
6382 }
6383 return;
6384 }
6385 cnt -= m->m_len;
6386 m = m->m_next;
6387 if (m == 0) {
6388 break;
6389 }
6390 }
6391 panic("tcp_pulloutofband");
6392 }
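/*
 * Worked example (added commentary, numbers hypothetical): with a
 * delayed header drop of off = 52 bytes and th_urp = 3, cnt = 54, so
 * the out-of-band byte (the last urgent octet, per the BSD reading of
 * the urgent pointer) sits at offset 54 from the start of the mbuf
 * chain. The loop walks the chain to the mbuf containing that offset,
 * saves the byte in t_iobc, and splices it out by shifting the
 * remaining bytes left by one (m_len and pkthdr.len shrink by one).
 */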
6393
6394 uint32_t
6395 get_base_rtt(struct tcpcb *tp)
6396 {
6397 struct rtentry *rt = tp->t_inpcb->inp_route.ro_rt;
6398 return (rt == NULL) ? 0 : rt->rtt_min;
6399 }
6400
6401 static void
6402 update_curr_rtt(struct tcpcb *tp, uint32_t rtt)
6403 {
6404 tp->curr_rtt_index = (tp->curr_rtt_index + 1) % NCURR_RTT_HIST;
6405 tp->curr_rtt_hist[tp->curr_rtt_index] = rtt;
6406
6407 /* forget the old value and update minimum */
6408 tp->curr_rtt_min = 0;
6409 for (int i = 0; i < NCURR_RTT_HIST; ++i) {
6410 if (tp->curr_rtt_hist[i] != 0 && (tp->curr_rtt_min == 0 ||
6411 tp->curr_rtt_hist[i] < tp->curr_rtt_min)) {
6412 tp->curr_rtt_min = tp->curr_rtt_hist[i];
6413 }
6414 }
6415 }
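/*
 * Example (added commentary): curr_rtt_hist is a small ring of the
 * most recent RTT samples, where a zero slot means "no sample yet".
 * If the ring holds { 12, 9, 0, 15 } ms after an update, the rescan
 * above yields curr_rtt_min = 9, i.e. the minimum over the last
 * NCURR_RTT_HIST valid samples rather than an all-time minimum.
 */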
6416
6417 /* Each value of the RTT base represents the minimum RTT seen in a minute.
6418 * We keep up to NRTT_HIST minutes' worth of history.
6419 */
6420 void
6421 update_base_rtt(struct tcpcb *tp, uint32_t rtt)
6422 {
6423 u_int32_t base_rtt, i;
6424 struct rtentry *rt;
6425
6426 if ((rt = tp->t_inpcb->inp_route.ro_rt) == NULL) {
6427 return;
6428 }
6429 if (rt->rtt_expire_ts == 0) {
6430 RT_LOCK_SPIN(rt);
6431 if (rt->rtt_expire_ts != 0) {
6432 RT_UNLOCK(rt);
6433 goto update;
6434 }
6435 rt->rtt_expire_ts = tcp_now;
6436 rt->rtt_index = 0;
6437 rt->rtt_hist[0] = rtt;
6438 rt->rtt_min = rtt;
6439 RT_UNLOCK(rt);
6440
6441 tp->curr_rtt_index = 0;
6442 tp->curr_rtt_hist[0] = rtt;
6443 tp->curr_rtt_min = rtt;
6444 return;
6445 }
6446 update:
6447 #if TRAFFIC_MGT
6448 /*
6449 * If the recv side is being throttled, check if the
6450 * current RTT is closer to the base RTT seen in
6451 * first (recent) two slots. If so, unthrottle the stream.
6452 */
6453 if ((tp->t_flagsext & TF_RECV_THROTTLE) &&
6454 (int)(tcp_now - tp->t_recv_throttle_ts) >= TCP_RECV_THROTTLE_WIN) {
6455 base_rtt = rt->rtt_min;
6456 if (tp->t_rttcur <= (base_rtt + target_qdelay)) {
6457 tp->t_flagsext &= ~TF_RECV_THROTTLE;
6458 tp->t_recv_throttle_ts = 0;
6459 }
6460 }
6461 #endif /* TRAFFIC_MGT */
6462
6463 /* Update the next current RTT sample */
6464 update_curr_rtt(tp, rtt);
6465
6466 if ((int)(tcp_now - rt->rtt_expire_ts) >=
6467 TCP_RTT_HISTORY_EXPIRE_TIME) {
6468 RT_LOCK_SPIN(rt);
6469 /* check the condition again to avoid race */
6470 if ((int)(tcp_now - rt->rtt_expire_ts) >=
6471 TCP_RTT_HISTORY_EXPIRE_TIME) {
6472 /* Set the base rtt to 0 for idle periods */
6473 uint32_t times = MIN((tcp_now - rt->rtt_expire_ts) /
6474 TCP_RTT_HISTORY_EXPIRE_TIME, NRTT_HIST + 1);
6475
6476 for (i = rt->rtt_index + 1; i < rt->rtt_index + times; i++) {
6477 rt->rtt_hist[i % NRTT_HIST] = 0;
6478 }
6479
6480 rt->rtt_index = i % NRTT_HIST;
6481 rt->rtt_hist[rt->rtt_index] = rtt;
6482 rt->rtt_expire_ts = tcp_now;
6483 } else {
6484 rt->rtt_hist[rt->rtt_index] =
6485 min(rt->rtt_hist[rt->rtt_index], rtt);
6486 }
6487 /* forget the old value and update minimum */
6488 rt->rtt_min = 0;
6489 for (i = 0; i < NRTT_HIST; ++i) {
6490 if (rt->rtt_hist[i] != 0 &&
6491 (rt->rtt_min == 0 ||
6492 rt->rtt_hist[i] < rt->rtt_min)) {
6493 rt->rtt_min = rt->rtt_hist[i];
6494 }
6495 }
6496 RT_UNLOCK(rt);
6497 } else {
6498 rt->rtt_hist[rt->rtt_index] =
6499 min(rt->rtt_hist[rt->rtt_index], rtt);
6500 if (rt->rtt_min == 0) {
6501 rt->rtt_min = rtt;
6502 } else {
6503 rt->rtt_min = min(rt->rtt_min, rtt);
6504 }
6505 }
6506 }
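/*
 * Example (added commentary): if the connection sat idle for about
 * three TCP_RTT_HISTORY_EXPIRE_TIME periods, times = 3 above, so the
 * two intervening history slots are zeroed before the fresh sample is
 * stored. That way minutes with no traffic cannot contribute a stale
 * minimum when rtt_min is recomputed over rtt_hist[].
 */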
6507
6508 /*
6509 * If we have a timestamp reply, update smoothed RTT. If no timestamp is
6510 * present but transmit timer is running and timed sequence number was
6511 * acked, update smoothed RTT.
6512 *
6513 * If timestamps are supported, a receiver can update RTT even if
6514 * there is no outstanding data.
6515 *
6516 * Some boxes send broken timestamp replies during the SYN+ACK phase,
6517 * ignore timestamps of 0 or we could calculate a huge RTT and blow up
6518 * the retransmit timer.
6519 */
6520 static void
6521 tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
6522 {
6523 int rtt = 0;
6524 VERIFY(to != NULL && th != NULL);
6525 if (tp->t_rtttime != 0 && SEQ_GT(th->th_ack, tp->t_rtseq)) {
6526 u_int32_t pipe_ack_val;
6527 rtt = tcp_now - tp->t_rtttime;
6528 if (rtt == 0) {
6529 /*
6530 * Make adjustment for sub ms RTT when
6531 * timestamps are not used.
6532 */
6533 rtt = 1;
6534 }
6535 /*
6536 * Compute pipe ack -- the amount of data acknowledged
6537 * in the last RTT -- only works for sender
6538 */
6539 if (SEQ_GT(th->th_ack, tp->t_pipeack_lastuna)) {
6540 pipe_ack_val = th->th_ack - tp->t_pipeack_lastuna;
6541 /* Update the sample */
6542 tp->t_pipeack_sample[tp->t_pipeack_ind++] =
6543 pipe_ack_val;
6544 tp->t_pipeack_ind %= TCP_PIPEACK_SAMPLE_COUNT;
6545
6546 /* Compute the max of the pipeack samples */
6547 pipe_ack_val = tcp_get_max_pipeack(tp);
6548 tp->t_pipeack = (pipe_ack_val >
6549 tcp_initial_cwnd(tp)) ?
6550 pipe_ack_val : 0;
6551 }
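/*
 * Example (added commentary): if snd_una advanced from
 * t_pipeack_lastuna by 28960 bytes within the measured RTT, that
 * becomes the newest t_pipeack_sample[] entry; t_pipeack is then the
 * max over the ring, but only if it exceeds the initial cwnd.
 * Smaller values are treated as 0 (no congestion-validation signal).
 */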
6552 /* start another measurement */
6553 tp->t_rtttime = 0;
6554 }
6555 if (((to->to_flags & TOF_TS) != 0) &&
6556 (to->to_tsecr != 0) &&
6557 TSTMP_GEQ(tcp_now, to->to_tsecr)) {
6558 tcp_xmit_timer(tp, (tcp_now - to->to_tsecr),
6559 to->to_tsecr, th->th_ack);
6560 } else if (rtt > 0) {
6561 tcp_xmit_timer(tp, rtt, 0, th->th_ack);
6562 }
6563 }
6564
6565 static void
6566 tcp_compute_rcv_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
6567 {
6568 uint32_t rtt = 0, delta = 0;
6569 VERIFY(to != NULL && th != NULL);
6570
6571 /* Calculate RTT */
6572 if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0) &&
6573 TSTMP_GEQ(tcp_now, to->to_tsecr)) {
6574 /* Timestamp is supported */
6575 rtt = tcp_now - to->to_tsecr;
6576 if (rtt == 0) {
6577 /* Make adjustment for sub ms RTT */
6578 rtt = 1;
6579 }
6580 } else if ((to->to_flags & TOF_TS) == 0) {
6581 /*
6582 * Timestamp is not supported; one RTT is roughly
6583 * the time to receive one full window of data.
6584 * Currently, RTT calculated this way is only used
6585 * for auto-tuning.
6586 */
6587 if (tp->rcv_rtt_est_ts != 0) {
6588 if (SEQ_LT(tp->rcv_nxt, tp->rcv_rtt_est_seq)) {
6589 /* Haven't received a full window yet */
6590 return;
6591 } else {
6592 rtt = tcp_now - tp->rcv_rtt_est_ts;
6593 if (rtt == 0) {
6594 /* Make adjustment for sub ms RTT */
6595 rtt = 1;
6596 }
6597 }
6598 } else {
6599 /* Use default value when no RTT measurement */
6600 rtt = TCPTV_RCVNOTS_QUANTUM;
6601 }
6602 /* Restart the measurement */
6603 tp->rcv_rtt_est_ts = tcp_now;
6604 tp->rcv_rtt_est_seq = tp->rcv_nxt + tp->rcv_wnd;
6605 }
6606
6607 /* Update receiver's SRTT */
6608 if (tp->rcv_srtt != 0) {
6609 /*
6610 * Use the smoothed rtt formula,
6611 * (srtt = rtt/8 + srtt*7/8) in fixed point
6612 */
6613 delta = (rtt << TCP_DELTA_SHIFT)
6614 - (tp->rcv_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
6615
6616 if ((tp->rcv_srtt += delta) <= 0) {
6617 tp->rcv_srtt = 1;
6618 }
6619 } else {
6620 /* No previous measurement */
6621 tp->rcv_srtt = rtt << TCP_RTT_SHIFT;
6622 }
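/*
 * Worked example (added commentary), assuming the usual BSD scaling
 * of TCP_RTT_SHIFT = 5 (srtt stored * 32) and TCP_DELTA_SHIFT = 2:
 * with rcv_srtt = 3200 (100 ms) and a new rtt = 80 ms,
 *   delta = (80 << 2) - (3200 >> 3) = 320 - 400 = -80
 *   rcv_srtt = 3200 - 80 = 3120, i.e. 97.5 ms
 * which matches srtt_new = 7/8 * 100 + 1/8 * 80 = 97.5 ms.
 */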
6623
6624 /*
6625 * For current RTT, base RTT and current RTT over k samples,
6626 * we are using the same state for both sender and receiver
6627 * as the most recent sample is always updated before any
6628 * other processing, i.e. the sender will not end up with
6629 * a high RTT due to the receiver.
6630 */
6631 tp->t_rttcur = rtt;
6632 update_base_rtt(tp, rtt);
6633 }
6634
6635 /*
6636 * Collect new round-trip time estimate and update averages and
6637 * current timeout.
6638 */
6639 static void
6640 tcp_xmit_timer(struct tcpcb *tp, int rtt,
6641 u_int32_t tsecr, tcp_seq th_ack)
6642 {
6643 VERIFY(rtt >= 0);
6644 int delta;
6645 int old_srtt = tp->t_srtt;
6646 int old_rttvar = tp->t_rttvar;
6647 bool log_rtt = false;
6648
6649 if (rtt == 0) {
6650 /*
6651 * As rtt has millisecond precision,
6652 * make adjustment for sub ms RTT
6653 */
6654 rtt = 1;
6655 }
6656
6657 if (rtt > 4 * TCPTV_MSL) {
6658 TCP_LOG(tp, "%s: rtt is %d - maxing it at 4 x MSL\n", __func__, rtt);
6659 /*
6660 * We compute RTT either based on the time-to-ACK a packet,
6661 * if TSval is disabled or based on the TSecr value.
6662 * If there is a middlebox messing up the TSecr value, we can
6663 * end up having HUGE rtt values, causing all kinds of problems.
6664 * Let's protect against this by capping RTT to 4*MSL
6665 * (60 seconds).
6666 */
6667 rtt = 4 * TCPTV_MSL;
6668 }
6669
6670 /*
6671 * On AWDL interface, the initial RTT measurement on SYN
6672 * can be wrong due to peer caching. Avoid the first RTT
6673 * measurement as it might skew up the RTO.
6674 * <rdar://problem/28739046>
6675 */
6676 if (tp->t_inpcb->inp_last_outifp != NULL &&
6677 (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) &&
6678 th_ack == tp->iss + 1) {
6679 return;
6680 }
6681
6682 if (tp->t_flagsext & TF_RECOMPUTE_RTT) {
6683 if (SEQ_GT(th_ack, tp->snd_una) &&
6684 SEQ_LEQ(th_ack, tp->snd_max) &&
6685 (tsecr == 0 ||
6686 TSTMP_GEQ(tsecr, tp->t_badrexmt_time))) {
6687 /*
6688 * We received a new ACK after a
6689 * spurious timeout. Adapt retransmission
6690 * timer as described in rfc 4015.
6691 */
6692 tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
6693 tp->t_badrexmt_time = 0;
6694 tp->t_srtt = max(tp->t_srtt_prev, rtt);
6695 tp->t_srtt = tp->t_srtt << TCP_RTT_SHIFT;
6696 tp->t_rttvar = max(tp->t_rttvar_prev, (rtt >> 1));
6697 tp->t_rttvar = tp->t_rttvar << TCP_RTTVAR_SHIFT;
6698
6699 if (tp->t_rttbest > (tp->t_srtt + tp->t_rttvar)) {
6700 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6701 }
6702
6703 goto compute_rto;
6704 } else {
6705 return;
6706 }
6707 }
6708
6709 tcpstat.tcps_rttupdated++;
6710 tp->t_rttupdated++;
6711
6712 tp->t_rttcur = rtt;
6713 update_base_rtt(tp, rtt);
6714
6715 if (tp->t_srtt != 0) {
6716 /*
6717 * srtt is stored as fixed point with 5 bits after the
6718 * binary point (i.e., scaled by 32). The following magic
6719 * is equivalent to the smoothing algorithm in rfc793 with
6720 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
6721 * point).
6722 *
6723 * FreeBSD adjusts rtt to origin 0 by subtracting 1
6724 * from the provided rtt value. This was required because
6725 * of the way t_rtttime was initialized to 1 before.
6726 * Since we changed t_rtttime to be based on
6727 * tcp_now, this extra adjustment is not needed.
6728 */
6729 delta = (rtt << TCP_DELTA_SHIFT)
6730 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
6731
6732 if ((tp->t_srtt += delta) <= 0) {
6733 tp->t_srtt = 1;
6734 }
6735
6736 /*
6737 * We accumulate a smoothed rtt variance (actually, a
6738 * smoothed mean difference), then set the retransmit
6739 * timer to smoothed rtt + 4 times the smoothed variance.
6740 * rttvar is stored as fixed point with 4 bits after the
6741 * binary point (scaled by 16). The following is
6742 * equivalent to rfc793 smoothing with an alpha of .75
6743 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
6744 * rfc793's wired-in beta.
6745 */
6746 if (delta < 0) {
6747 delta = -delta;
6748 }
6749 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
6750 if ((tp->t_rttvar += delta) <= 0) {
6751 tp->t_rttvar = 1;
6752 }
6753 if (tp->t_rttbest == 0 ||
6754 tp->t_rttbest > (tp->t_srtt + tp->t_rttvar)) {
6755 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6756 }
6757 } else {
6758 /*
6759 * No rtt measurement yet - use the unsmoothed rtt.
6760 * Set the variance to half the rtt (so our first
6761 * retransmit happens at 3*rtt).
6762 */
6763 tp->t_srtt = rtt << TCP_RTT_SHIFT;
6764 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
6765 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6766
6767 /* Initialize the receive SRTT */
6768 if (tp->rcv_srtt == 0) {
6769 tp->rcv_srtt = tp->t_srtt;
6770 }
6771 }
6772
6773 compute_rto:
6774 nstat_route_rtt(tp->t_inpcb->inp_route.ro_rt, tp->t_srtt,
6775 tp->t_rttvar);
6776
6777 /*
6778 * the retransmit should happen at rtt + 4 * rttvar.
6779 * Because of the way we do the smoothing, srtt and rttvar
6780 * will each average +1/2 tick of bias. When we compute
6781 * the retransmit timer, we want 1/2 tick of rounding and
6782 * 1 extra tick because of +-1/2 tick uncertainty in the
6783 * firing of the timer. The bias will give us exactly the
6784 * 1.5 tick we need. But, because the bias is
6785 * statistical, we have to test that we don't drop below
6786 * the minimum feasible timer (which is 2 ticks).
6787 */
6788 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
6789 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX,
6790 TCP_ADD_REXMTSLOP(tp));
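/*
 * Worked example (added commentary): with srtt = 100 ms and
 * rttvar = 25 ms, the nominal RTO is srtt + 4 * rttvar = 200 ms.
 * TCPT_RANGESET then clamps that between max(t_rttmin, rtt + 2)
 * and TCPTV_REXMTMAX (plus the slop from TCP_ADD_REXMTSLOP), so a
 * very low variance can never drive the timer below the floor.
 */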
6791
6792 /*
6793 * We received an ack for a packet that wasn't retransmitted;
6794 * it is probably safe to discard any error indications we've
6795 * received recently. This isn't quite right, but close enough
6796 * for now (a route might have failed after we sent a segment,
6797 * and the return path might not be symmetrical).
6798 */
6799 tp->t_softerror = 0;
6800
6801 if (log_rtt) {
6802 TCP_LOG_RTT_INFO(tp);
6803 }
6804
6805 TCP_LOG_RTT_CHANGE(tp, old_srtt, old_rttvar);
6806 }
6807
6808 static inline unsigned int
6809 tcp_maxmtu(struct rtentry *rt)
6810 {
6811 unsigned int maxmtu;
6812 int interface_mtu = 0;
6813
6814 RT_LOCK_ASSERT_HELD(rt);
6815 interface_mtu = rt->rt_ifp->if_mtu;
6816
6817 if (rt_key(rt)->sa_family == AF_INET &&
6818 INTF_ADJUST_MTU_FOR_CLAT46(rt->rt_ifp)) {
6819 interface_mtu = IN6_LINKMTU(rt->rt_ifp);
6820 /* Further adjust the size for CLAT46 expansion */
6821 interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
6822 }
6823
6824 if (rt->rt_rmx.rmx_mtu == 0) {
6825 maxmtu = interface_mtu;
6826 } else {
6827 maxmtu = MIN(rt->rt_rmx.rmx_mtu, interface_mtu);
6828 }
6829
6830 return maxmtu;
6831 }
6832
6833 static inline unsigned int
6834 tcp_maxmtu6(struct rtentry *rt)
6835 {
6836 unsigned int maxmtu;
6837 struct nd_ifinfo *ndi = NULL;
6838
6839 RT_LOCK_ASSERT_HELD(rt);
6840 if ((ndi = ND_IFINFO(rt->rt_ifp)) != NULL && !ndi->initialized) {
6841 ndi = NULL;
6842 }
6843 if (ndi != NULL) {
6844 lck_mtx_lock(&ndi->lock);
6845 }
6846 if (rt->rt_rmx.rmx_mtu == 0) {
6847 maxmtu = IN6_LINKMTU(rt->rt_ifp);
6848 } else {
6849 maxmtu = MIN(rt->rt_rmx.rmx_mtu, IN6_LINKMTU(rt->rt_ifp));
6850 }
6851 if (ndi != NULL) {
6852 lck_mtx_unlock(&ndi->lock);
6853 }
6854
6855 return maxmtu;
6856 }
6857
6858 unsigned int
6859 get_maxmtu(struct rtentry *rt)
6860 {
6861 unsigned int maxmtu = 0;
6862
6863 RT_LOCK_ASSERT_NOTHELD(rt);
6864
6865 RT_LOCK(rt);
6866
6867 if (rt_key(rt)->sa_family == AF_INET6) {
6868 maxmtu = tcp_maxmtu6(rt);
6869 } else {
6870 maxmtu = tcp_maxmtu(rt);
6871 }
6872
6873 RT_UNLOCK(rt);
6874
6875 return maxmtu;
6876 }
6877
6878 /*
6879 * Determine a reasonable value for maxseg size.
6880 * If the route is known, check route for mtu.
6881 * If none, use an mss that can be handled on the outgoing
6882 * interface without forcing IP to fragment; if bigger than
6883 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
6884 * to utilize large mbufs. If no route is found, route has no mtu,
6885 * or the destination isn't local, use a default, hopefully conservative
6886 * size (usually 512 or the default IP max size, but no more than the mtu
6887 * of the interface), as we can't discover anything about intervening
6888 * gateways or networks. We also initialize the congestion/slow start
6889 * window. While looking at the routing entry, we also initialize
6890 * other path-dependent parameters from pre-set or cached values
6891 * in the routing entry.
6892 *
6893 * Also take into account the space needed for options that we
6894 * send regularly. Make maxseg shorter by that amount to assure
6895 * that we can send maxseg amount of data even when the options
6896 * are present. Store the upper limit of the length of options plus
6897 * data in maxopd.
6898 *
6899 * NOTE that this routine is only called when we process an incoming
6900 * segment, for outgoing segments only tcp_mssopt is called.
6901 *
6902 */
6903 void
6904 tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope)
6905 {
6906 struct rtentry *rt;
6907 struct ifnet *ifp;
6908 int rtt, mss;
6909 uint32_t bufsize;
6910 struct inpcb *inp;
6911 struct socket *so;
6912 int origoffer = offer;
6913 int isipv6;
6914 int min_protoh;
6915
6916 inp = tp->t_inpcb;
6917
6918 so = inp->inp_socket;
6919 /*
6920 * Nothing left to send after the socket is defunct or TCP is in the closed state
6921 */
6922 if ((so->so_state & SS_DEFUNCT) || tp->t_state == TCPS_CLOSED) {
6923 return;
6924 }

	isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
	min_protoh = isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr)
	    : sizeof(struct tcpiphdr);

	if (isipv6) {
		rt = tcp_rtlookup6(inp, input_ifscope);
	} else {
		rt = tcp_rtlookup(inp, input_ifscope);
	}

	if (rt == NULL) {
		tp->t_maxopd = tp->t_maxseg = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
		return;
	}
	ifp = rt->rt_ifp;
	/*
	 * Slower link window correction:
	 * If a value is specified for slowlink_wsize, use it for
	 * PPP links believed to be on a serial modem (speed < 128Kbps).
	 * Excludes 9600bps as it is the default value advertised
	 * by pseudo-devices over PPP.
	 */
	if (ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
	    ifp->if_baudrate > 9600 && ifp->if_baudrate <= 128000) {
		tp->t_flags |= TF_SLOWLINK;
	}

	/*
	 * Offer == -1 means that we didn't receive SYN yet;
	 * use the value cached in the route instead.
	 */
	if (offer == -1) {
		offer = rt->rt_rmx.rmx_filler[0];
	}
	/*
	 * Offer == 0 means that there was no MSS on the SYN segment;
	 * in this case we use tcp_mssdflt.
	 */
	if (offer == 0) {
		offer = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
	} else {
		/*
		 * Prevent DoS attack with too small MSS. Round up
		 * to at least minmss.
		 */
		offer = max(offer, tcp_minmss);
		/*
		 * Sanity check: make sure that maxopd will be large
		 * enough to allow some data on segments even if all
		 * the option space is used (40 bytes). Otherwise
		 * funny things may happen in tcp_output.
		 */
		offer = max(offer, 64);
	}
	rt->rt_rmx.rmx_filler[0] = offer;

	/*
	 * While we're here, check if there's an initial rtt
	 * or rttvar. Convert from the route-table units
	 * to scaled multiples of the slow timeout timer.
	 */
	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt) != 0) {
		tcp_getrt_rtt(tp, rt);
	} else {
		tp->t_rttmin = TCPTV_REXMTMIN;
	}

	mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));

	mss = tcp_get_effective_mtu(rt, mss);
#if NECP
	// At this point, the mss is just the MTU. Adjust if necessary.
	mss = necp_socket_get_effective_mtu(inp, mss);
#endif /* NECP */

	mss -= min_protoh;

	if (rt->rt_rmx.rmx_mtu == 0) {
		if (isipv6) {
			mss = min(mss, tcp_v6mssdflt);
		} else {
			mss = min(mss, tcp_mssdflt);
		}
	}

	mss = min(mss, offer);
	/*
	 * maxopd stores the maximum length of data AND options
	 * in a segment; maxseg is the amount of data in a normal
	 * segment. We need to store this value (maxopd) apart
	 * from maxseg, because now every segment carries options
	 * and thus we normally have somewhat less data in segments.
	 */
	tp->t_maxopd = mss;

	/*
	 * origoffer == -1 indicates that no segments were received yet.
	 * In this case we just guess. If timestamps are in use (or were
	 * requested and no SYN has been seen yet), reserve the option
	 * space they will occupy in every segment.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
	    (origoffer == -1 ||
	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) {
		mss -= TCPOLEN_TSTAMP_APPA;
	}

#if MPTCP
	mss -= mptcp_adj_mss(tp, FALSE);
#endif /* MPTCP */
	tp->t_maxseg = mss;

	/*
	 * If there's a pipesize (i.e. loopback), change the socket
	 * buffer to that size only if it's bigger than the current
	 * sockbuf size. Make the socket buffers an integral
	 * number of mss units; if the mss is larger than
	 * the socket buffer, decrease the mss.
	 */
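	/*
	 * Rounding sketch (illustrative numbers): with bufsize = 131072
	 * and mss = 1448, ((131072 + 1448 - 1) / 1448) * 1448 =
	 * 91 * 1448 = 131768, i.e. the buffer is rounded up to a whole
	 * number of segments before sbreserve().
	 */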
#if RTV_SPIPE
	bufsize = rt->rt_rmx.rmx_sendpipe;
	if (bufsize < so->so_snd.sb_hiwat)
#endif
	bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss) {
		mss = bufsize;
	} else {
		bufsize = (((bufsize + mss - 1) / mss) * mss);
		(void)sbreserve(&so->so_snd, bufsize);
	}
	tp->t_maxseg = mss;

	ASSERT(tp->t_maxseg);

	/*
	 * Update MSS using recommendation from link status report. This is
	 * temporary
	 */
	tcp_update_mss_locked(so, ifp);

#if RTV_RPIPE
	bufsize = rt->rt_rmx.rmx_recvpipe;
	if (bufsize < so->so_rcv.sb_hiwat)
#endif
	bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = (((bufsize + mss - 1) / mss) * mss);
		(void)sbreserve(&so->so_rcv, bufsize);
	}

	set_tcp_stream_priority(so);

	if (rt->rt_rmx.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path. Use this to set
		 * slow-start threshold, but set the threshold to
		 * no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
		tcpstat.tcps_usedssthresh++;
	} else {
		tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	}

	/*
	 * Set the slow-start flight size depending on whether this
	 * is a local network or not.
	 */
	if (CC_ALGO(tp)->cwnd_init != NULL) {
		CC_ALGO(tp)->cwnd_init(tp);
	}

	tcp_ccdbg_trace(tp, NULL, TCP_CC_CWND_INIT);

	if (TCP_USE_RLEDBAT(tp, so) && tcp_cc_rledbat.rwnd_init != NULL) {
		tcp_cc_rledbat.rwnd_init(tp);
	}

	/* Route locked during lookup above */
	RT_UNLOCK(rt);
}

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct tcpcb *tp)
{
	struct rtentry *rt;
	int mss;
	int isipv6;
	int min_protoh;

	isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
	min_protoh = isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr)
	    : sizeof(struct tcpiphdr);

	if (isipv6) {
		rt = tcp_rtlookup6(tp->t_inpcb, IFSCOPE_NONE);
	} else {
		rt = tcp_rtlookup(tp->t_inpcb, IFSCOPE_NONE);
	}
	if (rt == NULL) {
		return isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
	}
	/*
	 * Slower link window correction:
	 * If a value is specified for slowlink_wsize, use it for PPP links
	 * believed to be on a serial modem (speed < 128Kbps). Excludes 9600bps
	 * as it is the default value advertised by pseudo-devices over PPP.
	 */
	if (rt->rt_ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
	    rt->rt_ifp->if_baudrate > 9600 && rt->rt_ifp->if_baudrate <= 128000) {
		tp->t_flags |= TF_SLOWLINK;
	}

	mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));

	mss = tcp_get_effective_mtu(rt, mss);

	/* Route locked during lookup above */
	RT_UNLOCK(rt);

#if NECP
	// At this point, the mss is just the MTU. Adjust if necessary.
	mss = necp_socket_get_effective_mtu(tp->t_inpcb, mss);
#endif /* NECP */

	return mss - min_protoh;
}

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment. Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
static void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	u_int32_t ocwnd = tp->snd_cwnd;
	tp->t_timer[TCPT_REXMT] = 0;
	tp->t_timer[TCPT_PTO] = 0;
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset
	 * (tp->snd_una has not yet been updated when this function
	 * is called)
	 */
	tp->snd_cwnd = tp->t_maxseg + BYTES_ACKED(th, tp);
	(void) tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt)) {
		tp->snd_nxt = onxt;
	}
	/*
	 * Partial window deflation. Relies on the fact that
	 * tp->snd_una has not been updated yet.
	 */
	if (tp->snd_cwnd > BYTES_ACKED(th, tp)) {
		tp->snd_cwnd -= BYTES_ACKED(th, tp);
	} else {
		tp->snd_cwnd = 0;
	}
	tp->snd_cwnd += tp->t_maxseg;
}
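
/*
 * Deflation sketch (illustrative numbers, per the NewReno partial-ack
 * rule above): with snd_cwnd = 10 * maxseg and a partial ACK covering
 * 3 * maxseg, the window becomes 10 - 3 + 1 = 8 segments — the
 * acknowledged bytes are removed and one maxseg is added back for the
 * segment just retransmitted.
 */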

/*
 * Drop a random TCP connection that hasn't been serviced yet and
 * is eligible for discard. There is a one in qlen chance that
 * we will return null, saying that there are no droppable
 * requests. In this case, the protocol specific code should drop
 * the new request. This ensures fairness.
 *
 * The listening TCP socket "head" must be locked
 */
static int
tcp_dropdropablreq(struct socket *head)
{
	struct socket *so, *sonext;
	unsigned int j, qlen;
	static uint32_t rnd = 0;
	static uint64_t old_runtime;
	static unsigned int cur_cnt, old_cnt;
	uint64_t now_sec, i;
	struct inpcb *inp = NULL;
	struct tcpcb *tp;

	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		return 0;
	}

	if (TAILQ_EMPTY(&head->so_incomp)) {
		return 0;
	}

	so_acquire_accept_list(head, NULL);
	socket_unlock(head, 0);

	/*
	 * Check if there is any socket in the incomp queue
	 * that is closed because of a reset from the peer and is
	 * waiting to be garbage collected. If so, pick that as
	 * the victim
	 */
	TAILQ_FOREACH_SAFE(so, &head->so_incomp, so_list, sonext) {
		inp = sotoinpcb(so);
		tp = intotcpcb(inp);
		if (tp != NULL && tp->t_state == TCPS_CLOSED &&
		    so->so_head != NULL &&
		    (so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) ==
		    (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) {
			/*
			 * The listen socket is already locked but we
			 * can lock this socket here without lock ordering
			 * issues because it is in the incomp queue and
			 * is not visible to others.
			 */
			if (socket_try_lock(so)) {
				so->so_usecount++;
				goto found_victim;
			} else {
				continue;
			}
		}
	}

	so = TAILQ_FIRST(&head->so_incomp);

	now_sec = net_uptime();
	if ((i = (now_sec - old_runtime)) != 0) {
		old_runtime = now_sec;
		old_cnt = cur_cnt / i;
		cur_cnt = 0;
	}

	qlen = head->so_incqlen;
	if (rnd == 0) {
		rnd = RandomULong();
	}

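	/*
	 * Victim selection sketch: the 16-bit linear congruential step
	 * below produces rnd in [0, 65535], and ((qlen + 1) * rnd) >> 16
	 * scales that to a roughly uniform index j in [0, qlen]. When
	 * j == qlen the walk runs off the end of the queue, which is the
	 * "one in qlen" no-victim case described above.
	 */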
	if (++cur_cnt > qlen || old_cnt > qlen) {
		rnd = (314159 * rnd + 66329) & 0xffff;
		j = ((qlen + 1) * rnd) >> 16;

		while (j-- && so) {
			so = TAILQ_NEXT(so, so_list);
		}
	}
	/* Find a connection that is not already closing (or being served) */
	while (so) {
		inp = (struct inpcb *)so->so_pcb;

		sonext = TAILQ_NEXT(so, so_list);

		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
			/*
			 * Avoid the issue of a socket being accepted
			 * by one input thread and being dropped by
			 * another input thread. If we can't get a hold
			 * on this mutex, then grab the next socket in
			 * line.
			 */
			if (socket_try_lock(so)) {
				so->so_usecount++;
				if ((so->so_usecount == 2) &&
				    (so->so_state & SS_INCOMP) &&
				    !(so->so_flags & SOF_INCOMP_INPROGRESS)) {
					break;
				} else {
					/*
					 * Don't use if being accepted or
					 * used in any other way.
					 */
					in_pcb_checkstate(inp, WNT_RELEASE, 1);
					socket_unlock(so, 1);
				}
			} else {
				/*
				 * Do not try to lock the inp in
				 * in_pcb_checkstate because the lock
				 * is already held in some other thread.
				 * Only drop the inp_wntcnt reference.
				 */
				in_pcb_checkstate(inp, WNT_RELEASE, 1);
			}
		}
		so = sonext;
	}
	if (so == NULL) {
		socket_lock(head, 0);
		so_release_accept_list(head);
		return 0;
	}

	/* Make sure the socket is still in the right state to be discarded */

	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
		socket_unlock(so, 1);
		socket_lock(head, 0);
		so_release_accept_list(head);
		return 0;
	}

found_victim:
	if (so->so_usecount != 2 || !(so->so_state & SS_INCOMP)) {
		/* Do not discard: that socket is being accepted */
		socket_unlock(so, 1);
		socket_lock(head, 0);
		so_release_accept_list(head);
		return 0;
	}

	socket_lock(head, 0);
	TAILQ_REMOVE(&head->so_incomp, so, so_list);
	head->so_incqlen--;
	head->so_qlen--;
	so->so_state &= ~SS_INCOMP;
	so->so_flags |= SOF_OVERFLOW;
	so->so_head = NULL;
	so_release_accept_list(head);
	socket_unlock(head, 0);

	socket_lock_assert_owned(so);
	tp = sototcpcb(so);

	tcp_close(tp);
	if (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING) {
		/*
		 * Someone has a wantcnt on this pcb. Since WNT_ACQUIRE
		 * doesn't require a lock, it could have happened while
		 * we are holding the lock. This pcb will have to
		 * be garbage collected later.
		 * Release the reference held for the so_incomp queue.
		 */
		VERIFY(so->so_usecount > 0);
		so->so_usecount--;
		socket_unlock(so, 1);
	} else {
		/*
		 * Unlock this socket and leave the reference on.
		 * We need to acquire the pcbinfo lock in order to
		 * fully dispose of it.
		 */
		socket_unlock(so, 0);

		lck_rw_lock_exclusive(&tcbinfo.ipi_lock);

		socket_lock(so, 0);
		/* Release the reference held for the so_incomp queue */
		VERIFY(so->so_usecount > 0);
		so->so_usecount--;

		if (so->so_usecount != 1 ||
		    (inp->inp_wantcnt > 0 &&
		    inp->inp_wantcnt != WNT_STOPUSING)) {
			/*
			 * There is an extra wantcount or usecount
			 * that must have been added when the socket
			 * was unlocked. This socket will have to be
			 * garbage collected later.
			 */
			socket_unlock(so, 1);
		} else {
			/* Drop the reference held for this function */
			VERIFY(so->so_usecount > 0);
			so->so_usecount--;

			in_pcbdispose(inp);
		}
		lck_rw_done(&tcbinfo.ipi_lock);
	}
	tcpstat.tcps_drops++;

	socket_lock(head, 0);
	return 1;
}

/* Set background congestion control on a socket */
void
tcp_set_background_cc(struct socket *so)
{
	tcp_set_new_cc(so, TCP_CC_ALGO_BACKGROUND_INDEX);
}

/* Set foreground congestion control on a socket */
void
tcp_set_foreground_cc(struct socket *so)
{
	if (tcp_use_newreno) {
		tcp_set_new_cc(so, TCP_CC_ALGO_NEWRENO_INDEX);
#if (DEVELOPMENT || DEBUG)
	} else if (tcp_use_ledbat) {
		/* Only used for testing */
		tcp_set_new_cc(so, TCP_CC_ALGO_BACKGROUND_INDEX);
#endif
	} else {
		struct inpcb *inp = sotoinpcb(so);
		struct tcpcb *tp = intotcpcb(inp);
		if (TCP_L4S_ENABLED(tp)) {
			tcp_set_new_cc(so, TCP_CC_ALGO_PRAGUE_INDEX);
		} else {
			tcp_set_new_cc(so, TCP_CC_ALGO_CUBIC_INDEX);
		}
	}
}

static void
tcp_set_new_cc(struct socket *so, uint8_t cc_index)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);

	if (tp->tcp_cc_index != cc_index) {
		if (CC_ALGO(tp)->cleanup != NULL) {
			CC_ALGO(tp)->cleanup(tp);
		}
		tp->tcp_cc_index = cc_index;

		tcp_cc_allocate_state(tp);

		if (CC_ALGO(tp)->switch_to != NULL) {
			CC_ALGO(tp)->switch_to(tp);
		}

		tcp_ccdbg_trace(tp, NULL, TCP_CC_CHANGE_ALGO);
	}
}

void
tcp_set_recv_bg(struct socket *so)
{
	if (!IS_TCP_RECV_BG(so)) {
		so->so_flags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG;

		struct inpcb *inp = sotoinpcb(so);
		struct tcpcb *tp = intotcpcb(inp);

		if (TCP_RLEDBAT_ENABLED(tp) && tcp_cc_rledbat.switch_to != NULL) {
			tcp_cc_rledbat.switch_to(tp);
		}
	}
}

void
tcp_clear_recv_bg(struct socket *so)
{
	if (IS_TCP_RECV_BG(so)) {
		so->so_flags1 &= ~(SOF1_TRAFFIC_MGT_TCP_RECVBG);
	}
}

void
inp_fc_throttle_tcp(struct inpcb *inp)
{
	tcpcb_ref_t tp = inp->inp_ppcb;

	if (!tcp_flow_control_response) {
		return;
	}

	/*
	 * Back off the slow-start threshold and enter
	 * congestion avoidance phase
	 */
	if (CC_ALGO(tp)->pre_fr != NULL) {
		CC_ALGO(tp)->pre_fr(tp);
	}
}

void
inp_fc_unthrottle_tcp(struct inpcb *inp)
{
	tcpcb_ref_t tp = inp->inp_ppcb;
	struct ifnet *outifp = inp->inp_last_outifp;

	if (tcp_flow_control_response) {
		if (CC_ALGO(tp)->post_fr != NULL) {
			CC_ALGO(tp)->post_fr(tp, NULL);
		}

		tp->t_bytes_acked = 0;

		/*
		 * Reset the retransmit state as we know that the reason
		 * for the delay in sending a packet is flow control on
		 * the outgoing interface. There is no need to back off
		 * the retransmit timer except on a cellular interface.
		 */
		if (tp->t_rxtshift != 0 && outifp != NULL &&
		    IFNET_IS_CELLULAR(outifp)) {
			TCP_LOG(tp, "inp_fc_unthrottle_tcp keep rxmit state t_rxtshift %d", tp->t_rxtshift);
		} else {
			TCP_RESET_REXMT_STATE(tp);
		}

		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;

		/*
		 * Start the output stream again. Since we are
		 * not retransmitting data, do not reset the
		 * retransmit timer or rtt calculation.
		 */
		tcp_output(tp);
		return;
	}

	/*
	 * Back off the slow-start threshold and enter
	 * congestion avoidance phase
	 */
	if (CC_ALGO(tp)->pre_fr != NULL) {
		CC_ALGO(tp)->pre_fr(tp);
	}

	tp->snd_cwnd = tp->snd_ssthresh;
	tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
	/*
	 * Restart counting for ABC as we changed the
	 * congestion window just now.
	 */
	tp->t_bytes_acked = 0;

	/*
	 * Reset the retransmit state as we know that the reason
	 * for the delay in sending a packet is flow control on
	 * the outgoing interface. There is no need to back off
	 * the retransmit timer.
	 */
	if (tp->t_rxtshift != 0 && outifp != NULL &&
	    IFNET_IS_CELLULAR(outifp)) {
		TCP_LOG(tp, "inp_fc_unthrottle_tcp keep rxmit state t_rxtshift %d", tp->t_rxtshift);
	} else {
		TCP_RESET_REXMT_STATE(tp);
	}

	/*
	 * Start the output stream again. Since we are
	 * not retransmitting data, do not reset the
	 * retransmit timer or rtt calculation.
	 */
	tcp_output(tp);
}

static int
tcp_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	int error;
	struct tcpstat *stat;
	stat = &tcpstat;

#if XNU_TARGET_OS_OSX
	struct tcpstat zero_stat;

	if (tcp_disable_access_to_stats &&
	    !kauth_cred_issuser(kauth_cred_get())) {
		bzero(&zero_stat, sizeof(zero_stat));
		stat = &zero_stat;
	}

#endif /* XNU_TARGET_OS_OSX */

	if (req->oldptr == 0) {
		req->oldlen = (size_t)sizeof(struct tcpstat);
	}

	error = SYSCTL_OUT(req, stat, MIN(sizeof(tcpstat), req->oldlen));

	return error;
}

/*
 * Checksum extended TCP header and data.
 */
int
tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen)
{
	struct ifnet *ifp = m->m_pkthdr.rcvif;

	switch (af) {
	case AF_INET: {
		struct ip *ip = mtod(m, struct ip *);
		struct ipovly *ipov = (struct ipovly *)ip;

		/* ip_stripoptions() must have been called before we get here */
		ASSERT((ip->ip_hl << 2) == sizeof(*ip));

		if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
		    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
		    (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
				th->th_sum = m->m_pkthdr.csum_rx_val;
			} else {
				uint32_t sum = m->m_pkthdr.csum_rx_val;
				uint32_t start = m->m_pkthdr.csum_rx_start;
				int32_t trailer = (m_pktlen(m) - (off + tlen));

				/*
				 * Perform 1's complement adjustment of octets
				 * that got included/excluded in the hardware-
				 * calculated checksum value. Ignore cases
				 * where the value already includes the entire
				 * IP header span, as the sum for those octets
				 * would already be 0 by the time we get here;
				 * IP has already performed its header checksum
				 * checks. If we do need to adjust, restore
				 * the original fields in the IP header when
				 * computing the adjustment value. Also take
				 * care of any trailing bytes and subtract out
				 * their partial sum.
				 */
				ASSERT(trailer >= 0);
				if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
				    ((start != 0 && start != off) || trailer)) {
					uint32_t swbytes = (uint32_t)trailer;

					if (start < off) {
						ip->ip_len += sizeof(*ip);
#if BYTE_ORDER != BIG_ENDIAN
						HTONS(ip->ip_len);
						HTONS(ip->ip_off);
#endif /* BYTE_ORDER != BIG_ENDIAN */
					}
					/* callee folds in sum */
					sum = m_adj_sum16(m, start, off,
					    tlen, sum);
					if (off > start) {
						swbytes += (off - start);
					} else {
						swbytes += (start - off);
					}

					if (start < off) {
#if BYTE_ORDER != BIG_ENDIAN
						NTOHS(ip->ip_off);
						NTOHS(ip->ip_len);
#endif /* BYTE_ORDER != BIG_ENDIAN */
						ip->ip_len -= sizeof(*ip);
					}

					if (swbytes != 0) {
						tcp_in_cksum_stats(swbytes);
					}
					if (trailer != 0) {
						m_adj(m, -trailer);
					}
				}

				/* callee folds in sum */
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    sum + htonl(tlen + IPPROTO_TCP));
			}
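			/*
			 * Finalize: invert the folded one's-complement sum so
			 * that a correct checksum leaves th_sum == 0, which is
			 * what the check at the bottom of this function tests.
			 */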
			th->th_sum ^= 0xffff;
		} else {
			uint16_t ip_sum;
			int len;
			char b[9];

			bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1));
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ip_sum = ipov->ih_len;
			ipov->ih_len = (u_short)tlen;
#if BYTE_ORDER != BIG_ENDIAN
			HTONS(ipov->ih_len);
#endif
			len = sizeof(struct ip) + tlen;
			th->th_sum = in_cksum(m, len);
			bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = ip_sum;

			tcp_in_cksum_stats(len);
		}
		break;
	}
	case AF_INET6: {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
		    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
		    (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
				th->th_sum = m->m_pkthdr.csum_rx_val;
			} else {
				uint32_t sum = m->m_pkthdr.csum_rx_val;
				uint32_t start = m->m_pkthdr.csum_rx_start;
				int32_t trailer = (m_pktlen(m) - (off + tlen));

				/*
				 * Perform 1's complement adjustment of octets
				 * that got included/excluded in the hardware-
				 * calculated checksum value. Also take care
				 * of any trailing bytes and subtract out their
				 * partial sum.
				 */
				ASSERT(trailer >= 0);
				if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
				    (start != off || trailer != 0)) {
					uint16_t s = 0, d = 0;
					uint32_t swbytes = (uint32_t)trailer;

					if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
						s = ip6->ip6_src.s6_addr16[1];
						ip6->ip6_src.s6_addr16[1] = 0;
					}
					if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
						d = ip6->ip6_dst.s6_addr16[1];
						ip6->ip6_dst.s6_addr16[1] = 0;
					}

					/* callee folds in sum */
					sum = m_adj_sum16(m, start, off,
					    tlen, sum);
					if (off > start) {
						swbytes += (off - start);
					} else {
						swbytes += (start - off);
					}

					if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
						ip6->ip6_src.s6_addr16[1] = s;
					}
					if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
						ip6->ip6_dst.s6_addr16[1] = d;
					}

					if (swbytes != 0) {
						tcp_in6_cksum_stats(swbytes);
					}
					if (trailer != 0) {
						m_adj(m, -trailer);
					}
				}

				th->th_sum = in6_pseudo(
					&ip6->ip6_src, &ip6->ip6_dst,
					sum + htonl(tlen + IPPROTO_TCP));
			}
			th->th_sum ^= 0xffff;
		} else {
			tcp_in6_cksum_stats(tlen);
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off, tlen);
		}
		break;
	}
	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	if (th->th_sum != 0) {
		tcpstat.tcps_rcvbadsum++;
		IF_TCP_STATINC(ifp, badformat);
		return -1;
	}

	return 0;
}

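/*
 * DUMP_BUF_CHK() expects its caller to provide `k` (the count returned
 * by the last scnprintf), `clen` (bytes remaining in the buffer), `c`
 * (the write cursor), and a `done:` label to jump to once the buffer
 * is exhausted.
 */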
#define DUMP_BUF_CHK() {        \
	clen -= k;              \
	if (clen < 1)           \
		goto done;      \
	c += k;                 \
}

int
dump_tcp_reass_qlen(char *str __sized_by(str_len), int str_len)
{
	char *c = str;
	int k, clen = str_len;

	if (tcp_reass_total_qlen != 0) {
		k = scnprintf(c, clen, "\ntcp reass qlen %d\n", tcp_reass_total_qlen);
		DUMP_BUF_CHK();
	}

done:
	return str_len - clen;
}

uint32_t
tcp_reass_qlen_space(struct socket *so)
{
	uint32_t space = 0;
	struct inpcb *inp = sotoinpcb(so);

	if (inp != NULL) {
		struct tcpcb *tp = intotcpcb(inp);

		if (tp != NULL) {
			space = tp->t_reassq_mbcnt;
		}
	}
	return space;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, tcp_getstat,
    "S,tcpstat", "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
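
/*
 * Userland sketch (illustrative; uses the standard sysctl(3) API and is
 * not part of this file): reading the statistics exported above.
 *
 *	struct tcpstat ts;
 *	size_t len = sizeof(ts);
 *	if (sysctlbyname("net.inet.tcp.stats", &ts, &len, NULL, 0) == 0)
 *		printf("drops: %u\n", ts.tcps_drops);
 */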

static int
sysctl_rexmtthresh SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

	int error, val = tcprexmtthresh;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		return error;
	}

	/*
	 * Constrain the number of duplicate ACKs
	 * to consider for TCP fast retransmit
	 * to either 2 or 3
	 */

	if (val < 2 || val > 3) {
		return EINVAL;
	}

	tcprexmtthresh = (uint8_t)val;

	return 0;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmt_thresh, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_LOCKED, &tcprexmtthresh, 0, &sysctl_rexmtthresh, "I",
    "Duplicate ACK Threshold for Fast Retransmit");
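
/*
 * Usage sketch (illustrative; standard sysctl(8) interface):
 *
 *	# sysctl net.inet.tcp.rexmt_thresh
 *	# sysctl -w net.inet.tcp.rexmt_thresh=2
 *
 * Values outside [2, 3] are rejected with EINVAL by the handler above.
 */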
7859