/*
 * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/flowswitch/fsw_var.h>
#include <skywalk/nexus/flowswitch/flow/flow_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/in_stat.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <sys/kdebug.h>

/* min/max linger time (in seconds) */
#define FLOWTRACK_LINGER_MIN    1
#define FLOWTRACK_LINGER_MAX    120

/* maximum allowed rate of SYNs per second */
#define FLOWTRACK_SYN_RATE      20

static int flow_track_tcp(struct flow_entry *, struct flow_track *,
    struct flow_track *, struct __kern_packet *, bool);
static int flow_track_udp(struct flow_entry *, struct flow_track *,
    struct flow_track *, struct __kern_packet *, bool);

static void
flow_track_tcp_get_wscale(struct flow_track *s, struct __kern_packet *pkt)
{
	const uint8_t *hdr = (uint8_t *)(void *)pkt->pkt_flow_tcp_hdr;
	int hlen = pkt->pkt_flow_tcp_hlen;
	uint8_t optlen, wscale = 0;
	const uint8_t *opt;

	_CASSERT(sizeof(s->fse_flags) == sizeof(uint16_t));
	ASSERT(hlen >= (int)sizeof(struct tcphdr));

	opt = hdr + sizeof(struct tcphdr);
	hlen -= sizeof(struct tcphdr);
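	/*
	 * Walk the TCP options looking for a window scale option.  EOL and
	 * NOP are single-byte options; everything else carries a length
	 * byte in opt[1].  The loop requires at least 3 bytes remaining
	 * because the window scale option itself is 3 bytes long
	 * (kind, length, shift count).
	 */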
	while (hlen >= 3) {
		switch (*opt) {
		case TCPOPT_EOL:
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_WINDOW:
			wscale = opt[2];
			if (wscale > TCP_MAX_WINSHIFT) {
				wscale = TCP_MAX_WINSHIFT;
			}
			os_atomic_or(&s->fse_flags, FLOWSTATEF_WSCALE, relaxed);
			OS_FALLTHROUGH;
		default:
			optlen = opt[1];
			if (optlen < 2) {
				optlen = 2;
			}
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	s->fse_wscale = wscale;
}

static void
flow_track_tcp_init(struct flow_entry *fe, struct flow_track *src,
    struct flow_track *dst, struct __kern_packet *pkt)
{
#pragma unused(dst)
	const uint8_t tcp_flags = pkt->pkt_flow_tcp_flags;

	/*
	 * Source state initialization.
	 */
	src->fse_state = TCPS_SYN_SENT;
	src->fse_seqlo = ntohl(pkt->pkt_flow_tcp_seq);
	src->fse_seqhi = (src->fse_seqlo + pkt->pkt_flow_ulen + 1);
	if (tcp_flags & TH_SYN) {
		src->fse_seqhi++;
		flow_track_tcp_get_wscale(src, pkt);
	}
	if (tcp_flags & TH_FIN) {
		src->fse_seqhi++;
	}

	src->fse_max_win = MAX(ntohs(pkt->pkt_flow_tcp_win), 1);
	if (src->fse_flags & FLOWSTATEF_WSCALE) {
		/* remove scale factor from initial window */
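		/*
		 * The window advertised in a SYN segment is never scaled
		 * (RFC 7323), so convert it to post-scale units, rounding
		 * up: ceil(win / 2^wscale).
		 */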
		int win = src->fse_max_win;
		ASSERT(src->fse_wscale <= TCP_MAX_WINSHIFT);
		win += (1 << src->fse_wscale);
		src->fse_max_win = (uint16_t)((win - 1) >> src->fse_wscale);
	}

	/*
	 * Destination state initialization.
	 */
	dst->fse_state = TCPS_CLOSED;
	dst->fse_seqhi = 1;
	dst->fse_max_win = 1;

	/*
	 * Linger time (in seconds).
	 */
	fe->fe_linger_wait = (2 * tcp_msl) / TCP_RETRANSHZ;
	if (fe->fe_linger_wait < FLOWTRACK_LINGER_MIN) {
		fe->fe_linger_wait = FLOWTRACK_LINGER_MIN;
	} else if (fe->fe_linger_wait > FLOWTRACK_LINGER_MAX) {
		fe->fe_linger_wait = FLOWTRACK_LINGER_MAX;
	}

	os_atomic_or(&fe->fe_flags, FLOWENTF_INITED, relaxed);
}

/*
 * The TCP ACK RTT tracking is a coarse-grained measurement of the time it
 * takes for an endpoint to process an incoming segment and generate an ACK,
 * as seen from the point of observation.  For the flowswitch this means:
 *
 *     local end RTT  = local stack processing time
 *     remote end RTT = driver + network + remote endpoint's processing time
 *
 * Since the measurement is lightweight and sampling based, it cannot learn
 * about and distinguish a lost segment's ACK.  So we could occasionally get
 * a large RTT sample from an ACK to a retransmitted segment; thus rtt_max is
 * not particularly meaningful to us.
 */
__attribute__((always_inline))
static inline void
flow_track_tcp_rtt(struct flow_entry *fe, boolean_t input,
    struct flow_track *src, struct flow_track *dst, uint8_t tcp_flags,
    uint32_t seq, uint32_t ack, uint32_t ulen)
{
#pragma unused(fe, input) /* KDBG defined as noop in release build */
	uint64_t dst_last, src_last;
	uint64_t now, time_diff;
	uint32_t curval, oldval;
	clock_sec_t tv_sec;
	clock_usec_t tv_usec;

	src_last = src->fse_rtt.frtt_last;
	dst_last = dst->fse_rtt.frtt_last;

	/* start a new RTT tracking session under sampling rate limit */
	if (dst_last == 0 ||
	    _net_uptime - dst_last > FLOWTRACK_RTT_SAMPLE_INTERVAL) {
		if (ulen > 0 &&
		    dst->fse_rtt.frtt_timestamp == 0) {
			dst->fse_rtt.frtt_timestamp = mach_absolute_time();
			dst->fse_rtt.frtt_last = _net_uptime;
			dst->fse_rtt.frtt_seg_begin = seq;
			dst->fse_rtt.frtt_seg_end = seq + ulen;
			KDBG((SK_KTRACE_FSW_FLOW_TRACK_RTT | DBG_FUNC_START),
			    SK_KVA(fe), fe->fe_pid, ntohs(fe->fe_key.fk_sport),
			    input ? 1 : 0);
		}
	}

	/* we have an ACK, see if current tracking session matches it */
	if (tcp_flags & TH_ACK) {
		if (src->fse_rtt.frtt_timestamp != 0 &&
		    src->fse_rtt.frtt_seg_begin <= ack) {
			now = mach_absolute_time();
			time_diff = now - src->fse_rtt.frtt_timestamp;

			absolutetime_to_microtime(time_diff, &tv_sec, &tv_usec);
			curval = (uint32_t)(tv_usec + tv_sec * 1000 * 1000);
			oldval = src->fse_rtt.frtt_usec;
			if (oldval == 0) {
				src->fse_rtt.frtt_usec = curval;
			} else {
				/* same EWMA decay as TCP RTT */
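				/*
				 * i.e. new = old + (sample - old) / 16, an
				 * EWMA with a gain of 1/16.
				 */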
				src->fse_rtt.frtt_usec =
				    ((oldval << 4) - oldval + curval) >> 4;
			}

			/* reset RTT tracking session */
			src->fse_rtt.frtt_timestamp = 0;
			src->fse_rtt.frtt_last = 0;
			KDBG((SK_KTRACE_FSW_FLOW_TRACK_RTT | DBG_FUNC_END),
			    SK_KVA(fe), fe->fe_pid, ntohs(fe->fe_key.fk_sport),
			    input ? 0 : 1);

			/* publish rtt stats into flow_stats object */
			/* just store both to avoid branch prediction etc. */
			fe->fe_stats->fs_lrtt = fe->fe_ltrack.fse_rtt_usec;
			fe->fe_stats->fs_rrtt = fe->fe_rtrack.fse_rtt_usec;
		}
	}
}

/*
 * The TCP connection tracking logic is based on Guido van Rooij's paper:
 * http://www.sane.nl/events/sane2000/papers/rooij.pdf
 *
 * In some ways, we act as a middlebox that passively tracks the TCP windows
 * of each connection on flows marked with FLOWENTF_TRACK.  We never modify
 * the packet or generate any response (e.g. RST) to the sender; thus we are
 * simply a silent observer.  The information we gather here is used later
 * if we need to generate a valid {FIN|RST} segment when the flow is nonviable.
 *
 * The implementation is borrowed from Packet Filter, and is further
 * simplified to cater to our use cases.
 */
#define FTF_HALFCLOSED  0x1     /* want flow to be marked as half closed */
#define FTF_WAITCLOSE   0x2     /* want flow to linger after close */
#define FTF_CLOSENOTIFY 0x4     /* want to notify NECP upon teardown */
#define FTF_WITHDRAWN   0x8     /* want flow to be torn down */
#define FTF_SYN_RLIM    0x10    /* want flow to rate limit SYN */
#define FTF_RST_RLIM    0x20    /* want flow to rate limit RST */
__attribute__((always_inline))
static inline int
flow_track_tcp(struct flow_entry *fe, struct flow_track *src,
    struct flow_track *dst, struct __kern_packet *pkt, bool input)
{
	const uint8_t tcp_flags = pkt->pkt_flow_tcp_flags;
	uint16_t win = ntohs(pkt->pkt_flow_tcp_win);
	uint32_t ack, end, seq, orig_seq;
	uint32_t ftflags = 0;
	uint8_t sws, dws;
	int ackskew, err = 0;

	if (__improbable((fe->fe_flags & FLOWENTF_INITED) == 0)) {
		flow_track_tcp_init(fe, src, dst, pkt);
	}

	flow_track_tcp_rtt(fe, input, src, dst, tcp_flags,
	    ntohl(pkt->pkt_flow_tcp_seq), ntohl(pkt->pkt_flow_tcp_ack),
	    pkt->pkt_flow_ulen);

	if (__improbable(dst->fse_state >= TCPS_FIN_WAIT_2 &&
	    src->fse_state >= TCPS_FIN_WAIT_2)) {
		if ((tcp_flags & (TH_SYN | TH_ACK)) == TH_SYN) {
			src->fse_state = dst->fse_state = TCPS_CLOSED;
			ftflags |= FTF_SYN_RLIM;
		}
		if (tcp_flags & TH_RST) {
			ftflags |= FTF_RST_RLIM;
		}
		if (input) {
			err = ENETRESET;
		}
		goto done;
	}

	if (__probable((tcp_flags & TH_SYN) == 0 &&
	    src->fse_wscale != 0 && dst->fse_wscale != 0)) {
		sws = src->fse_wscale;
		dws = dst->fse_wscale;
	} else {
		sws = dws = 0;
	}

	orig_seq = seq = ntohl(pkt->pkt_flow_tcp_seq);
	if (__probable(src->fse_seqlo != 0)) {
		ack = ntohl(pkt->pkt_flow_tcp_ack);
		end = seq + pkt->pkt_flow_ulen;
		if (tcp_flags & TH_SYN) {
			if ((tcp_flags & (TH_SYN | TH_ACK)) == TH_SYN) {
				ftflags |= FTF_SYN_RLIM;
			}
			end++;
		}
		if (tcp_flags & TH_FIN) {
			end++;
		}
		if (tcp_flags & TH_RST) {
			ftflags |= FTF_RST_RLIM;
		}
	} else {
		/* first packet from this end; set its state */
		ack = ntohl(pkt->pkt_flow_tcp_ack);
		end = seq + pkt->pkt_flow_ulen;
		if (tcp_flags & TH_SYN) {
			if ((tcp_flags & (TH_SYN | TH_ACK)) == TH_SYN) {
				ftflags |= FTF_SYN_RLIM;
			}
			end++;
			if (dst->fse_flags & FLOWSTATEF_WSCALE) {
				flow_track_tcp_get_wscale(src, pkt);
				if (src->fse_flags & FLOWSTATEF_WSCALE) {
					/*
					 * Remove scale factor from
					 * initial window.
					 */
					sws = src->fse_wscale;
					win = (uint16_t)(((u_int32_t)win + (1 << sws) - 1)
					    >> sws);
					dws = dst->fse_wscale;
				} else {
					/* fixup other window */
					dst->fse_max_win = (uint16_t)(dst->fse_max_win << dst->fse_wscale);
					/* in case of a retrans SYN|ACK */
					dst->fse_wscale = 0;
				}
			}
		}
		if (tcp_flags & TH_FIN) {
			end++;
		}
		if (tcp_flags & TH_RST) {
			ftflags |= FTF_RST_RLIM;
		}

		src->fse_seqlo = seq;
		if (src->fse_state < TCPS_SYN_SENT) {
			src->fse_state = TCPS_SYN_SENT;
		}

		/*
		 * May need to slide the window (seqhi may have been set by
		 * the crappy stack check or if we picked up the connection
		 * after establishment).
		 */
		if (src->fse_seqhi == 1 || SEQ_GEQ(end +
		    MAX(1, dst->fse_max_win << dws), src->fse_seqhi)) {
			src->fse_seqhi = end + MAX(1, dst->fse_max_win << dws);
		}
		if (win > src->fse_max_win) {
			src->fse_max_win = win;
		}
	}

	if (!(tcp_flags & TH_ACK)) {
		/* let it pass through the ack skew check */
		ack = dst->fse_seqlo;
	} else if ((ack == 0 &&
	    (tcp_flags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) ||
	    /* broken tcp stacks do not set ack */
	    (dst->fse_state < TCPS_SYN_SENT)) {
		/*
		 * Many stacks (ours included) will set the ACK number in a
		 * FIN|ACK if the SYN times out -- no sequence to ACK.
		 */
		ack = dst->fse_seqlo;
	}

	if (seq == end) {
		/* ease sequencing restrictions on no data packets */
		seq = src->fse_seqlo;
		end = seq;
	}

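	/*
	 * ackskew measures how far this segment's ACK trails the highest
	 * sequence number we have recorded from the other side: positive
	 * means an old ACK, negative means it acknowledges data we have
	 * not yet seen.
	 */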
	ackskew = dst->fse_seqlo - ack;

#define MAXACKWINDOW (0xffff + 1500)    /* 1500 is an arbitrary fudge factor */
	if (SEQ_GEQ(src->fse_seqhi, end) &&
	    /* last octet inside other's window space */
	    SEQ_GEQ(seq, src->fse_seqlo - (dst->fse_max_win << dws)) &&
	    /* retrans: not more than one window back */
	    (ackskew >= -MAXACKWINDOW) &&
	    /* acking not more than one reassembled fragment backwards */
	    (ackskew <= (MAXACKWINDOW << sws)) &&
	    /* acking not more than one window forward */
	    (!(tcp_flags & TH_RST) || orig_seq == src->fse_seqlo ||
	    (orig_seq == src->fse_seqlo + 1) ||
	    (orig_seq + 1 == src->fse_seqlo))) {
		/* require an exact/+1 sequence match on resets when possible */

		/* update max window */
		if (src->fse_max_win < win) {
			src->fse_max_win = win;
		}
		/* synchronize sequencing */
		if (SEQ_GT(end, src->fse_seqlo)) {
			src->fse_seqlo = end;
		}
		/* slide the window of what the other end can send */
		if (SEQ_GEQ(ack + (win << sws), dst->fse_seqhi)) {
			dst->fse_seqhi = ack + MAX((win << sws), 1);
		}

		/* update states */
		if (tcp_flags & TH_SYN) {
			if (src->fse_state < TCPS_SYN_SENT) {
				src->fse_state = TCPS_SYN_SENT;
			}
		}
		if (tcp_flags & TH_FIN) {
			if (src->fse_state < TCPS_CLOSING) {
				src->fse_seqlast = orig_seq + pkt->pkt_flow_ulen;
				src->fse_state = TCPS_CLOSING;
			}
		}
		if (tcp_flags & TH_ACK) {
			/*
			 * Avoid transitioning to ESTABLISHED when our SYN
			 * is ACK'd along with a RST.  The sending TCP may
			 * still retransmit the SYN (after dropping some
			 * options like ECN, etc.)
			 */
			if (dst->fse_state == TCPS_SYN_SENT &&
			    !(tcp_flags & TH_RST)) {
				dst->fse_state = TCPS_ESTABLISHED;
				ftflags |= (FTF_WAITCLOSE | FTF_CLOSENOTIFY);
			} else if (dst->fse_state == TCPS_CLOSING &&
			    ack == dst->fse_seqlast + 1) {
				dst->fse_state = TCPS_FIN_WAIT_2;
				ftflags |= FTF_WAITCLOSE;
				if (src->fse_state >= TCPS_FIN_WAIT_2) {
					ftflags |= FTF_WITHDRAWN;
				} else {
					ftflags |= FTF_HALFCLOSED;
				}
			}
		}
		if ((tcp_flags & TH_RST) &&
		    (src->fse_state == TCPS_ESTABLISHED ||
		    dst->fse_state == TCPS_ESTABLISHED)) {
			/*
			 * If either endpoint is in ESTABLISHED, transition
			 * both to TIME_WAIT.  Otherwise, keep the existing
			 * state as is, e.g. SYN_SENT.
			 */
			src->fse_state = dst->fse_state = TCPS_TIME_WAIT;
			ftflags |= (FTF_WITHDRAWN | FTF_WAITCLOSE);
		}
	} else if ((dst->fse_state < TCPS_SYN_SENT ||
	    dst->fse_state >= TCPS_FIN_WAIT_2 ||
	    src->fse_state >= TCPS_FIN_WAIT_2) &&
	    SEQ_GEQ(src->fse_seqhi + MAXACKWINDOW, end) &&
	    /* within a window forward of the originating packet */
	    SEQ_GEQ(seq, src->fse_seqlo - MAXACKWINDOW)) {
		/* within a window backward of the originating packet */

		/* BEGIN CSTYLED */
		/*
		 * This currently handles three situations:
		 *  1) Stupid stacks will shotgun SYNs before their peer
		 *     replies.
		 *  2) When flow tracking catches an already established
		 *     stream (the flow states are cleared, etc.)
		 *  3) Packets get funky immediately after the connection
		 *     closes (this should catch spurious ACK|FINs that
		 *     web servers like to spew after a close).
		 *
		 * This must be a little more careful than the above code
		 * since packet floods will also be caught here.
		 */
		/* END CSTYLED */

		/* update max window */
		if (src->fse_max_win < win) {
			src->fse_max_win = win;
		}
		/* synchronize sequencing */
		if (SEQ_GT(end, src->fse_seqlo)) {
			src->fse_seqlo = end;
		}
		/* slide the window of what the other end can send */
		if (SEQ_GEQ(ack + (win << sws), dst->fse_seqhi)) {
			dst->fse_seqhi = ack + MAX((win << sws), 1);
		}

		/*
		 * Cannot set dst->fse_seqhi here since this could be a
		 * shotgunned SYN and not an already established connection.
		 */

		if (tcp_flags & TH_FIN) {
			if (src->fse_state < TCPS_CLOSING) {
				src->fse_seqlast = orig_seq + pkt->pkt_flow_ulen;
				src->fse_state = TCPS_CLOSING;
			}
		}
		if (tcp_flags & TH_RST) {
			src->fse_state = dst->fse_state = TCPS_TIME_WAIT;
			ftflags |= FTF_WAITCLOSE;
		}
	} else {
		if (dst->fse_state == TCPS_SYN_SENT &&
		    src->fse_state == TCPS_SYN_SENT) {
			src->fse_seqlo = 0;
			src->fse_seqhi = 1;
			src->fse_max_win = 1;
		}
	}

done:
	if (__improbable((ftflags & FTF_HALFCLOSED) != 0)) {
		os_atomic_or(&fe->fe_flags, FLOWENTF_HALF_CLOSED, relaxed);
		ftflags &= ~FTF_HALFCLOSED;
	}

	/*
	 * Hold on to namespace for a while after the flow is closed.
	 */
	if (__improbable((ftflags & FTF_WAITCLOSE) != 0 &&
	    (fe->fe_flags & FLOWENTF_WAIT_CLOSE) == 0)) {
		os_atomic_or(&fe->fe_flags, FLOWENTF_WAIT_CLOSE, relaxed);
		ftflags &= ~FTF_WAITCLOSE;
	}

	/*
	 * Notify NECP upon tear down (for established flows).
	 */
	if (__improbable((ftflags & FTF_CLOSENOTIFY) != 0 &&
	    (fe->fe_flags & FLOWENTF_CLOSE_NOTIFY) == 0)) {
		os_atomic_or(&fe->fe_flags, FLOWENTF_CLOSE_NOTIFY, relaxed);
		ftflags &= ~FTF_CLOSENOTIFY;
	}

	/*
	 * Flow is withdrawn; the port we have should not be included in
	 * the list of offloaded ports, as the connection is no longer
	 * usable (we're not expecting any more data).
	 * Also clear the FLOWENTF_HALF_CLOSED flag here.  It's fine if the
	 * reaper thread hasn't picked up FLOWENTF_HALF_CLOSED, as it will
	 * pick up FLOWENTF_WITHDRAWN and notify netns of the full withdrawal.
	 */
	if (__improbable((ftflags & FTF_WITHDRAWN) != 0)) {
		ftflags &= ~FTF_WITHDRAWN;
		if (fe->fe_flags & FLOWENTF_HALF_CLOSED) {
			os_atomic_andnot(&fe->fe_flags, FLOWENTF_HALF_CLOSED, relaxed);
		}
		fe->fe_want_withdraw = 1;
	}

	/*
	 * If no other work is needed, we're done.
	 */
	if (ftflags == 0 || input) {
		return err;
	}

	/*
	 * If we're over the rate limit for outbound SYNs, drop packet.
	 */
	if (__improbable((ftflags & FTF_SYN_RLIM) != 0)) {
		uint32_t now = (uint32_t)_net_uptime;
		if ((now - src->fse_syn_ts) > 1) {
			src->fse_syn_ts = now;
			src->fse_syn_cnt = 0;
		}
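		/*
		 * fse_syn_ts/fse_syn_cnt form a coarse per-second counter:
		 * once the timestamp goes stale the count restarts, and
		 * exceeding FLOWTRACK_SYN_RATE within the window returns
		 * EPROTO so that the caller drops the packet.
		 */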
		if (++src->fse_syn_cnt > FLOWTRACK_SYN_RATE) {
			err = EPROTO;
		}
	}

	return err;
}
#undef FTF_HALFCLOSED
#undef FTF_WAITCLOSE
#undef FTF_CLOSENOTIFY
#undef FTF_WITHDRAWN
#undef FTF_SYN_RLIM
#undef FTF_RST_RLIM

boolean_t
flow_track_tcp_want_abort(struct flow_entry *fe)
{
	struct flow_track *src = &fe->fe_ltrack;
	struct flow_track *dst = &fe->fe_rtrack;

	if (fe->fe_key.fk_proto != IPPROTO_TCP ||
	    (fe->fe_flags & FLOWENTF_ABORTED)) {
		goto done;
	}

	/* this can be enhanced; for now rely on established state */
	if (src->fse_state == TCPS_ESTABLISHED ||
	    dst->fse_state == TCPS_ESTABLISHED) {
		src->fse_state = dst->fse_state = TCPS_TIME_WAIT;
		/* don't process more than once */
		os_atomic_or(&fe->fe_flags, FLOWENTF_ABORTED, relaxed);
		return TRUE;
	}
done:
	return FALSE;
}

static void
flow_track_udp_init(struct flow_entry *fe, struct flow_track *src,
    struct flow_track *dst, struct __kern_packet *pkt)
{
#pragma unused(pkt)
	/*
	 * Source state initialization.
	 */
	src->fse_state = FT_STATE_NO_TRAFFIC;

	/*
	 * Destination state initialization.
	 */
	dst->fse_state = FT_STATE_NO_TRAFFIC;

	os_atomic_or(&fe->fe_flags, FLOWENTF_INITED, relaxed);
}

__attribute__((always_inline))
static inline int
flow_track_udp(struct flow_entry *fe, struct flow_track *src,
    struct flow_track *dst, struct __kern_packet *pkt, bool input)
{
#pragma unused(input)
	if (__improbable((fe->fe_flags & FLOWENTF_INITED) == 0)) {
		flow_track_udp_init(fe, src, dst, pkt);
	}

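	/*
	 * UDP is stateless, so we only track liveness: an endpoint moves
	 * from NO_TRAFFIC to SINGLE when we see its first datagram, and the
	 * peer is promoted from SINGLE to MULTIPLE once traffic has been
	 * seen in both directions.
	 */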
	if (__improbable(src->fse_state == FT_STATE_NO_TRAFFIC)) {
		src->fse_state = FT_STATE_SINGLE;
	}
	if (__improbable(dst->fse_state == FT_STATE_SINGLE)) {
		dst->fse_state = FT_STATE_MULTIPLE;
	}

	return 0;
}

void
flow_track_stats(struct flow_entry *fe, uint64_t bytes, uint64_t packets,
    bool active, bool in)
{
	volatile struct sk_stats_flow_track *fst;

	if (in) {
		fst = &fe->fe_stats->fs_rtrack;
	} else {
		fst = &fe->fe_stats->fs_ltrack;
	}

	fst->sft_bytes += bytes;
	fst->sft_packets += packets;

	if (__probable(active)) {
		in_stat_set_activity_bitmap(&fe->fe_stats->fs_activity,
		    _net_uptime);
	}
}

int
flow_pkt_track(struct flow_entry *fe, struct __kern_packet *pkt, bool in)
{
	struct flow_track *src, *dst;
	int ret = 0;

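	/*
	 * The flow tracker reuses the TCPS_* values internally and exports
	 * them through sk_stats without translation; these compile-time
	 * assertions keep the three sets of state constants in sync.
	 */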
	_CASSERT(SFT_STATE_CLOSED == FT_STATE_CLOSED);
	_CASSERT(SFT_STATE_LISTEN == FT_STATE_LISTEN);
	_CASSERT(SFT_STATE_SYN_SENT == FT_STATE_SYN_SENT);
	_CASSERT(SFT_STATE_SYN_RECEIVED == FT_STATE_SYN_RECEIVED);
	_CASSERT(SFT_STATE_ESTABLISHED == FT_STATE_ESTABLISHED);
	_CASSERT(SFT_STATE_CLOSE_WAIT == FT_STATE_CLOSE_WAIT);
	_CASSERT(SFT_STATE_FIN_WAIT_1 == FT_STATE_FIN_WAIT_1);
	_CASSERT(SFT_STATE_CLOSING == FT_STATE_CLOSING);
	_CASSERT(SFT_STATE_LAST_ACK == FT_STATE_LAST_ACK);
	_CASSERT(SFT_STATE_FIN_WAIT_2 == FT_STATE_FIN_WAIT_2);
	_CASSERT(SFT_STATE_TIME_WAIT == FT_STATE_TIME_WAIT);
	_CASSERT(SFT_STATE_NO_TRAFFIC == FT_STATE_NO_TRAFFIC);
	_CASSERT(SFT_STATE_SINGLE == FT_STATE_SINGLE);
	_CASSERT(SFT_STATE_MULTIPLE == FT_STATE_MULTIPLE);
	_CASSERT(SFT_STATE_MAX == FT_STATE_MAX);

	_CASSERT(FT_STATE_CLOSED == TCPS_CLOSED);
	_CASSERT(FT_STATE_LISTEN == TCPS_LISTEN);
	_CASSERT(FT_STATE_SYN_SENT == TCPS_SYN_SENT);
	_CASSERT(FT_STATE_SYN_RECEIVED == TCPS_SYN_RECEIVED);
	_CASSERT(FT_STATE_ESTABLISHED == TCPS_ESTABLISHED);
	_CASSERT(FT_STATE_CLOSE_WAIT == TCPS_CLOSE_WAIT);
	_CASSERT(FT_STATE_FIN_WAIT_1 == TCPS_FIN_WAIT_1);
	_CASSERT(FT_STATE_CLOSING == TCPS_CLOSING);
	_CASSERT(FT_STATE_LAST_ACK == TCPS_LAST_ACK);
	_CASSERT(FT_STATE_FIN_WAIT_2 == TCPS_FIN_WAIT_2);
	_CASSERT(FT_STATE_TIME_WAIT == TCPS_TIME_WAIT);

	ASSERT(pkt->pkt_qum_qflags & QUM_F_FLOW_CLASSIFIED);

	if (in) {
		src = &fe->fe_rtrack;
		dst = &fe->fe_ltrack;
	} else {
		src = &fe->fe_ltrack;
		dst = &fe->fe_rtrack;
	}

	flow_track_stats(fe, (pkt->pkt_length - pkt->pkt_l2_len), 1,
	    (pkt->pkt_flow_ulen != 0), in);

	/* skip flow state tracking on non-initial fragments */
	if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
		return 0;
	}

	switch (pkt->pkt_flow_ip_proto) {
	case IPPROTO_TCP:
		if (__probable((fe->fe_flags & FLOWENTF_TRACK) != 0)) {
			ret = flow_track_tcp(fe, src, dst, pkt, in);
		}
		break;

	case IPPROTO_UDP:
		if (__probable((fe->fe_flags & FLOWENTF_TRACK) != 0)) {
			ret = flow_track_udp(fe, src, dst, pkt, in);
		}
		break;
	}

	return ret;
}

/*
 * @function flow_track_abort_tcp
 * @abstract Send an RST for a given TCP flow.
 * @param fe flow entry to abort.
 * @param in_pkt incoming packet that triggers the RST.
 * @param rst_pkt used as the RST template for SEQ/ACK information.
 */
void
flow_track_abort_tcp(struct flow_entry *fe, struct __kern_packet *in_pkt,
    struct __kern_packet *rst_pkt)
{
	struct nx_flowswitch *fsw = fe->fe_fsw;
	struct flow_track *src, *dst;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	uint16_t len, tlen;
	struct mbuf *m;

	/* guaranteed by caller */
	ASSERT(fsw->fsw_ifp != NULL);
	ASSERT(in_pkt == NULL || rst_pkt == NULL);

	src = &fe->fe_ltrack;
	dst = &fe->fe_rtrack;

	tlen = sizeof(struct tcphdr);
	if (fe->fe_key.fk_ipver == IPVERSION) {
		len = sizeof(struct ip) + tlen;
	} else {
		ASSERT(fe->fe_key.fk_ipver == IPV6_VERSION);
		len = sizeof(struct ip6_hdr) + tlen;
	}

	m = m_gethdr(M_NOWAIT, MT_HEADER);
	if (__improbable(m == NULL)) {
		return;
	}

	m->m_pkthdr.pkt_proto = IPPROTO_TCP;
	m->m_data += max_linkhdr;               /* 32-bit aligned */
	m->m_pkthdr.len = m->m_len = len;

	/* zero out for checksum */
	bzero(m->m_data, len);

	if (fe->fe_key.fk_ipver == IPVERSION) {
		ip = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		ip->ip_p = IPPROTO_TCP;
		ip->ip_len = htons(tlen);
		if (rst_pkt == NULL) {
			ip->ip_src = fe->fe_key.fk_src4;
			ip->ip_dst = fe->fe_key.fk_dst4;
		} else {
			ip->ip_src = rst_pkt->pkt_flow_ipv4_src;
			ip->ip_dst = rst_pkt->pkt_flow_ipv4_dst;
		}

		th = (struct tcphdr *)(void *)((char *)ip + sizeof(*ip));
	} else {
		ip6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons(tlen);
		if (rst_pkt == NULL) {
			ip6->ip6_src = fe->fe_key.fk_src6;
			ip6->ip6_dst = fe->fe_key.fk_dst6;
		} else {
			ip6->ip6_src = rst_pkt->pkt_flow_ipv6_src;
			ip6->ip6_dst = rst_pkt->pkt_flow_ipv6_dst;
		}

		th = (struct tcphdr *)(void *)((char *)ip6 + sizeof(*ip6));
	}

	/*
	 * TCP header (fabricate a pure RST).
	 */
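	/*
	 * When responding to an incoming segment, follow the RFC 793
	 * reset-generation rules: if the segment carried an ACK, send
	 * <SEQ=SEG.ACK><CTL=RST>; otherwise acknowledge what was received
	 * with <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>.
	 */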
	if (in_pkt != NULL) {
		th->th_sport = in_pkt->pkt_flow_tcp_dst;
		th->th_dport = in_pkt->pkt_flow_tcp_src;
		if (__probable(in_pkt->pkt_flow_tcp_flags & TH_ACK)) {
			/* <SEQ=SEG.ACK><CTL=RST> */
			th->th_seq = in_pkt->pkt_flow_tcp_ack;
			th->th_ack = 0;
			th->th_flags = TH_RST;
		} else {
			/* <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK> */
			th->th_seq = 0;
			th->th_ack = htonl(ntohl(in_pkt->pkt_flow_tcp_seq) +
			    in_pkt->pkt_flow_ulen);
			th->th_flags = TH_RST | TH_ACK;
		}
	} else if (rst_pkt != NULL) {
		th->th_sport = rst_pkt->pkt_flow_tcp_src;
		th->th_dport = rst_pkt->pkt_flow_tcp_dst;
		th->th_seq = rst_pkt->pkt_flow_tcp_seq;
		th->th_ack = rst_pkt->pkt_flow_tcp_ack;
		th->th_flags = rst_pkt->pkt_flow_tcp_flags;
	} else {
		th->th_sport = fe->fe_key.fk_sport;
		th->th_dport = fe->fe_key.fk_dport;
		th->th_seq = htonl(src->fse_seqlo);     /* peer's last ACK */
		th->th_ack = 0;
		th->th_flags = TH_RST;
	}
	th->th_off = (tlen >> 2);
	th->th_win = 0;

	FSW_STATS_INC(FSW_STATS_FLOWS_ABORTED);

	if (fe->fe_key.fk_ipver == IPVERSION) {
		struct ip_out_args ipoa;
		struct route ro;

		bzero(&ipoa, sizeof(ipoa));
		ipoa.ipoa_boundif = fsw->fsw_ifp->if_index;
		ipoa.ipoa_flags = (IPOAF_SELECT_SRCIF | IPOAF_BOUND_IF |
		    IPOAF_BOUND_SRCADDR);
		ipoa.ipoa_sotc = SO_TC_UNSPEC;
		ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = 0;
		/*
		 * ip_output() expects ip_len and ip_off to be in host order.
		 */
		ip->ip_len = len;
		ip->ip_off = IP_DF;
		ip->ip_ttl = (uint8_t)ip_defttl;
		ip->ip_sum = 0;

		bzero(&ro, sizeof(ro));
		(void) ip_output(m, NULL, &ro, IP_OUTARGS, NULL, &ipoa);
		ROUTE_RELEASE(&ro);
	} else {
		struct ip6_out_args ip6oa;
		struct route_in6 ro6;

		bzero(&ip6oa, sizeof(ip6oa));
		ip6oa.ip6oa_boundif = fsw->fsw_ifp->if_index;
		ip6oa.ip6oa_flags = (IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_IF |
		    IP6OAF_BOUND_SRCADDR);
		ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
		ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		ip6->ip6_vfc |= IPV6_VERSION;
		ip6->ip6_hlim = IPV6_DEFHLIM;

		bzero(&ro6, sizeof(ro6));
		(void) ip6_output(m, NULL, &ro6, IPV6_OUTARGS,
		    NULL, NULL, &ip6oa);
		ROUTE_RELEASE(&ro6);
	}
}

void
flow_track_abort_quic(struct flow_entry *fe, uint8_t *token)
{
	struct quic_stateless_reset {
		uint8_t ssr_header[30];
		uint8_t ssr_token[QUIC_STATELESS_RESET_TOKEN_SIZE];
	};
	struct nx_flowswitch *fsw = fe->fe_fsw;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct udphdr *uh;
	struct quic_stateless_reset *qssr;
	uint16_t len, l3hlen, ulen;
	struct mbuf *m;
	unsigned int one = 1;
	int error;

	/* guaranteed by caller */
	ASSERT(fsw->fsw_ifp != NULL);

	/* skip all-zero tokens */
	bool is_zero_token = true;
	for (size_t i = 0; i < QUIC_STATELESS_RESET_TOKEN_SIZE; i++) {
		if (token[i] != 0) {
			is_zero_token = false;
			break;
		}
	}
	if (is_zero_token) {
		return;
	}

	ulen = sizeof(struct udphdr) + sizeof(struct quic_stateless_reset);
	if (fe->fe_key.fk_ipver == IPVERSION) {
		l3hlen = sizeof(struct ip);
	} else {
		ASSERT(fe->fe_key.fk_ipver == IPV6_VERSION);
		l3hlen = sizeof(struct ip6_hdr);
	}

	len = l3hlen + ulen;

	error = mbuf_allocpacket(MBUF_DONTWAIT, max_linkhdr + len, &one, &m);
	if (__improbable(error != 0)) {
		return;
	}
	VERIFY(m != NULL);

	m->m_pkthdr.pkt_proto = IPPROTO_UDP;
	m->m_data += max_linkhdr;               /* 32-bit aligned */
	m->m_pkthdr.len = m->m_len = len;

	/* zero out for checksum */
	bzero(m->m_data, len);

	if (fe->fe_key.fk_ipver == IPVERSION) {
		ip = mtod(m, struct ip *);
		ip->ip_p = IPPROTO_UDP;
		ip->ip_len = htons(ulen);
		ip->ip_src = fe->fe_key.fk_src4;
		ip->ip_dst = fe->fe_key.fk_dst4;
		uh = (struct udphdr *)(void *)((char *)ip + sizeof(*ip));
	} else {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_nxt = IPPROTO_UDP;
		ip6->ip6_plen = htons(ulen);
		ip6->ip6_src = fe->fe_key.fk_src6;
		ip6->ip6_dst = fe->fe_key.fk_dst6;
		uh = (struct udphdr *)(void *)((char *)ip6 + sizeof(*ip6));
	}

	/* UDP header */
	uh->uh_sport = fe->fe_key.fk_sport;
	uh->uh_dport = fe->fe_key.fk_dport;
	uh->uh_ulen = htons(ulen);

	/* QUIC stateless reset */
	qssr = (struct quic_stateless_reset *)(uh + 1);
	read_frandom(&qssr->ssr_header, sizeof(qssr->ssr_header));
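	/*
	 * Per RFC 9000, a stateless reset must look like a short-header
	 * packet: clear the header-form bit and set the fixed bit so the
	 * top two bits of the first byte are 01, leaving the rest random.
	 */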
	qssr->ssr_header[0] = (qssr->ssr_header[0] & 0x3f) | 0x40;
	memcpy(qssr->ssr_token, token, QUIC_STATELESS_RESET_TOKEN_SIZE);

	FSW_STATS_INC(FSW_STATS_FLOWS_ABORTED);

	if (fe->fe_key.fk_ipver == IPVERSION) {
		struct ip_out_args ipoa;
		struct route ro;

		bzero(&ipoa, sizeof(ipoa));
		ipoa.ipoa_boundif = fsw->fsw_ifp->if_index;
		ipoa.ipoa_flags = (IPOAF_SELECT_SRCIF | IPOAF_BOUND_IF |
		    IPOAF_BOUND_SRCADDR);
		ipoa.ipoa_sotc = SO_TC_UNSPEC;
		ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

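		/*
		 * UDP checksum; a transmitted checksum of zero means "no
		 * checksum", so fold a computed zero to 0xffff (RFC 768).
		 */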
		uh->uh_sum = in_cksum(m, len);
		if (uh->uh_sum == 0) {
			uh->uh_sum = 0xffff;
		}

		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = 0;
		/*
		 * ip_output() expects ip_len and ip_off to be in host order.
		 */
		ip->ip_len = len;
		ip->ip_off = IP_DF;
		ip->ip_ttl = (uint8_t)ip_defttl;
		ip->ip_sum = 0;

		bzero(&ro, sizeof(ro));
		(void) ip_output(m, NULL, &ro, IP_OUTARGS, NULL, &ipoa);
		ROUTE_RELEASE(&ro);
	} else {
		struct ip6_out_args ip6oa;
		struct route_in6 ro6;

		bzero(&ip6oa, sizeof(ip6oa));
		ip6oa.ip6oa_boundif = fsw->fsw_ifp->if_index;
		ip6oa.ip6oa_flags = (IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_IF |
		    IP6OAF_BOUND_SRCADDR);
		ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
		ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

		uh->uh_sum = in6_cksum(m, IPPROTO_UDP, sizeof(struct ip6_hdr),
		    ulen);
		if (uh->uh_sum == 0) {
			uh->uh_sum = 0xffff;
		}

		ip6->ip6_vfc |= IPV6_VERSION;
		ip6->ip6_hlim = IPV6_DEFHLIM;

		bzero(&ro6, sizeof(ro6));
		(void) ip6_output(m, NULL, &ro6, IPV6_OUTARGS,
		    NULL, NULL, &ip6oa);
		ROUTE_RELEASE(&ro6);
	}
}