/*
 * Copyright (c) 2012-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * A note on the MPTCP/NECP interactions:
 *
 * MPTCP uses NECP callbacks to get notified of interface/policy events.
 * For interface events, MPTCP registers at the MPTCP layer through a call
 * to necp_client_register_multipath_cb. To get per-flow events (aka per
 * TCP subflow), we register with necp_client_register_socket_flow. Both
 * registrations use the necp-client-uuid that comes from the app.
 *
 * The locking is rather tricky. In general, we expect the lock-ordering
 * to be necp-fd -> necp-client -> mpp_lock.
 *
 * There are however some subtleties.
 *
 * 1. When registering the multipath_cb, we are holding the mpp_lock. This is
 * safe, because it is the very first time this MPTCP connection goes into
 * NECP. As we go into NECP we take the NECP locks and thus are guaranteed
 * that no NECP event will deadlock us, because these NECP events also first
 * take the NECP locks. Either they win the race and thus won't find our
 * MPTCP connection, or MPTCP wins the race and safely installs the
 * callbacks while holding the NECP lock.
 *
 * 2. When registering the subflow callbacks we must unlock the mpp_lock,
 * because we have already registered callbacks and might race against an
 * NECP event that matches on our socket. So, we have to unlock to be safe.
 *
 * 3. When removing the multipath_cb, we do it in mp_pcbdispose(), after the
 * so_usecount has reached 0. We must be careful not to remove the mpp_socket
 * pointers before we have unregistered the callback, because, again, we
 * might be racing against an NECP event. Unregistering must happen with an
 * unlocked mpp_lock, because of the lock-ordering constraint. It could be
 * that an NECP event triggers before we had a chance to unregister. That's
 * why we need to check the so_usecount in mptcp_session_necp_cb: if we get
 * there while the socket is being garbage-collected, the use-count has gone
 * down to 0 and we exit. Removal of the multipath_cb again happens by taking
 * the NECP locks, so any running NECP events will finish first and exit
 * cleanly.
 *
 * 4. When removing the subflow callback, we do it in in_pcbdispose(). Again,
 * the socket lock must be unlocked for lock-ordering constraints. This gets
 * a bit tricky here, as in tcp_garbage_collect we hold the mp_so and so
 * locks. So, we drop the mp_so lock as soon as the subflow is unlinked with
 * mptcp_subflow_del. Then, in in_pcbdispose we drop the subflow lock.
 * If an NECP event was waiting on the lock in mptcp_subflow_necp_cb, when it
 * gets it, it will realize that the subflow became non-MPTCP and retry (see
 * tcp_lock). Then it waits again on the subflow lock. When we drop this lock
 * in in_pcbdispose and enter necp_inpcb_dispose, the latter has to wait for
 * the NECP lock (held by the other thread that is taking care of the NECP
 * event). So, the event now finally gets the subflow lock, hits an
 * so_usecount that is 0, and exits. Eventually, we can remove the subflow
 * from the NECP callback.
 */
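
/*
 * Illustrative sketch (not actual kernel code) of the expected ordering;
 * the lock names follow the note above:
 *
 *	lock(necp_fd);		// 1st: NECP file descriptor
 *	lock(necp_client);	// 2nd: NECP client
 *	lock(mpp_lock);		// 3rd: MPTCP PCB
 *
 * Subtlety 1 above inverts this (mpp_lock -> NECP locks) and is only safe
 * because no callback can yet fire for this connection; subtleties 2-4
 * restore the order by dropping mpp_lock before touching NECP state.
 */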

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/protosw.h>

#include <kern/zalloc.h>
#include <kern/locks.h>

#include <mach/sdt.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/mptcp_var.h>
#include <netinet/mptcp.h>
#include <netinet/mptcp_seq.h>
#include <netinet/mptcp_opt.h>
#include <netinet/mptcp_timer.h>

int mptcp_enable = 1;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_enable, 0, "Enable Multipath TCP Support");

/*
 * Number of times to try negotiating MPTCP on SYN retransmissions.
 * We haven't seen any reports of a middlebox that drops all SYN segments
 * carrying an MPTCP option. Thus, let's be generous and retransmit it
 * 4 times.
 */
int mptcp_mpcap_retries = 4;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, mptcp_cap_retr,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_mpcap_retries, 0, "Number of MP Capable SYN Retries");

/*
 * By default, DSS checksum is turned off; revisit if we ever do
 * MPTCP for non-SSL traffic.
 */
int mptcp_dss_csum = 0;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, dss_csum, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_dss_csum, 0, "Enable DSS checksum");

/*
 * When mptcp_fail_thresh retransmissions have been sent, subflow failover
 * is attempted on a different path.
 */
int mptcp_fail_thresh = 1;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, fail, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_fail_thresh, 0, "Failover threshold");

/*
 * MPTCP subflows have TCP keepalives set to ON. Set a conservative keeptime,
 * as carrier networks mostly have a 30- to 60-minute NAT timeout.
 * Some carrier networks have a timeout of 10 or 15 minutes.
 */
int mptcp_subflow_keeptime = 60 * 14;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, keepalive, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_subflow_keeptime, 0, "Keepalive in seconds");

int mptcp_rtthist_rtthresh = 600;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rtthist_thresh, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_rtthist_rtthresh, 0, "Rtt threshold");

int mptcp_rtothresh = 1500;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rto_thresh, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_rtothresh, 0, "RTO threshold");

/*
 * Probe the preferred path when it is not in use.
 */
uint32_t mptcp_probeto = 1000;
SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, probeto, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_probeto, 0, "Disable probing by setting to 0");

uint32_t mptcp_probecnt = 5;
SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, probecnt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_probecnt, 0, "Number of probe writes");

uint32_t mptcp_enable_v1 = 1;
SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, enable_v1, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_enable_v1, 0, "Enable or disable v1");

static int
sysctl_mptcp_version_check SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	int new_value = *(int *)oidp->oid_arg1;
	int old_value = *(int *)oidp->oid_arg1;

	error = sysctl_handle_int(oidp, &new_value, 0, req);
	if (!error) {
		if (new_value != MPTCP_VERSION_0 && new_value != MPTCP_VERSION_1) {
			return EINVAL;
		}
		*(int *)oidp->oid_arg1 = new_value;
	}

	os_log(OS_LOG_DEFAULT,
	    "%s:%u sysctl net.inet.tcp.mptcp_preferred_version: %d -> %d",
	    proc_best_name(current_proc()), proc_selfpid(),
	    old_value, *(int *)oidp->oid_arg1);

	return error;
}

int mptcp_preferred_version = MPTCP_VERSION_1;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, mptcp_preferred_version,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mptcp_preferred_version, 0, &sysctl_mptcp_version_check, "I", "");

int mptcp_reass_total_qlen = 0;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, reass_qlen,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mptcp_reass_total_qlen, 0,
    "Total number of MPTCP segments in reassembly queues");

static int
mptcp_reass_present(struct socket *mp_so)
{
	struct mptses *mpte = mpsotompte(mp_so);
	struct mptcb *mp_tp = mpte->mpte_mptcb;
	struct tseg_qent *q;
	int dowakeup = 0;
	int flags = 0;
	int count = 0;

	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) {
		return flags;
	}
	q = LIST_FIRST(&mp_tp->mpt_segq);
	if (!q || q->tqe_m->m_pkthdr.mp_dsn != mp_tp->mpt_rcvnxt) {
		return flags;
	}

	/*
	 * If there is already another thread doing reassembly for this
	 * connection, it is better to let it finish the job --
	 * (radar 16316196)
	 */
	if (mp_tp->mpt_flags & MPTCPF_REASS_INPROG) {
		return flags;
	}

	mp_tp->mpt_flags |= MPTCPF_REASS_INPROG;

	do {
		mp_tp->mpt_rcvnxt += q->tqe_len;
		LIST_REMOVE(q, tqe_q);
		if (mp_so->so_state & SS_CANTRCVMORE) {
			m_freem(q->tqe_m);
		} else {
			flags = !!(q->tqe_m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN);
			if (sbappendstream_rcvdemux(mp_so, q->tqe_m)) {
				dowakeup = 1;
			}
		}
		zfree(tcp_reass_zone, q);
		mp_tp->mpt_reassqlen--;
		count++;
		q = LIST_FIRST(&mp_tp->mpt_segq);
	} while (q && q->tqe_m->m_pkthdr.mp_dsn == mp_tp->mpt_rcvnxt);
	mp_tp->mpt_flags &= ~MPTCPF_REASS_INPROG;

	if (count > 0) {
		OSAddAtomic(-count, &mptcp_reass_total_qlen);
	}
	if (dowakeup) {
		sorwakeup(mp_so); /* done with socket lock held */
	}
	return flags;
}

static int
mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf *m)
{
	struct mptcb *mp_tp = mpsotomppcb(mp_so)->mpp_pcbe->mpte_mptcb;
	u_int64_t mb_dsn = phdr->mp_dsn;
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq;
	struct tseg_qent *te = NULL;
	uint32_t qlimit;

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let through the missing segment that caused this
	 * queue to build up. Always keep one global queue entry spare to be
	 * able to process that missing segment.
	 */
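	/*
	 * Worked example with assumed values: for sb_hiwat of 2 MB and
	 * tcp_autorcvbuf_max of 4 MB, qlimit = MIN(MAX(100, 2048), 4096)
	 * = 2048 queue entries; a socket with a tiny 64 KB receive buffer
	 * still gets the floor of MAX(100, 64) = 100 entries.
	 */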
	qlimit = MIN(MAX(100, mp_so->so_rcv.sb_hiwat >> 10),
	    (tcp_autorcvbuf_max >> 10));
	if (mb_dsn != mp_tp->mpt_rcvnxt &&
	    (mp_tp->mpt_reassqlen + 1) >= qlimit) {
		tcpstat.tcps_mptcp_rcvmemdrop++;
		m_freem(m);
		*tlenp = 0;
		return 0;
	}

	/* Allocate a new queue entry; Z_NOFAIL means this cannot fail. */
	te = zalloc_flags(tcp_reass_zone, Z_WAITOK | Z_NOFAIL);

	mp_tp->mpt_reassqlen++;
	OSIncrementAtomic(&mptcp_reass_total_qlen);

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &mp_tp->mpt_segq, tqe_q) {
		if (MPTCP_SEQ_GT(q->tqe_m->m_pkthdr.mp_dsn, mb_dsn)) {
			break;
		}
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already. If so, drop the data from the incoming
	 * segment. If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		int64_t i;
		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_m->m_pkthdr.mp_dsn + p->tqe_len - mb_dsn;
		if (i > 0) {
			if (i >= *tlenp) {
				tcpstat.tcps_mptcp_rcvduppack++;
				m_freem(m);
				zfree(tcp_reass_zone, te);
				te = NULL;
				mp_tp->mpt_reassqlen--;
				OSDecrementAtomic(&mptcp_reass_total_qlen);
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto out;
			}
			VERIFY(i <= INT_MAX);
			m_adj(m, (int)i);
			*tlenp -= i;
			phdr->mp_dsn += i;
		}
	}

	tcpstat.tcps_mp_oodata++;

	/*
	 * Trim overlapping succeeding segments or, if they are
	 * completely covered, dequeue them.
	 */
	while (q) {
		int64_t i = (mb_dsn + *tlenp) - q->tqe_m->m_pkthdr.mp_dsn;
		if (i <= 0) {
			break;
		}

		if (i < q->tqe_len) {
			q->tqe_m->m_pkthdr.mp_dsn += i;
			q->tqe_len -= i;

			VERIFY(i <= INT_MAX);
			m_adj(q->tqe_m, (int)i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		zfree(tcp_reass_zone, q);
		mp_tp->mpt_reassqlen--;
		OSDecrementAtomic(&mptcp_reass_total_qlen);
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = NULL;
	te->tqe_len = *tlenp;

	if (p == NULL) {
		LIST_INSERT_HEAD(&mp_tp->mpt_segq, te, tqe_q);
	} else {
		LIST_INSERT_AFTER(p, te, tqe_q);
	}

out:
	return mptcp_reass_present(mp_so);
}

/*
 * MPTCP input, called when data has been read from a subflow socket.
 */
void
mptcp_input(struct mptses *mpte, struct mbuf *m)
{
	struct socket *mp_so;
	struct mptcb *mp_tp = NULL;
	int count = 0, wakeup = 0;
	struct mbuf *save = NULL, *prev = NULL;
	struct mbuf *freelist = NULL, *tail = NULL;

	VERIFY(m->m_flags & M_PKTHDR);

	mp_so = mptetoso(mpte);
	mp_tp = mpte->mpte_mptcb;

	socket_lock_assert_owned(mp_so);

	DTRACE_MPTCP(input);

	mp_tp->mpt_rcvwnd = mptcp_sbspace(mp_tp);

	/*
	 * Each mbuf contains an MPTCP Data Sequence Map.
	 * Process the data for reassembly, delivery to the MPTCP socket
	 * client, etc.
	 */
	count = mp_so->so_rcv.sb_cc;

	/*
	 * In the degraded fallback case, data is accepted without a DSS map.
	 */
	if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) {
		struct mbuf *iter;
		int mb_dfin;
fallback:
		mb_dfin = 0;
		mptcp_sbrcv_grow(mp_tp);

		iter = m;
		while (iter) {
			if ((iter->m_flags & M_PKTHDR) &&
			    (iter->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN)) {
				mb_dfin = 1;
			}

			if ((iter->m_flags & M_PKTHDR) && m_pktlen(iter) == 0) {
				/* Don't append zero-length packets; skip them. */
				if (prev == NULL) {
					m = iter->m_next;
					m_free(iter);
					iter = m;
				} else {
					prev->m_next = iter->m_next;
					m_free(iter);
					iter = prev->m_next;
				}

				/* It was a zero-length packet, so the next one must be a pkthdr. */
				VERIFY(iter == NULL || iter->m_flags & M_PKTHDR);
			} else {
				prev = iter;
				iter = iter->m_next;
			}
		}

		/*
		 * Assume a degraded flow, as this may be the first packet
		 * without a DSS map while the subflow state is not updated yet.
		 */
		if (sbappendstream_rcvdemux(mp_so, m)) {
			sorwakeup(mp_so);
		}

		DTRACE_MPTCP5(receive__degraded, struct mbuf *, m,
		    struct socket *, mp_so,
		    struct sockbuf *, &mp_so->so_rcv,
		    struct sockbuf *, &mp_so->so_snd,
		    struct mptses *, mpte);
		count = mp_so->so_rcv.sb_cc - count;

		mp_tp->mpt_rcvnxt += count;

		if (mb_dfin) {
			mptcp_close_fsm(mp_tp, MPCE_RECV_DATA_FIN);
			socantrcvmore(mp_so);
		}
		return;
	}

	do {
		u_int64_t mb_dsn;
		int32_t mb_datalen;
		int64_t todrop;
		int mb_dfin = 0;

		VERIFY(m->m_flags & M_PKTHDR);

		/* If fallback occurs, mbufs will not have PKTF_MPTCP set */
		if (!(m->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
			goto fallback;
		}

		save = m->m_next;
		/*
		 * A single TCP packet formed of multiple mbufs
		 * holds the DSS mapping in the first mbuf of the chain.
		 * Other mbufs in the chain may have M_PKTHDR set
		 * even though they belong to the same TCP packet
		 * and therefore use the DSS mapping stored in the
		 * first mbuf of the mbuf chain. mptcp_input() can
		 * get an mbuf chain with multiple TCP packets.
		 */
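		/*
		 * Illustration (layout assumed for the example): two TCP
		 * packets in one chain, the first split across three mbufs;
		 * only the leading mbuf of each packet carries PKTF_MPTCP
		 * and thus a DSS mapping:
		 *
		 *	[hdr,MPTCP]->[data]->[hdr]->[hdr,MPTCP]->...
		 *	 \________packet 1________/  \_packet 2_/
		 *
		 * The loop below cuts the chain right before the next
		 * PKTF_MPTCP mbuf.
		 */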
		while (save && (!(save->m_flags & M_PKTHDR) ||
		    !(save->m_pkthdr.pkt_flags & PKTF_MPTCP))) {
			prev = save;
			save = save->m_next;
		}
		if (prev) {
			prev->m_next = NULL;
		} else {
			m->m_next = NULL;
		}

		mb_dsn = m->m_pkthdr.mp_dsn;
		mb_datalen = m->m_pkthdr.mp_rlen;

		todrop = (mb_dsn + mb_datalen) - (mp_tp->mpt_rcvnxt + mp_tp->mpt_rcvwnd);
		if (todrop > 0) {
			tcpstat.tcps_mptcp_rcvpackafterwin++;

			os_log_info(mptcp_log_handle, "%s - %lx: dropping dsn %u dlen %u rcvnxt %u rcvwnd %u todrop %lld\n",
			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
			    (uint32_t)mb_dsn, mb_datalen, (uint32_t)mp_tp->mpt_rcvnxt,
			    mp_tp->mpt_rcvwnd, todrop);

			if (todrop >= mb_datalen) {
				if (freelist == NULL) {
					freelist = m;
				} else {
					tail->m_next = m;
				}

				if (prev != NULL) {
					tail = prev;
				} else {
					tail = m;
				}

				m = save;
				prev = save = NULL;
				continue;
			} else {
				VERIFY(todrop <= INT_MAX);
				m_adj(m, (int)-todrop);
				mb_datalen -= todrop;
				m->m_pkthdr.mp_rlen -= todrop;
			}

			/*
			 * We drop from the right edge of the mbuf, thus the
			 * DATA_FIN is dropped as well
			 */
			m->m_pkthdr.pkt_flags &= ~PKTF_MPTCP_DFIN;
		}

		if (MPTCP_SEQ_LT(mb_dsn, mp_tp->mpt_rcvnxt)) {
			if (MPTCP_SEQ_LEQ((mb_dsn + mb_datalen),
			    mp_tp->mpt_rcvnxt)) {
				if (freelist == NULL) {
					freelist = m;
				} else {
					tail->m_next = m;
				}

				if (prev != NULL) {
					tail = prev;
				} else {
					tail = m;
				}

				m = save;
				prev = save = NULL;
				continue;
			} else {
				VERIFY((mp_tp->mpt_rcvnxt - mb_dsn) <= INT_MAX);
				m_adj(m, (int)(mp_tp->mpt_rcvnxt - mb_dsn));
				mb_datalen -= (mp_tp->mpt_rcvnxt - mb_dsn);
				mb_dsn = mp_tp->mpt_rcvnxt;
				VERIFY(mb_datalen >= 0 && mb_datalen <= USHRT_MAX);
				m->m_pkthdr.mp_rlen = (uint16_t)mb_datalen;
				m->m_pkthdr.mp_dsn = mb_dsn;
			}
		}

		if (MPTCP_SEQ_GT(mb_dsn, mp_tp->mpt_rcvnxt) ||
		    !LIST_EMPTY(&mp_tp->mpt_segq)) {
			mb_dfin = mptcp_reass(mp_so, &m->m_pkthdr, &mb_datalen, m);

			goto next;
		}
		mb_dfin = !!(m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN);

		mptcp_sbrcv_grow(mp_tp);

		if (sbappendstream_rcvdemux(mp_so, m)) {
			wakeup = 1;
		}

		DTRACE_MPTCP6(receive, struct mbuf *, m, struct socket *, mp_so,
		    struct sockbuf *, &mp_so->so_rcv,
		    struct sockbuf *, &mp_so->so_snd,
		    struct mptses *, mpte,
		    struct mptcb *, mp_tp);
		count = mp_so->so_rcv.sb_cc - count;
		tcpstat.tcps_mp_rcvtotal++;
		tcpstat.tcps_mp_rcvbytes += count;

		mp_tp->mpt_rcvnxt += count;

next:
		if (mb_dfin) {
			mptcp_close_fsm(mp_tp, MPCE_RECV_DATA_FIN);
			socantrcvmore(mp_so);
		}
		m = save;
		prev = save = NULL;
		count = mp_so->so_rcv.sb_cc;
	} while (m);

	if (freelist) {
		m_freem(freelist);
	}

	if (wakeup) {
		sorwakeup(mp_so);
	}
}

boolean_t
mptcp_can_send_more(struct mptcb *mp_tp, boolean_t ignore_reinject)
{
	struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);

	/*
	 * Always send if there is data in the reinject-queue.
	 */
	if (!ignore_reinject && mp_tp->mpt_mpte->mpte_reinjectq) {
		return TRUE;
	}

	/*
	 * Don't send if:
	 *
	 * 1. snd_nxt >= snd_max: basically everything has been sent,
	 *    except when using TFO, where we might be doing a 0-byte write.
	 * 2. snd_una + snd_wnd <= snd_nxt: no space in the receiver's window.
	 * 3. snd_nxt + 1 == snd_max and we are closing: a DATA_FIN is
	 *    scheduled. (A worked example of condition 2 follows below.)
	 */
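	/*
	 * Example of condition 2 with illustrative numbers: snd_una = 1000,
	 * snd_wnd = 500, snd_nxt = 1500. Then snd_una + snd_wnd == snd_nxt,
	 * the peer's advertised window is exhausted, and we must not send.
	 */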

	if (!(mp_so->so_flags1 & SOF1_PRECONNECT_DATA) && MPTCP_SEQ_GEQ(mp_tp->mpt_sndnxt, mp_tp->mpt_sndmax)) {
		return FALSE;
	}

	if (MPTCP_SEQ_LEQ(mp_tp->mpt_snduna + mp_tp->mpt_sndwnd, mp_tp->mpt_sndnxt)) {
		return FALSE;
	}

	if (mp_tp->mpt_sndnxt + 1 == mp_tp->mpt_sndmax && mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) {
		return FALSE;
	}

	if (mp_tp->mpt_state >= MPTCPS_FIN_WAIT_2) {
		return FALSE;
	}

	return TRUE;
}

/*
 * MPTCP output.
 */
int
mptcp_output(struct mptses *mpte)
{
	struct mptcb *mp_tp;
	struct mptsub *mpts;
	struct mptsub *mpts_tried = NULL;
	struct socket *mp_so;
	struct mptsub *preferred_mpts = NULL;
	uint64_t old_snd_nxt;
	int error = 0;

	mp_so = mptetoso(mpte);
	mp_tp = mpte->mpte_mptcb;

	socket_lock_assert_owned(mp_so);

	if (mp_so->so_flags & SOF_DEFUNCT) {
		return 0;
	}

	VERIFY(!(mpte->mpte_mppcb->mpp_flags & MPP_WUPCALL));
	mpte->mpte_mppcb->mpp_flags |= MPP_WUPCALL;

	old_snd_nxt = mp_tp->mpt_sndnxt;
	while (mptcp_can_send_more(mp_tp, FALSE)) {
		/* get the "best" subflow to be used for transmission */
		mpts = mptcp_get_subflow(mpte, &preferred_mpts);
		if (mpts == NULL) {
			break;
		}

		/* In case there's just one flow, we reattempt later */
		if (mpts_tried != NULL &&
		    (mpts == mpts_tried || (mpts->mpts_flags & MPTSF_FAILINGOVER))) {
			mpts_tried->mpts_flags &= ~MPTSF_FAILINGOVER;
			mpts_tried->mpts_flags |= MPTSF_ACTIVE;
			mptcp_start_timer(mpte, MPTT_REXMT);
			break;
		}

		/*
		 * Automatic sizing of the send socket buffer. Increase the
		 * send socket buffer size if all of the following criteria
		 * are met:
		 * 1. the receiver has enough buffer space for this data;
		 * 2. the send buffer is filled to 7/8th with data (so we
		 *    actually have data to make use of it).
		 */
		if ((mp_so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE &&
		    tcp_cansbgrow(&mp_so->so_snd)) {
			if ((mp_tp->mpt_sndwnd / 4 * 5) >= mp_so->so_snd.sb_hiwat &&
			    mp_so->so_snd.sb_cc >= (mp_so->so_snd.sb_hiwat / 8 * 7)) {
				if (sbreserve(&mp_so->so_snd,
				    min(mp_so->so_snd.sb_hiwat + tcp_autosndbuf_inc,
				    tcp_autosndbuf_max)) == 1) {
					mp_so->so_snd.sb_idealsize = mp_so->so_snd.sb_hiwat;
				}
			}
		}

		DTRACE_MPTCP3(output, struct mptses *, mpte, struct mptsub *, mpts,
		    struct socket *, mp_so);
		error = mptcp_subflow_output(mpte, mpts, 0);
		if (error) {
			/* can be a temporary loss of source address or other error */
			mpts->mpts_flags |= MPTSF_FAILINGOVER;
			mpts->mpts_flags &= ~MPTSF_ACTIVE;
			mpts_tried = mpts;
			if (error != ECANCELED) {
				os_log_error(mptcp_log_handle, "%s - %lx: Error = %d mpts_flags %#x\n",
				    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
				    error, mpts->mpts_flags);
			}
			break;
		}
		/* The model is to have only one active flow at a time */
		mpts->mpts_flags |= MPTSF_ACTIVE;
		mpts->mpts_probesoon = mpts->mpts_probecnt = 0;

		/* Allows us to update the smoothed rtt */
		if (mptcp_probeto && mpts != preferred_mpts && preferred_mpts != NULL) {
			if (preferred_mpts->mpts_probesoon) {
				if ((tcp_now - preferred_mpts->mpts_probesoon) > mptcp_probeto) {
					mptcp_subflow_output(mpte, preferred_mpts, MPTCP_SUBOUT_PROBING);
					if (preferred_mpts->mpts_probecnt >= mptcp_probecnt) {
						preferred_mpts->mpts_probesoon = 0;
						preferred_mpts->mpts_probecnt = 0;
					}
				}
			} else {
				preferred_mpts->mpts_probesoon = tcp_now;
				preferred_mpts->mpts_probecnt = 0;
			}
		}

		if (mpte->mpte_active_sub == NULL) {
			mpte->mpte_active_sub = mpts;
		} else if (mpte->mpte_active_sub != mpts) {
			mpte->mpte_active_sub->mpts_flags &= ~MPTSF_ACTIVE;
			mpte->mpte_active_sub = mpts;

			mptcpstats_inc_switch(mpte, mpts);
		}
	}

	if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) {
		if (mp_tp->mpt_sndnxt + 1 == mp_tp->mpt_sndmax &&
		    mp_tp->mpt_snduna == mp_tp->mpt_sndnxt) {
			mptcp_finish_usrclosed(mpte);
		}
	}

	mptcp_handle_deferred_upcalls(mpte->mpte_mppcb, MPP_WUPCALL);

	/* subflow errors should not be percolated back up */
	return 0;
}


static struct mptsub *
mptcp_choose_subflow(struct mptsub *mpts, struct mptsub *curbest, int *currtt)
{
	struct tcpcb *tp = sototcpcb(mpts->mpts_socket);

	/*
	 * Lower RTT? Take it, if it's our first one, or if it doesn't
	 * have any loss, or if the current best one has loss as well.
	 */
	if (tp->t_srtt && *currtt > tp->t_srtt &&
	    (curbest == NULL || tp->t_rxtshift == 0 ||
	    sototcpcb(curbest->mpts_socket)->t_rxtshift)) {
		*currtt = tp->t_srtt;
		return mpts;
	}

	/*
	 * If we find a subflow without loss, always take it!
	 */
	if (curbest &&
	    sototcpcb(curbest->mpts_socket)->t_rxtshift &&
	    tp->t_rxtshift == 0) {
		*currtt = tp->t_srtt;
		return mpts;
	}

	return curbest != NULL ? curbest : mpts;
}

static struct mptsub *
mptcp_return_subflow(struct mptsub *mpts)
{
	if (mpts && mptcp_subflow_cwnd_space(mpts->mpts_socket) <= 0) {
		return NULL;
	}

	return mpts;
}

static boolean_t
mptcp_subflow_is_slow(struct mptses *mpte, struct mptsub *mpts)
{
	struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
	int fail_thresh = mptcp_fail_thresh;

	if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER || mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
		fail_thresh *= 2;
	}

	return tp->t_rxtshift >= fail_thresh &&
	       (mptetoso(mpte)->so_snd.sb_cc || mpte->mpte_reinjectq);
}

/*
 * Return the most eligible subflow to be used for sending data.
 */
struct mptsub *
mptcp_get_subflow(struct mptses *mpte, struct mptsub **preferred)
{
	struct tcpcb *besttp, *secondtp;
	struct inpcb *bestinp, *secondinp;
	struct mptsub *mpts;
	struct mptsub *best = NULL;
	struct mptsub *second_best = NULL;
	int exp_rtt = INT_MAX, cheap_rtt = INT_MAX;

	/*
	 * First step:
	 * Choose the best subflow for cellular and non-cellular interfaces.
	 */

	TAILQ_FOREACH(mpts, &mpte->mpte_subflows, mpts_entry) {
		struct socket *so = mpts->mpts_socket;
		struct tcpcb *tp = sototcpcb(so);
		struct inpcb *inp = sotoinpcb(so);

		/*
		 * First, the hard conditions to reject subflows
		 * (e.g., not connected, ...)
		 */
		if (inp->inp_last_outifp == NULL) {
			continue;
		}

		if (INP_WAIT_FOR_IF_FEEDBACK(inp)) {
			continue;
		}

		/* There can only be one subflow in degraded state */
		if (mpts->mpts_flags & MPTSF_MP_DEGRADED) {
			best = mpts;
			break;
		}

		/*
		 * If this subflow is waiting to finally send, do it!
		 */
		if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
			return mptcp_return_subflow(mpts);
		}

		/*
		 * Only send if the subflow is MP_CAPABLE. The exceptions to
		 * this rule (degraded or TFO) have been taken care of above.
		 */
		if (!(mpts->mpts_flags & MPTSF_MP_CAPABLE)) {
			continue;
		}

		if ((so->so_state & SS_ISDISCONNECTED) ||
		    !(so->so_state & SS_ISCONNECTED) ||
		    !TCPS_HAVEESTABLISHED(tp->t_state) ||
		    tp->t_state > TCPS_CLOSE_WAIT) {
			continue;
		}

		/*
		 * Second, the soft conditions to find the subflow with the
		 * best conditions for each set (aka cellular vs non-cellular)
		 */
		if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) {
			second_best = mptcp_choose_subflow(mpts, second_best,
			    &exp_rtt);
		} else {
			best = mptcp_choose_subflow(mpts, best, &cheap_rtt);
		}
	}

	/*
	 * If there is no preferred or backup subflow, and there is no active
	 * subflow, use the last usable subflow.
	 */
	if (best == NULL) {
		return mptcp_return_subflow(second_best);
	}

	if (second_best == NULL) {
		return mptcp_return_subflow(best);
	}

	besttp = sototcpcb(best->mpts_socket);
	bestinp = sotoinpcb(best->mpts_socket);
	secondtp = sototcpcb(second_best->mpts_socket);
	secondinp = sotoinpcb(second_best->mpts_socket);

	if (preferred != NULL) {
		*preferred = mptcp_return_subflow(best);
	}

	/*
	 * Second step: among best and second_best, choose the one that is
	 * most appropriate for this particular service-type.
	 */
	if (mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
		return mptcp_return_subflow(best);
	} else if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) {
		/*
		 * Only handover if Symptoms tells us to do so.
		 */
		if (!IFNET_IS_CELLULAR(bestinp->inp_last_outifp) &&
		    mptcp_wifi_quality_for_session(mpte) != MPTCP_WIFI_QUALITY_GOOD &&
		    mptcp_subflow_is_slow(mpte, best)) {
			return mptcp_return_subflow(second_best);
		}

		return mptcp_return_subflow(best);
	} else if (mpte->mpte_svctype == MPTCP_SVCTYPE_INTERACTIVE) {
		int rtt_thresh = mptcp_rtthist_rtthresh << TCP_RTT_SHIFT;
		int rto_thresh = mptcp_rtothresh;

		/* Adjust with symptoms information */
		if (!IFNET_IS_CELLULAR(bestinp->inp_last_outifp) &&
		    mptcp_wifi_quality_for_session(mpte) != MPTCP_WIFI_QUALITY_GOOD) {
			rtt_thresh /= 2;
			rto_thresh /= 2;
		}

		if (besttp->t_srtt && secondtp->t_srtt &&
		    besttp->t_srtt >= rtt_thresh &&
		    secondtp->t_srtt < rtt_thresh) {
			tcpstat.tcps_mp_sel_rtt++;
			return mptcp_return_subflow(second_best);
		}

		if (mptcp_subflow_is_slow(mpte, best) &&
		    secondtp->t_rxtshift == 0) {
			return mptcp_return_subflow(second_best);
		}

		/* Compare RTOs; select second_best if best's RTO exceeds rto_thresh */
		if (besttp->t_rxtcur && secondtp->t_rxtcur &&
		    besttp->t_rxtcur >= rto_thresh &&
		    secondtp->t_rxtcur < rto_thresh) {
			tcpstat.tcps_mp_sel_rto++;

			return mptcp_return_subflow(second_best);
		}

		/*
		 * None of the above conditions for sending on the secondary
		 * were true. So, let's schedule on the best one, if it still
		 * has some space in its congestion window.
		 */
		return mptcp_return_subflow(best);
	} else if (mpte->mpte_svctype >= MPTCP_SVCTYPE_AGGREGATE) {
		struct mptsub *tmp;

		/*
		 * We only care about RTT when aggregating
		 */
		if (besttp->t_srtt > secondtp->t_srtt) {
			tmp = best;
			best = second_best;
			besttp = secondtp;
			bestinp = secondinp;

			second_best = tmp;
			secondtp = sototcpcb(second_best->mpts_socket);
			secondinp = sotoinpcb(second_best->mpts_socket);
		}

		/* Is there still space in the congestion window? */
		if (mptcp_subflow_cwnd_space(bestinp->inp_socket) <= 0) {
			return mptcp_return_subflow(second_best);
		}

		return mptcp_return_subflow(best);
	} else {
		panic("Unknown service-type configured for MPTCP");
	}

	return NULL;
}

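/*
 * Summary of the data-level close transitions handled below
 * (state + event -> next state), mirroring the TCP close diagram:
 *
 *	ESTABLISHED + MPCE_CLOSE          -> FIN_WAIT_1  (sndmax++)
 *	ESTABLISHED + MPCE_RECV_DATA_FIN  -> CLOSE_WAIT  (rcvnxt++)
 *	CLOSE_WAIT  + MPCE_CLOSE          -> LAST_ACK    (sndmax++)
 *	FIN_WAIT_1  + MPCE_RECV_DATA_ACK  -> FIN_WAIT_2
 *	FIN_WAIT_1  + MPCE_RECV_DATA_FIN  -> CLOSING     (rcvnxt++)
 *	CLOSING     + MPCE_RECV_DATA_ACK  -> TIME_WAIT
 *	LAST_ACK    + MPCE_RECV_DATA_ACK  -> mptcp_close()
 *	FIN_WAIT_2  + MPCE_RECV_DATA_FIN  -> TIME_WAIT   (rcvnxt++)
 */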
void
mptcp_close_fsm(struct mptcb *mp_tp, uint32_t event)
{
	struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);

	socket_lock_assert_owned(mp_so);

	DTRACE_MPTCP2(state__change, struct mptcb *, mp_tp,
	    uint32_t, event);

	switch (mp_tp->mpt_state) {
	case MPTCPS_CLOSED:
	case MPTCPS_LISTEN:
		mp_tp->mpt_state = MPTCPS_TERMINATE;
		break;

	case MPTCPS_ESTABLISHED:
		if (event == MPCE_CLOSE) {
			mp_tp->mpt_state = MPTCPS_FIN_WAIT_1;
			mp_tp->mpt_sndmax += 1; /* adjust for Data FIN */
		} else if (event == MPCE_RECV_DATA_FIN) {
			mp_tp->mpt_rcvnxt += 1; /* adj remote data FIN */
			mp_tp->mpt_state = MPTCPS_CLOSE_WAIT;
		}
		break;

	case MPTCPS_CLOSE_WAIT:
		if (event == MPCE_CLOSE) {
			mp_tp->mpt_state = MPTCPS_LAST_ACK;
			mp_tp->mpt_sndmax += 1; /* adjust for Data FIN */
		}
		break;

	case MPTCPS_FIN_WAIT_1:
		if (event == MPCE_RECV_DATA_ACK) {
			mp_tp->mpt_state = MPTCPS_FIN_WAIT_2;
		} else if (event == MPCE_RECV_DATA_FIN) {
			mp_tp->mpt_rcvnxt += 1; /* adj remote data FIN */
			mp_tp->mpt_state = MPTCPS_CLOSING;
		}
		break;

	case MPTCPS_CLOSING:
		if (event == MPCE_RECV_DATA_ACK) {
			mp_tp->mpt_state = MPTCPS_TIME_WAIT;
		}
		break;

	case MPTCPS_LAST_ACK:
		if (event == MPCE_RECV_DATA_ACK) {
			mptcp_close(mp_tp->mpt_mpte, mp_tp);
		}
		break;

	case MPTCPS_FIN_WAIT_2:
		if (event == MPCE_RECV_DATA_FIN) {
			mp_tp->mpt_rcvnxt += 1; /* adj remote data FIN */
			mp_tp->mpt_state = MPTCPS_TIME_WAIT;
		}
		break;

	case MPTCPS_TIME_WAIT:
	case MPTCPS_TERMINATE:
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
	DTRACE_MPTCP2(state__change, struct mptcb *, mp_tp,
	    uint32_t, event);
}

/* If you change this function, match up mptcp_update_rcv_state_f */
void
mptcp_update_dss_rcv_state(struct mptcp_dsn_opt *dss_info, struct tcpcb *tp,
    uint16_t csum)
{
	struct mptcb *mp_tp = tptomptp(tp);
	u_int64_t full_dsn = 0;

	NTOHL(dss_info->mdss_dsn);
	NTOHL(dss_info->mdss_subflow_seqn);
	NTOHS(dss_info->mdss_data_len);

	/* XXX for autosndbuf grow sb here */
	MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt, dss_info->mdss_dsn, full_dsn);
	mptcp_update_rcv_state_meat(mp_tp, tp,
	    full_dsn, dss_info->mdss_subflow_seqn, dss_info->mdss_data_len,
	    csum);
}

void
mptcp_update_rcv_state_meat(struct mptcb *mp_tp, struct tcpcb *tp,
    u_int64_t full_dsn, u_int32_t seqn, u_int16_t mdss_data_len,
    uint16_t csum)
{
	if (mdss_data_len == 0) {
		os_log_error(mptcp_log_handle, "%s - %lx: Infinite Mapping.\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte));

		if ((mp_tp->mpt_flags & MPTCPF_CHECKSUM) && (csum != 0)) {
			os_log_error(mptcp_log_handle, "%s - %lx: Bad checksum %x \n",
			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte), csum);
		}
		mptcp_notify_mpfail(tp->t_inpcb->inp_socket);
		return;
	}

	mptcp_notify_mpready(tp->t_inpcb->inp_socket);

	tp->t_rcv_map.mpt_dsn = full_dsn;
	tp->t_rcv_map.mpt_sseq = seqn;
	tp->t_rcv_map.mpt_len = mdss_data_len;
	tp->t_rcv_map.mpt_csum = csum;
	tp->t_mpflags |= TMPF_EMBED_DSN;
}


static uint16_t
mptcp_input_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, uint32_t sseq,
    uint16_t dlen, uint16_t csum, int dfin)
{
	struct mptcb *mp_tp = tptomptp(tp);
	int real_len = dlen - dfin;
	uint32_t sum = 0;

	VERIFY(real_len >= 0);

	if (mp_tp == NULL) {
		return 0;
	}

	if (!(mp_tp->mpt_flags & MPTCPF_CHECKSUM)) {
		return 0;
	}

	if (tp->t_mpflags & TMPF_TCP_FALLBACK) {
		return 0;
	}

	/*
	 * The remote side may send a packet with fewer bytes than the
	 * claimed DSS checksum length.
	 */
	if ((int)m_length2(m, NULL) < real_len) {
		return 0xffff;
	}

	if (real_len != 0) {
		sum = m_sum16(m, 0, real_len);
	}

	sum += in_pseudo64(htonll(dsn), htonl(sseq), htons(dlen) + csum);
	ADDCARRY(sum);

	DTRACE_MPTCP3(checksum__result, struct tcpcb *, tp, struct mbuf *, m,
	    uint32_t, sum);

	return ~sum & 0xffff;
}

/*
 * MPTCP checksum support
 * The checksum is calculated whenever the MPTCP DSS option is included
 * in the TCP packet. The checksum includes the sum of the MPTCP pseudo-
 * header and the actual data indicated by the length specified in the
 * DSS option.
 */
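
/*
 * Illustrative layout of the DSS pseudo-header summed above (field widths
 * in octets, per the MPTCP specification):
 *
 *	+---------------------------------------+
 *	|       Data Sequence Number (8)        |
 *	+---------------------------------------+
 *	|      Subflow Sequence Number (4)      |
 *	+--------------------+------------------+
 *	| Data-Level Len (2) |   Checksum (2)   |
 *	+--------------------+------------------+
 *
 * in_pseudo64() folds these 16 octets into the running 16-bit one's
 * complement sum together with the payload sum from m_sum16().
 */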

int
mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn,
    uint32_t sseq, uint16_t dlen, uint16_t csum, int dfin)
{
	uint16_t mptcp_csum;

	mptcp_csum = mptcp_input_csum(tp, m, dsn, sseq, dlen, csum, dfin);
	if (mptcp_csum) {
		tp->t_mpflags |= TMPF_SND_MPFAIL;
		mptcp_notify_mpfail(tp->t_inpcb->inp_socket);
		m_freem(m);
		tcpstat.tcps_mp_badcsum++;
		return -1;
	}
	return 0;
}

uint16_t
mptcp_output_csum(struct mbuf *m, uint64_t dss_val, uint32_t sseq, uint16_t dlen)
{
	uint32_t sum = 0;

	if (dlen) {
		sum = m_sum16(m, 0, dlen);
	}

	dss_val = mptcp_hton64(dss_val);
	sseq = htonl(sseq);
	dlen = htons(dlen);
	sum += in_pseudo64(dss_val, sseq, dlen);

	ADDCARRY(sum);
	sum = ~sum & 0xffff;
	DTRACE_MPTCP2(checksum__result, struct mbuf *, m, uint32_t, sum);

	return (uint16_t)sum;
}
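
/*
 * A usage sketch (names as in this file): the sender stamps the DSS
 * option with mptcp_output_csum(m, dss_val, sseq, dlen), and the receiver
 * recomputes the same one's complement sum over payload plus
 * pseudo-header via mptcp_validate_csum(); a non-zero folded result
 * indicates corruption (or a middlebox rewrite) and triggers MP_FAIL
 * handling.
 */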

/*
 * When the WiFi signal starts fading, there is more loss and there are
 * RTT spikes. Check whether there has been a large spike by comparing
 * against a tolerable RTT spike threshold.
 */
boolean_t
mptcp_no_rto_spike(struct socket *so)
{
	struct tcpcb *tp = intotcpcb(sotoinpcb(so));
	int32_t spike = 0;

	if (tp->t_rxtcur > mptcp_rtothresh) {
		spike = tp->t_rxtcur - mptcp_rtothresh;
	}

	if (spike > 0) {
		return FALSE;
	} else {
		return TRUE;
	}
}

void
mptcp_handle_deferred_upcalls(struct mppcb *mpp, uint32_t flag)
{
	VERIFY(mpp->mpp_flags & flag);
	mpp->mpp_flags &= ~flag;

	if (mptcp_should_defer_upcall(mpp)) {
		return;
	}

	if (mpp->mpp_flags & MPP_SHOULD_WORKLOOP) {
		mpp->mpp_flags &= ~MPP_SHOULD_WORKLOOP;

		mptcp_subflow_workloop(mpp->mpp_pcbe);
	}

	if (mpp->mpp_flags & MPP_SHOULD_RWAKEUP) {
		mpp->mpp_flags &= ~MPP_SHOULD_RWAKEUP;

		sorwakeup(mpp->mpp_socket);
	}

	if (mpp->mpp_flags & MPP_SHOULD_WWAKEUP) {
		mpp->mpp_flags &= ~MPP_SHOULD_WWAKEUP;

		sowwakeup(mpp->mpp_socket);
	}
}

static void
mptcp_reset_itfinfo(struct mpt_itf_info *info)
{
	memset(info, 0, sizeof(*info));
}

void
mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index,
    uint32_t necp_flags, __unused bool *viable)
{
	boolean_t has_v4 = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_HAS_IPV4);
	boolean_t has_v6 = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_HAS_IPV6);
	boolean_t has_nat64 = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_HAS_NAT64);
	boolean_t low_power = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER);
	struct mppcb *mp = (struct mppcb *)handle;
	struct mptses *mpte = mptompte(mp);
	struct socket *mp_so;
	struct mptcb *mp_tp;
	uint32_t i, ifindex;
	struct ifnet *ifp;
	int locked = 0;

	ifindex = interface_index;
	VERIFY(ifindex != IFSCOPE_NONE);

	/* About to be garbage-collected (see note about MPTCP/NECP interactions) */
	if (mp->mpp_socket->so_usecount == 0) {
		return;
	}

	mp_so = mptetoso(mpte);

	if (action != NECP_CLIENT_CBACTION_INITIAL) {
		socket_lock(mp_so, 1);
		locked = 1;

		/* Check again, because it might have changed while waiting */
		if (mp->mpp_socket->so_usecount == 0) {
			goto out;
		}
	}

	socket_lock_assert_owned(mp_so);

	mp_tp = mpte->mpte_mptcb;

	ifnet_head_lock_shared();
	ifp = ifindex2ifnet[ifindex];
	ifnet_head_done();

	os_log(mptcp_log_handle, "%s - %lx: action: %u ifindex %u delegated to %u usecount %u mpt_flags %#x state %u v4 %u v6 %u nat64 %u power %u\n",
	    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), action, ifindex,
	    ifp && ifp->if_delegated.ifp ? ifp->if_delegated.ifp->if_index : IFSCOPE_NONE,
	    mp->mpp_socket->so_usecount, mp_tp->mpt_flags, mp_tp->mpt_state,
	    has_v4, has_v6, has_nat64, low_power);

	/* No need for this on sockets that have fallen back to TCP */
	if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) {
		goto out;
	}

	/*
	 * When the interface goes into low-power mode we don't want to
	 * establish new subflows on it. Thus, mark it internally as
	 * non-viable.
	 */
	if (low_power) {
		action = NECP_CLIENT_CBACTION_NONVIABLE;
	}

	if (action == NECP_CLIENT_CBACTION_NONVIABLE) {
		for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
			if (mpte->mpte_itfinfo[i].ifindex == IFSCOPE_NONE) {
				continue;
			}

			if (mpte->mpte_itfinfo[i].ifindex == ifindex) {
				mptcp_reset_itfinfo(&mpte->mpte_itfinfo[i]);
			}
		}

		mptcp_sched_create_subflows(mpte);
	} else if (action == NECP_CLIENT_CBACTION_VIABLE ||
	    action == NECP_CLIENT_CBACTION_INITIAL) {
		int found_slot = 0, slot_index = -1;
		struct sockaddr *dst;

		if (ifp == NULL) {
			goto out;
		}

		if (IFNET_IS_COMPANION_LINK(ifp)) {
			goto out;
		}

		if (IFNET_IS_EXPENSIVE(ifp) &&
		    (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) {
			goto out;
		}

		if (IFNET_IS_CONSTRAINED(ifp) &&
		    (mp_so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED)) {
			goto out;
		}

		if (IFNET_IS_CELLULAR(ifp) &&
		    (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
			goto out;
		}

		if (IS_INTF_CLAT46(ifp)) {
			has_v4 = FALSE;
		}

		/* Look for the slot in which to store/update the interface info. */
		for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
			/* Found a potential empty slot where we can put it */
			if (mpte->mpte_itfinfo[i].ifindex == 0) {
				found_slot = 1;
				slot_index = i;
			}

			/*
			 * The interface is already in our array. Check if we
			 * need to update it.
			 */
			if (mpte->mpte_itfinfo[i].ifindex == ifindex &&
			    (mpte->mpte_itfinfo[i].has_v4_conn != has_v4 ||
			    mpte->mpte_itfinfo[i].has_v6_conn != has_v6 ||
			    mpte->mpte_itfinfo[i].has_nat64_conn != has_nat64)) {
				found_slot = 1;
				slot_index = i;
				break;
			}

			if (mpte->mpte_itfinfo[i].ifindex == ifindex) {
				/*
				 * Ok, it's already there and we don't need
				 * to update it
				 */
				goto out;
			}
		}

		dst = mptcp_get_session_dst(mpte, has_v6, has_v4);
		if (dst && dst->sa_family == AF_INET &&
		    has_v6 && !has_nat64 && !has_v4) {
			if (found_slot) {
				mpte->mpte_itfinfo[slot_index].ifindex = ifindex;
				mpte->mpte_itfinfo[slot_index].has_v4_conn = has_v4;
				mpte->mpte_itfinfo[slot_index].has_v6_conn = has_v6;
				mpte->mpte_itfinfo[slot_index].has_nat64_conn = has_nat64;
			}
			goto out;
		}

		if (found_slot == 0) {
			int new_size = mpte->mpte_itfinfo_size * 2;
			struct mpt_itf_info *info = kalloc_data(sizeof(*info) * new_size, Z_ZERO);

			if (info == NULL) {
				os_log_error(mptcp_log_handle, "%s - %lx: malloc failed for %u\n",
				    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), new_size);
				goto out;
			}

			memcpy(info, mpte->mpte_itfinfo, mpte->mpte_itfinfo_size * sizeof(*info));

			if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE) {
				kfree_data(mpte->mpte_itfinfo,
				    sizeof(*info) * mpte->mpte_itfinfo_size);
			}

			/* We allocated a new array, thus the first new slot must be empty */
			slot_index = mpte->mpte_itfinfo_size;

			mpte->mpte_itfinfo = info;
			mpte->mpte_itfinfo_size = new_size;
		}

		VERIFY(slot_index >= 0 && slot_index < (int)mpte->mpte_itfinfo_size);
		mpte->mpte_itfinfo[slot_index].ifindex = ifindex;
		mpte->mpte_itfinfo[slot_index].has_v4_conn = has_v4;
		mpte->mpte_itfinfo[slot_index].has_v6_conn = has_v6;
		mpte->mpte_itfinfo[slot_index].has_nat64_conn = has_nat64;

		mptcp_sched_create_subflows(mpte);
	}

out:
	if (locked) {
		socket_unlock(mp_so, 1);
	}
}

void
mptcp_set_restrictions(struct socket *mp_so)
{
	struct mptses *mpte = mpsotompte(mp_so);
	uint32_t i;

	socket_lock_assert_owned(mp_so);

	ifnet_head_lock_shared();

	for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
		struct mpt_itf_info *info = &mpte->mpte_itfinfo[i];
		uint32_t ifindex = info->ifindex;
		struct ifnet *ifp;

		if (ifindex == IFSCOPE_NONE) {
			continue;
		}

		ifp = ifindex2ifnet[ifindex];
		if (ifp == NULL) {
			continue;
		}

		if (IFNET_IS_EXPENSIVE(ifp) &&
		    (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) {
			info->ifindex = IFSCOPE_NONE;
		}

		if (IFNET_IS_CONSTRAINED(ifp) &&
		    (mp_so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED)) {
			info->ifindex = IFSCOPE_NONE;
		}

		if (IFNET_IS_CELLULAR(ifp) &&
		    (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
			info->ifindex = IFSCOPE_NONE;
		}
	}

	ifnet_head_done();
}

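/*
 * Consume the k bytes just written into the buffer: decrement the
 * remaining length (clen), bail out to the caller's "done" label when
 * the buffer is exhausted, and advance the cursor (c).
 */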
#define DUMP_BUF_CHK() {	\
	clen -= k;		\
	if (clen < 1)		\
		goto done;	\
	c += k;			\
}

int
dump_mptcp_reass_qlen(char *str, int str_len)
{
	char *c = str;
	int k, clen = str_len;

	if (mptcp_reass_total_qlen != 0) {
		k = scnprintf(c, clen, "\nmptcp reass qlen %d\n", mptcp_reass_total_qlen);
		DUMP_BUF_CHK();
	}

done:
	return str_len - clen;
}