1 /*
2 * Copyright (c) 2012-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <netinet/in_systm.h>
31 #include <sys/socket.h>
32 #include <sys/socketvar.h>
33 #include <sys/syslog.h>
34 #include <net/route.h>
35 #include <netinet/in.h>
36 #include <net/if.h>
37
38 #include <netinet/ip.h>
39 #include <netinet/ip_var.h>
40 #include <netinet/in_var.h>
41 #include <netinet/tcp.h>
42 #include <netinet/tcp_cache.h>
43 #include <netinet/tcp_seq.h>
44 #include <netinet/tcpip.h>
45 #include <netinet/tcp_fsm.h>
46 #include <netinet/mptcp_var.h>
47 #include <netinet/mptcp.h>
48 #include <netinet/mptcp_opt.h>
49 #include <netinet/mptcp_seq.h>
50
51 #include <libkern/crypto/sha1.h>
52 #include <libkern/crypto/sha2.h>
53 #include <netinet/mptcp_timer.h>
54
55 #include <mach/sdt.h>
56
57 static int mptcp_validate_join_hmac(struct tcpcb *, u_char*, int);
58 static int mptcp_snd_mpprio(struct tcpcb *tp, u_char *cp, int optlen);
59 static void mptcp_send_remaddr_opt(struct tcpcb *, struct mptcp_remaddr_opt *);
60 static int mptcp_echo_add_addr(struct tcpcb *, u_char *, unsigned int);
61
62 /*
63 * MPTCP Options Output Processing
64 */
65
66 static unsigned
mptcp_setup_first_subflow_syn_opts(struct socket * so,u_char * opt,unsigned optlen)67 mptcp_setup_first_subflow_syn_opts(struct socket *so, u_char *opt, unsigned optlen)
68 {
69 struct mptcp_mpcapable_opt_rsp mptcp_opt;
70 struct tcpcb *tp = sototcpcb(so);
71 struct mptcb *mp_tp = tptomptp(tp);
72 struct mptses *mpte = mp_tp->mpt_mpte;
73 int ret;
74
75 uint8_t mmco_len = mp_tp->mpt_version == MPTCP_VERSION_0 ?
76 sizeof(struct mptcp_mpcapable_opt_rsp) :
77 sizeof(struct mptcp_mpcapable_opt_common);
78
79 ret = tcp_heuristic_do_mptcp(tp);
80 if (ret > 0) {
81 os_log_info(mptcp_log_handle, "%s - %lx: Not doing MPTCP due to heuristics",
82 __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte));
83 mp_tp->mpt_flags |= MPTCPF_FALLBACK_HEURISTIC;
84 return optlen;
85 }
86
87 /*
88 * Avoid retransmitting the MP_CAPABLE option.
89 */
90 if (ret == 0 &&
91 tp->t_rxtshift > mptcp_mpcap_retries &&
92 !(mpte->mpte_flags & MPTE_FORCE_ENABLE)) {
93 if (!(mp_tp->mpt_flags & (MPTCPF_FALLBACK_HEURISTIC | MPTCPF_HEURISTIC_TRAC))) {
94 mp_tp->mpt_flags |= MPTCPF_HEURISTIC_TRAC;
95 tcp_heuristic_mptcp_loss(tp);
96 }
97 return optlen;
98 }
99
100 bzero(&mptcp_opt, sizeof(struct mptcp_mpcapable_opt_rsp));
101
102 mptcp_opt.mmc_common.mmco_kind = TCPOPT_MULTIPATH;
103 mptcp_opt.mmc_common.mmco_len = mmco_len;
104 mptcp_opt.mmc_common.mmco_subtype = MPO_CAPABLE;
105 mptcp_opt.mmc_common.mmco_version = mp_tp->mpt_version;
106 mptcp_opt.mmc_common.mmco_flags |= MPCAP_PROPOSAL_SBIT;
107 if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
108 mptcp_opt.mmc_common.mmco_flags |= MPCAP_CHECKSUM_CBIT;
109 }
110 mptcp_opt.mmc_localkey = mp_tp->mpt_localkey;
111
112 memcpy(opt + optlen, &mptcp_opt, mmco_len);
113 optlen += mmco_len;
114
115 return optlen;
116 }
117
118 static unsigned
mptcp_setup_join_subflow_syn_opts(struct socket * so,u_char * opt,unsigned optlen)119 mptcp_setup_join_subflow_syn_opts(struct socket *so, u_char *opt, unsigned optlen)
120 {
121 struct mptcp_mpjoin_opt_req mpjoin_req;
122 struct inpcb *inp = sotoinpcb(so);
123 struct tcpcb *tp = NULL;
124 struct mptsub *mpts;
125
126 if (!inp) {
127 return optlen;
128 }
129
130 tp = intotcpcb(inp);
131 if (!tp) {
132 return optlen;
133 }
134
135 mpts = tp->t_mpsub;
136
137 bzero(&mpjoin_req, sizeof(mpjoin_req));
138 mpjoin_req.mmjo_kind = TCPOPT_MULTIPATH;
139 mpjoin_req.mmjo_len = sizeof(mpjoin_req);
140 mpjoin_req.mmjo_subtype_bkp = MPO_JOIN << 4;
141
142 if (tp->t_mpflags & TMPF_BACKUP_PATH) {
143 mpjoin_req.mmjo_subtype_bkp |= MPTCP_BACKUP;
144 } else if (inp->inp_boundifp && IFNET_IS_CELLULAR(inp->inp_boundifp) &&
145 mptcp_subflows_need_backup_flag(mpts->mpts_mpte)) {
146 mpjoin_req.mmjo_subtype_bkp |= MPTCP_BACKUP;
147 tp->t_mpflags |= TMPF_BACKUP_PATH;
148 } else {
149 mpts->mpts_flags |= MPTSF_PREFERRED;
150 }
151
152 mpjoin_req.mmjo_addr_id = tp->t_local_aid;
153 mpjoin_req.mmjo_peer_token = tptomptp(tp)->mpt_remotetoken;
154 mptcp_get_rands(tp->t_local_aid, tptomptp(tp),
155 &mpjoin_req.mmjo_rand, NULL);
156 memcpy(opt + optlen, &mpjoin_req, mpjoin_req.mmjo_len);
157 optlen += mpjoin_req.mmjo_len;
158
159 return optlen;
160 }
161
162 unsigned
mptcp_setup_join_ack_opts(struct tcpcb * tp,u_char * opt,unsigned optlen)163 mptcp_setup_join_ack_opts(struct tcpcb *tp, u_char *opt, unsigned optlen)
164 {
165 unsigned new_optlen;
166 struct mptcp_mpjoin_opt_rsp2 join_rsp2;
167
168 if ((MAX_TCPOPTLEN - optlen) < sizeof(struct mptcp_mpjoin_opt_rsp2)) {
169 printf("%s: no space left %d \n", __func__, optlen);
170 return optlen;
171 }
172
173 bzero(&join_rsp2, sizeof(struct mptcp_mpjoin_opt_rsp2));
174 join_rsp2.mmjo_kind = TCPOPT_MULTIPATH;
175 join_rsp2.mmjo_len = sizeof(struct mptcp_mpjoin_opt_rsp2);
176 join_rsp2.mmjo_subtype = MPO_JOIN;
177 mptcp_get_mpjoin_hmac(tp->t_local_aid, tptomptp(tp),
178 (u_char*)&join_rsp2.mmjo_mac, HMAC_TRUNCATED_ACK);
179 memcpy(opt + optlen, &join_rsp2, join_rsp2.mmjo_len);
180 new_optlen = optlen + join_rsp2.mmjo_len;
181 return new_optlen;
182 }
183
184 unsigned
mptcp_setup_syn_opts(struct socket * so,u_char * opt,unsigned optlen)185 mptcp_setup_syn_opts(struct socket *so, u_char *opt, unsigned optlen)
186 {
187 unsigned new_optlen;
188
189 if (!(so->so_flags & SOF_MP_SEC_SUBFLOW)) {
190 new_optlen = mptcp_setup_first_subflow_syn_opts(so, opt, optlen);
191 } else {
192 new_optlen = mptcp_setup_join_subflow_syn_opts(so, opt, optlen);
193 }
194
195 return new_optlen;
196 }
197
198 static int
mptcp_send_mpfail(struct tcpcb * tp,u_char * opt,unsigned int optlen)199 mptcp_send_mpfail(struct tcpcb *tp, u_char *opt, unsigned int optlen)
200 {
201 #pragma unused(tp, opt, optlen)
202
203 struct mptcb *mp_tp = NULL;
204 struct mptcp_mpfail_opt fail_opt;
205 uint64_t dsn;
206 uint8_t len = sizeof(struct mptcp_mpfail_opt);
207
208 mp_tp = tptomptp(tp);
209 if (mp_tp == NULL) {
210 tp->t_mpflags &= ~TMPF_SND_MPFAIL;
211 return optlen;
212 }
213
214 /* if option space low give up */
215 if ((MAX_TCPOPTLEN - optlen) < sizeof(struct mptcp_mpfail_opt)) {
216 tp->t_mpflags &= ~TMPF_SND_MPFAIL;
217 return optlen;
218 }
219
220 dsn = mp_tp->mpt_rcvnxt;
221
222 bzero(&fail_opt, sizeof(fail_opt));
223 fail_opt.mfail_kind = TCPOPT_MULTIPATH;
224 fail_opt.mfail_len = len;
225 fail_opt.mfail_subtype = MPO_FAIL;
226 fail_opt.mfail_dsn = mptcp_hton64(dsn);
227 memcpy(opt + optlen, &fail_opt, len);
228 optlen += len;
229 tp->t_mpflags &= ~TMPF_SND_MPFAIL;
230 return optlen;
231 }
232
/*
 * Append an "infinite mapping" DSS option, signalling fallback from MPTCP
 * to regular TCP on this subflow (mapping with data_len == 0).
 *
 * Returns the updated option length; returns optlen unchanged (to retry
 * later) when there is no socket/MPTCP control block, not enough option
 * space, or the computed DSN/subflow sequence is still zero.
 */
static int
mptcp_send_infinite_mapping(struct tcpcb *tp, u_char *opt, unsigned int optlen)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	uint8_t len = sizeof(struct mptcp_dsn_opt);
	struct mptcp_dsn_opt infin_opt;
	struct mptcb *mp_tp = NULL;
	uint8_t csum_len = 0;

	if (!so) {
		return optlen;
	}

	mp_tp = tptomptp(tp);
	if (mp_tp == NULL) {
		return optlen;
	}

	/* when DSS checksums were negotiated, 2 extra bytes follow the option */
	if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
		csum_len = 2;
	}

	/* try later */
	if ((MAX_TCPOPTLEN - optlen) < (len + csum_len)) {
		return optlen;
	}

	bzero(&infin_opt, sizeof(infin_opt));
	infin_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
	infin_opt.mdss_copt.mdss_len = len + csum_len;
	infin_opt.mdss_copt.mdss_subtype = MPO_DSS;
	infin_opt.mdss_copt.mdss_flags |= MDSS_M;
	if (mp_tp->mpt_flags & MPTCPF_RECVD_MPFAIL) {
		/* peer reported a checksum failure: restart the mapping there */
		infin_opt.mdss_dsn = (u_int32_t)
		    MPTCP_DATASEQ_LOW32(mp_tp->mpt_dsn_at_csum_fail);
		infin_opt.mdss_subflow_seqn = mp_tp->mpt_ssn_at_csum_fail;
	} else {
		/*
		 * If MPTCP fallback happens, but TFO succeeds, the data on the
		 * SYN does not belong to the MPTCP data sequence space.
		 */
		if ((tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
		    ((mp_tp->mpt_local_idsn + 1) == mp_tp->mpt_snduna)) {
			infin_opt.mdss_subflow_seqn = 1;
		} else {
			infin_opt.mdss_subflow_seqn = tp->snd_una - tp->t_mpsub->mpts_iss;
		}
		infin_opt.mdss_dsn = (u_int32_t)
		    MPTCP_DATASEQ_LOW32(mp_tp->mpt_snduna);
	}

	/* a zero DSN or subflow sequence is not yet sendable — retry later */
	if ((infin_opt.mdss_dsn == 0) || (infin_opt.mdss_subflow_seqn == 0)) {
		return optlen;
	}
	infin_opt.mdss_dsn = htonl(infin_opt.mdss_dsn);
	infin_opt.mdss_subflow_seqn = htonl(infin_opt.mdss_subflow_seqn);
	/* data_len == 0 is what makes this mapping "infinite" */
	infin_opt.mdss_data_len = 0;

	memcpy(opt + optlen, &infin_opt, len);
	optlen += len;
	if (csum_len != 0) {
		/* The checksum field is set to 0 for infinite mapping */
		uint16_t csum = 0;
		memcpy(opt + optlen, &csum, csum_len);
		optlen += csum_len;
	}

	tp->t_mpflags |= TMPF_INFIN_SENT;
	tcpstat.tcps_estab_fallback++;
	return optlen;
}
304
305
306 static int
mptcp_ok_to_fin(struct tcpcb * tp,u_int64_t dsn,u_int32_t datalen)307 mptcp_ok_to_fin(struct tcpcb *tp, u_int64_t dsn, u_int32_t datalen)
308 {
309 struct mptcb *mp_tp = tptomptp(tp);
310
311 dsn = (mp_tp->mpt_sndmax & MPTCP_DATASEQ_LOW32_MASK) | dsn;
312 if ((dsn + datalen) == mp_tp->mpt_sndmax) {
313 return 1;
314 }
315
316 return 0;
317 }
318
/*
 * Main MPTCP option-output routine, called from tcp_output() for every
 * non-SYN segment on an MPTCP subflow.
 *
 * Appends at most one of the mutually-exclusive MPTCP signalling options
 * (MP_CAPABLE ACK, MP_JOIN ACK, MP_FAIL / infinite mapping on fallback,
 * or one of the DSS variants: 64/32-bit DSN with 32/64-bit Data ACK,
 * plus DATA_FIN handling), and possibly REMOVE_ADDR / ADD_ADDR-echo /
 * MP_PRIO before the DSS block.
 *
 * Parameters:
 *   off              - offset into the send buffer of the data being sent
 *   opt/optlen       - TCP option buffer and its current length
 *   flags            - TCP header flags of the outgoing segment
 *   len              - payload length of the outgoing segment (may be
 *                      reduced via CHECK_DATALEN to fit the options)
 *   p_mptcp_acknow   - in/out: cleared when no signalling option was
 *                      actually emitted, so tcp_output does not send a
 *                      bare duplicate ACK
 *   do_not_compress  - out: set when the segment carries MPTCP signals
 *                      and must not be collapsed/compressed
 *
 * Returns the new total option length (<= MAX_TCPOPTLEN).
 */
unsigned int
mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt,
    unsigned int optlen, int flags, int len,
    boolean_t *p_mptcp_acknow, boolean_t *do_not_compress)
{
	struct inpcb *inp = (struct inpcb *)tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct mptcb *mp_tp = tptomptp(tp);
	boolean_t do_csum = FALSE;
	boolean_t send_64bit_dsn = FALSE;
	boolean_t send_64bit_ack = FALSE;
	/* snapshot of the signalling flags, compared at ret_optlen */
	u_int32_t old_mpt_flags = tp->t_mpflags & TMPF_MPTCP_SIGNALS;
	boolean_t initial_data = FALSE;

	if (mptcp_enable == 0 || mp_tp == NULL || tp->t_state == TCPS_CLOSED) {
		/* do nothing */
		goto ret_optlen;
	}

	socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte));

	if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
		do_csum = TRUE;
	}

	/* tcp_output handles the SYN path separately */
	if (flags & TH_SYN) {
		goto ret_optlen;
	}

	if ((MAX_TCPOPTLEN - optlen) <
	    sizeof(struct mptcp_mpcapable_opt_common)) {
		os_log_error(mptcp_log_handle, "%s - %lx: no space left %d flags %x tp->t_mpflags %x len %d\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte),
		    optlen, flags, tp->t_mpflags, len);
		goto ret_optlen;
	}

	/* fallback to plain TCP: send MP_FAIL or the infinite mapping once */
	if (tp->t_mpflags & TMPF_TCP_FALLBACK) {
		if (tp->t_mpflags & TMPF_SND_MPFAIL) {
			optlen = mptcp_send_mpfail(tp, opt, optlen);
		} else if (!(tp->t_mpflags & TMPF_INFIN_SENT)) {
			optlen = mptcp_send_infinite_mapping(tp, opt, optlen);
		}

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* does this segment carry the very first byte of the data stream? */
	if (len > 0 && off == 0 && tp->t_mpflags & TMPF_SEND_DSN && tp->t_mpflags & TMPF_SND_KEYS) {
		uint64_t dsn = 0;
		uint32_t relseq = 0;
		uint16_t data_len = 0, dss_csum = 0;
		mptcp_output_getm_dsnmap64(so, off, &dsn, &relseq, &data_len, &dss_csum);
		if (dsn == mp_tp->mpt_local_idsn + 1) {
			initial_data = TRUE;
		}
	}

	/* send MP_CAPABLE when it's the INITIAL ACK or data */
	if (tp->t_mpflags & TMPF_SND_KEYS &&
	    (mp_tp->mpt_version == MPTCP_VERSION_0 || initial_data ||
	    (mp_tp->mpt_sndnxt == mp_tp->mpt_local_idsn + 1 && len == 0))) {
		struct mptcp_mpcapable_opt_rsp2 mptcp_opt;
		boolean_t send_data_level_details = tp->t_mpflags & TMPF_SEND_DSN ? TRUE : FALSE;

		/* rsp1 (keys) + optional 2-byte data_len + optional 2-byte csum */
		uint8_t mmco_len = sizeof(struct mptcp_mpcapable_opt_rsp1);
		if (send_data_level_details) {
			mmco_len += 2;
			if (do_csum) {
				mmco_len += 2;
			}
		}
		if ((MAX_TCPOPTLEN - optlen) < mmco_len) {
			os_log_error(mptcp_log_handle, "%s - %lx: not enough space in TCP option, "
			    "optlen: %u, mmco_len: %d\n", __func__,
			    (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte),
			    optlen, mmco_len);
			goto ret_optlen;
		}

		bzero(&mptcp_opt, sizeof(struct mptcp_mpcapable_opt_rsp2));
		mptcp_opt.mmc_rsp1.mmc_common.mmco_kind = TCPOPT_MULTIPATH;
		mptcp_opt.mmc_rsp1.mmc_common.mmco_len = mmco_len;
		mptcp_opt.mmc_rsp1.mmc_common.mmco_subtype = MPO_CAPABLE;
		mptcp_opt.mmc_rsp1.mmc_common.mmco_version = mp_tp->mpt_version;
		mptcp_opt.mmc_rsp1.mmc_common.mmco_flags |= MPCAP_PROPOSAL_SBIT;
		if (do_csum) {
			mptcp_opt.mmc_rsp1.mmc_common.mmco_flags |= MPCAP_CHECKSUM_CBIT;
		}
		mptcp_opt.mmc_rsp1.mmc_localkey = mp_tp->mpt_localkey;
		mptcp_opt.mmc_rsp1.mmc_remotekey = mp_tp->mpt_remotekey;
		if (send_data_level_details) {
			mptcp_output_getm_data_level_details(so, off, &mptcp_opt.data_len, &mptcp_opt.csum);
			mptcp_opt.data_len = htons(mptcp_opt.data_len);
		}
		memcpy(opt + optlen, &mptcp_opt, mmco_len);

		/* v1 keeps TMPF_SND_KEYS until the keys are acknowledged */
		if (mp_tp->mpt_version == MPTCP_VERSION_0) {
			tp->t_mpflags &= ~TMPF_SND_KEYS;
		}
		optlen += mmco_len;

		if (!tp->t_mpuna) {
			tp->t_mpuna = tp->snd_una;
		} else {
			/* its a retransmission of the MP_CAPABLE ACK */
		}

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* third ACK of the MP_JOIN handshake, retransmitted by TCPT_JACK_RXMT */
	if (tp->t_mpflags & TMPF_SND_JACK) {
		*do_not_compress = TRUE;
		optlen = mptcp_setup_join_ack_opts(tp, opt, optlen);
		if (!tp->t_mpuna) {
			tp->t_mpuna = tp->snd_una;
		}
		/* Start a timer to retransmit the ACK */
		tp->t_timer[TCPT_JACK_RXMT] =
		    OFFSET_FROM_START(tp, tcp_jack_rxmt);

		tp->t_mpflags &= ~TMPF_SND_JACK;
		goto ret_optlen;
	}

	if (!(tp->t_mpflags & (TMPF_MPTCP_TRUE | TMPF_PREESTABLISHED))) {
		goto ret_optlen;
	}
	/*
	 * From here on, all options are sent only if MPTCP_TRUE
	 * or when data is sent early on as in Fast Join
	 */

	if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
	    (tp->t_mpflags & TMPF_SND_REM_ADDR)) {
		int rem_opt_len = sizeof(struct mptcp_remaddr_opt);
		if ((optlen + rem_opt_len) <= MAX_TCPOPTLEN) {
			mptcp_send_remaddr_opt(tp,
			    (struct mptcp_remaddr_opt *)(opt + optlen));
			optlen += rem_opt_len;
		} else {
			/* no room: drop the request rather than retry forever */
			tp->t_mpflags &= ~TMPF_SND_REM_ADDR;
		}

		*do_not_compress = TRUE;
	}

	if (tp->t_mpflags & TMPF_MPTCP_ECHO_ADDR) {
		optlen = mptcp_echo_add_addr(tp, opt, optlen);
	}

	if (tp->t_mpflags & TMPF_SND_MPPRIO) {
		optlen = mptcp_snd_mpprio(tp, opt, optlen);

		*do_not_compress = TRUE;
	}

	if (mp_tp->mpt_flags & MPTCPF_SND_64BITDSN) {
		send_64bit_dsn = TRUE;
	}
	if (mp_tp->mpt_flags & MPTCPF_SND_64BITACK) {
		send_64bit_ack = TRUE;
	}

/* bail out to ret_optlen when the DSS variant would not fit */
#define CHECK_OPTLEN {                                                                  \
	if ((MAX_TCPOPTLEN - optlen) < dssoptlen) {                                     \
	        os_log_error(mptcp_log_handle, "%s: dssoptlen %d optlen %d \n", __func__,       \
	            dssoptlen, optlen);                                                 \
	        goto ret_optlen;                                                        \
	}                                                                               \
}

/* piggy-back a DATA_FIN on the mapping when it ends the send space */
#define DO_FIN(dsn_opt) {                               \
	int sndfin = 0;                                 \
	sndfin = mptcp_ok_to_fin(tp, dsn_opt.mdss_dsn, len);    \
	if (sndfin) {                                   \
	        dsn_opt.mdss_copt.mdss_flags |= MDSS_F; \
	        dsn_opt.mdss_data_len += 1;             \
	        if (do_csum)                            \
	                dss_csum = in_addword(dss_csum, 1); \
	}                                               \
}

/* shrink the payload so payload + options fits into t_maxopd */
#define CHECK_DATALEN {                                                         \
	/* MPTCP socket does not support IP options */                          \
	if ((len + optlen + dssoptlen) > tp->t_maxopd) {                        \
	        os_log_error(mptcp_log_handle, "%s: nosp %d len %d opt %d %d %d\n",     \
	            __func__, len, dssoptlen, optlen,                           \
	            tp->t_maxseg, tp->t_maxopd);                                \
	        /* remove option length from payload len */                     \
	        len = tp->t_maxopd - optlen - dssoptlen;                        \
	}                                                                       \
}

	if ((tp->t_mpflags & TMPF_SEND_DSN) &&
	    (send_64bit_dsn)) {
		/*
		 * If there was the need to send 64-bit Data ACK along
		 * with 64-bit DSN, then 26 or 28 bytes would be used.
		 * With timestamps and NOOP padding that will cause
		 * overflow. Hence, in the rare event that both 64-bit
		 * DSN and 64-bit ACK have to be sent, delay the send of
		 * 64-bit ACK until our 64-bit DSN is acked with a 64-bit ack.
		 * XXX If this delay causes issue, remove the 2-byte padding.
		 */
		struct mptcp_dss64_ack32_opt dsn_ack_opt;
		uint8_t dssoptlen = sizeof(dsn_ack_opt);
		uint16_t dss_csum;

		if (do_csum) {
			dssoptlen += 2;
		}

		CHECK_OPTLEN;

		bzero(&dsn_ack_opt, sizeof(dsn_ack_opt));
		dsn_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dsn_ack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dsn_ack_opt.mdss_copt.mdss_len = dssoptlen;
		dsn_ack_opt.mdss_copt.mdss_flags |=
		    MDSS_M | MDSS_m | MDSS_A;

		CHECK_DATALEN;

		mptcp_output_getm_dsnmap64(so, off,
		    &dsn_ack_opt.mdss_dsn,
		    &dsn_ack_opt.mdss_subflow_seqn,
		    &dsn_ack_opt.mdss_data_len,
		    &dss_csum);

		if ((dsn_ack_opt.mdss_data_len == 0) ||
		    (dsn_ack_opt.mdss_dsn == 0)) {
			goto ret_optlen;
		}

		if (tp->t_mpflags & TMPF_SEND_DFIN) {
			DO_FIN(dsn_ack_opt);
		}

		dsn_ack_opt.mdss_ack =
		    htonl(MPTCP_DATAACK_LOW32(mp_tp->mpt_rcvnxt));

		dsn_ack_opt.mdss_dsn = mptcp_hton64(dsn_ack_opt.mdss_dsn);
		dsn_ack_opt.mdss_subflow_seqn = htonl(
			dsn_ack_opt.mdss_subflow_seqn);
		dsn_ack_opt.mdss_data_len = htons(
			dsn_ack_opt.mdss_data_len);

		memcpy(opt + optlen, &dsn_ack_opt, sizeof(dsn_ack_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dsn_ack_opt))) = dss_csum;
		}

		optlen += dssoptlen;

		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* 32-bit DSN without a Data ACK (ACKNOW not set) */
	if ((tp->t_mpflags & TMPF_SEND_DSN) &&
	    (!send_64bit_dsn) &&
	    !(tp->t_mpflags & TMPF_MPTCP_ACKNOW)) {
		struct mptcp_dsn_opt dsn_opt;
		uint8_t dssoptlen = sizeof(struct mptcp_dsn_opt);
		uint16_t dss_csum;

		if (do_csum) {
			dssoptlen += 2;
		}

		CHECK_OPTLEN;

		bzero(&dsn_opt, sizeof(dsn_opt));
		dsn_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dsn_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dsn_opt.mdss_copt.mdss_len = dssoptlen;
		dsn_opt.mdss_copt.mdss_flags |= MDSS_M;

		CHECK_DATALEN;

		mptcp_output_getm_dsnmap32(so, off, &dsn_opt.mdss_dsn,
		    &dsn_opt.mdss_subflow_seqn,
		    &dsn_opt.mdss_data_len,
		    &dss_csum);

		if ((dsn_opt.mdss_data_len == 0) ||
		    (dsn_opt.mdss_dsn == 0)) {
			goto ret_optlen;
		}

		if (tp->t_mpflags & TMPF_SEND_DFIN) {
			DO_FIN(dsn_opt);
		}

		dsn_opt.mdss_dsn = htonl(dsn_opt.mdss_dsn);
		dsn_opt.mdss_subflow_seqn = htonl(dsn_opt.mdss_subflow_seqn);
		dsn_opt.mdss_data_len = htons(dsn_opt.mdss_data_len);
		memcpy(opt + optlen, &dsn_opt, sizeof(dsn_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dsn_opt))) = dss_csum;
		}

		optlen += dssoptlen;
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* 32-bit Data ACK option */
	if ((tp->t_mpflags & TMPF_MPTCP_ACKNOW) &&
	    (!send_64bit_ack) &&
	    !(tp->t_mpflags & TMPF_SEND_DSN) &&
	    !(tp->t_mpflags & TMPF_SEND_DFIN)) {
		struct mptcp_data_ack_opt dack_opt;
		uint8_t dssoptlen = 0;
do_ack32_only:
		/* also reached from the DSS+DACK path when the mapping is empty */
		dssoptlen = sizeof(dack_opt);

		CHECK_OPTLEN;

		bzero(&dack_opt, dssoptlen);
		dack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dack_opt.mdss_copt.mdss_len = dssoptlen;
		dack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dack_opt.mdss_copt.mdss_flags |= MDSS_A;
		dack_opt.mdss_ack =
		    htonl(MPTCP_DATAACK_LOW32(mp_tp->mpt_rcvnxt));
		memcpy(opt + optlen, &dack_opt, dssoptlen);
		optlen += dssoptlen;
		VERIFY(optlen <= MAX_TCPOPTLEN);
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;
		goto ret_optlen;
	}

	/* 64-bit Data ACK option */
	if ((tp->t_mpflags & TMPF_MPTCP_ACKNOW) &&
	    (send_64bit_ack) &&
	    !(tp->t_mpflags & TMPF_SEND_DSN) &&
	    !(tp->t_mpflags & TMPF_SEND_DFIN)) {
		struct mptcp_data_ack64_opt dack_opt;
		uint8_t dssoptlen = 0;
do_ack64_only:
		/* also reached from the 32-bit DSS + 64-bit DACK path */
		dssoptlen = sizeof(dack_opt);

		CHECK_OPTLEN;

		bzero(&dack_opt, dssoptlen);
		dack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dack_opt.mdss_copt.mdss_len = dssoptlen;
		dack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dack_opt.mdss_copt.mdss_flags |= (MDSS_A | MDSS_a);
		dack_opt.mdss_ack = mptcp_hton64(mp_tp->mpt_rcvnxt);
		/*
		 * The other end should retransmit 64-bit DSN until it
		 * receives a 64-bit ACK.
		 */
		mp_tp->mpt_flags &= ~MPTCPF_SND_64BITACK;
		memcpy(opt + optlen, &dack_opt, dssoptlen);
		optlen += dssoptlen;
		VERIFY(optlen <= MAX_TCPOPTLEN);
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;
		goto ret_optlen;
	}

	/* 32-bit DSS+Data ACK option */
	if ((tp->t_mpflags & TMPF_SEND_DSN) &&
	    (!send_64bit_dsn) &&
	    (!send_64bit_ack) &&
	    (tp->t_mpflags & TMPF_MPTCP_ACKNOW)) {
		struct mptcp_dss_ack_opt dss_ack_opt;
		uint8_t dssoptlen = sizeof(dss_ack_opt);
		uint16_t dss_csum;

		if (do_csum) {
			dssoptlen += 2;
		}

		CHECK_OPTLEN;

		bzero(&dss_ack_opt, sizeof(dss_ack_opt));
		dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dss_ack_opt.mdss_copt.mdss_len = dssoptlen;
		dss_ack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dss_ack_opt.mdss_copt.mdss_flags |= MDSS_A | MDSS_M;
		dss_ack_opt.mdss_ack =
		    htonl(MPTCP_DATAACK_LOW32(mp_tp->mpt_rcvnxt));

		CHECK_DATALEN;

		mptcp_output_getm_dsnmap32(so, off, &dss_ack_opt.mdss_dsn,
		    &dss_ack_opt.mdss_subflow_seqn,
		    &dss_ack_opt.mdss_data_len,
		    &dss_csum);

		/* no mapping to send: fall back to a pure 32-bit Data ACK */
		if ((dss_ack_opt.mdss_data_len == 0) ||
		    (dss_ack_opt.mdss_dsn == 0)) {
			goto do_ack32_only;
		}

		if (tp->t_mpflags & TMPF_SEND_DFIN) {
			DO_FIN(dss_ack_opt);
		}

		dss_ack_opt.mdss_dsn = htonl(dss_ack_opt.mdss_dsn);
		dss_ack_opt.mdss_subflow_seqn =
		    htonl(dss_ack_opt.mdss_subflow_seqn);
		dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len);
		memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum;
		}

		optlen += dssoptlen;

		if (optlen > MAX_TCPOPTLEN) {
			panic("optlen too large");
		}
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;
		goto ret_optlen;
	}

	/* 32-bit DSS + 64-bit DACK option */
	if ((tp->t_mpflags & TMPF_SEND_DSN) &&
	    (!send_64bit_dsn) &&
	    (send_64bit_ack) &&
	    (tp->t_mpflags & TMPF_MPTCP_ACKNOW)) {
		struct mptcp_dss32_ack64_opt dss_ack_opt;
		uint8_t dssoptlen = sizeof(dss_ack_opt);
		uint16_t dss_csum;

		if (do_csum) {
			dssoptlen += 2;
		}

		CHECK_OPTLEN;

		bzero(&dss_ack_opt, sizeof(dss_ack_opt));
		dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dss_ack_opt.mdss_copt.mdss_len = dssoptlen;
		dss_ack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dss_ack_opt.mdss_copt.mdss_flags |= MDSS_M | MDSS_A | MDSS_a;
		dss_ack_opt.mdss_ack =
		    mptcp_hton64(mp_tp->mpt_rcvnxt);

		CHECK_DATALEN;

		mptcp_output_getm_dsnmap32(so, off, &dss_ack_opt.mdss_dsn,
		    &dss_ack_opt.mdss_subflow_seqn,
		    &dss_ack_opt.mdss_data_len,
		    &dss_csum);

		/* no mapping to send: fall back to a pure 64-bit Data ACK */
		if ((dss_ack_opt.mdss_data_len == 0) ||
		    (dss_ack_opt.mdss_dsn == 0)) {
			goto do_ack64_only;
		}

		if (tp->t_mpflags & TMPF_SEND_DFIN) {
			DO_FIN(dss_ack_opt);
		}

		dss_ack_opt.mdss_dsn = htonl(dss_ack_opt.mdss_dsn);
		dss_ack_opt.mdss_subflow_seqn =
		    htonl(dss_ack_opt.mdss_subflow_seqn);
		dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len);
		memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum;
		}

		optlen += dssoptlen;

		if (optlen > MAX_TCPOPTLEN) {
			panic("optlen too large");
		}
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* stand-alone DATA_FIN (no data to map): 1-byte mapping at sndmax-1 */
	if (tp->t_mpflags & TMPF_SEND_DFIN) {
		uint8_t dssoptlen = sizeof(struct mptcp_dss_ack_opt);
		struct mptcp_dss_ack_opt dss_ack_opt;
		uint16_t dss_csum;

		if (do_csum) {
			/* checksum over the pseudo-header of the 1-byte FIN mapping */
			uint64_t dss_val = mptcp_hton64(mp_tp->mpt_sndmax - 1);
			uint16_t dlen = htons(1);
			uint32_t sseq = 0;
			uint32_t sum;


			dssoptlen += 2;

			sum = in_pseudo64(dss_val, sseq, dlen);
			ADDCARRY(sum);
			dss_csum = ~sum & 0xffff;
		}

		CHECK_OPTLEN;

		bzero(&dss_ack_opt, sizeof(dss_ack_opt));

		/*
		 * Data FIN occupies one sequence space.
		 * Don't send it if it has been Acked.
		 */
		if ((mp_tp->mpt_sndnxt + 1 != mp_tp->mpt_sndmax) ||
		    (mp_tp->mpt_snduna == mp_tp->mpt_sndmax)) {
			goto ret_optlen;
		}

		dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dss_ack_opt.mdss_copt.mdss_len = dssoptlen;
		dss_ack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dss_ack_opt.mdss_copt.mdss_flags |= MDSS_A | MDSS_M | MDSS_F;
		dss_ack_opt.mdss_ack =
		    htonl(MPTCP_DATAACK_LOW32(mp_tp->mpt_rcvnxt));
		dss_ack_opt.mdss_dsn =
		    htonl(MPTCP_DATASEQ_LOW32(mp_tp->mpt_sndmax - 1));
		dss_ack_opt.mdss_subflow_seqn = 0;
		dss_ack_opt.mdss_data_len = 1;
		dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len);
		memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum;
		}

		optlen += dssoptlen;

		*do_not_compress = TRUE;
	}

ret_optlen:
	if (TRUE == *p_mptcp_acknow) {
		u_int32_t new_mpt_flags = tp->t_mpflags & TMPF_MPTCP_SIGNALS;

		/*
		 * If none of the above mpflags were acted on by
		 * this routine, reset these flags and set p_mptcp_acknow
		 * to false.
		 *
		 * XXX The reset value of p_mptcp_acknow can be used
		 * to communicate tcp_output to NOT send a pure ack without any
		 * MPTCP options as it will be treated as a dup ack.
		 * Since the instances of mptcp_setup_opts not acting on
		 * these options are mostly corner cases and sending a dup
		 * ack here would only have an impact if the system
		 * has sent consecutive dup acks before this false one,
		 * we haven't modified the logic in tcp_output to avoid
		 * that.
		 */
		if (old_mpt_flags == new_mpt_flags) {
			tp->t_mpflags &= ~TMPF_MPTCP_SIGNALS;
			*p_mptcp_acknow = FALSE;
		}
	}

	return optlen;
}
889
890 /*
891 * MPTCP Options Input Processing
892 */
893
894 static int
mptcp_sanitize_option(struct tcpcb * tp,int mptcp_subtype)895 mptcp_sanitize_option(struct tcpcb *tp, int mptcp_subtype)
896 {
897 struct mptcb *mp_tp = tptomptp(tp);
898 int ret = 1;
899
900 switch (mptcp_subtype) {
901 case MPO_CAPABLE:
902 break;
903 case MPO_JOIN: /* fall through */
904 case MPO_DSS: /* fall through */
905 case MPO_FASTCLOSE: /* fall through */
906 case MPO_FAIL: /* fall through */
907 case MPO_REMOVE_ADDR: /* fall through */
908 case MPO_ADD_ADDR: /* fall through */
909 case MPO_PRIO: /* fall through */
910 if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) {
911 ret = 0;
912 }
913 break;
914 default:
915 ret = 0;
916 os_log_error(mptcp_log_handle, "%s - %lx: type = %d \n", __func__,
917 (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte), mptcp_subtype);
918 break;
919 }
920 return ret;
921 }
922
923 static int
mptcp_valid_mpcapable_common_opt(u_char * cp)924 mptcp_valid_mpcapable_common_opt(u_char *cp)
925 {
926 struct mptcp_mpcapable_opt_common *rsp =
927 (struct mptcp_mpcapable_opt_common *)cp;
928
929 /* mmco_kind, mmco_len and mmco_subtype are validated before */
930
931 if (!(rsp->mmco_flags & MPCAP_PROPOSAL_SBIT)) {
932 return 0;
933 }
934
935 if (rsp->mmco_flags & (MPCAP_BBIT | MPCAP_DBIT |
936 MPCAP_EBIT | MPCAP_FBIT | MPCAP_GBIT)) {
937 return 0;
938 }
939
940 return 1;
941 }
942
943
944 static void
mptcp_do_mpcapable_opt(struct tcpcb * tp,u_char * cp,struct tcphdr * th,uint8_t optlen)945 mptcp_do_mpcapable_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th,
946 uint8_t optlen)
947 {
948 struct mptcp_mpcapable_opt_rsp *rsp = NULL;
949 struct mptcb *mp_tp = tptomptp(tp);
950 struct mptses *mpte = mp_tp->mpt_mpte;
951
952 /* Only valid on SYN/ACK */
953 if ((th->th_flags & (TH_SYN | TH_ACK)) != (TH_SYN | TH_ACK)) {
954 return;
955 }
956
957 /* Validate the kind, len, flags */
958 if (mptcp_valid_mpcapable_common_opt(cp) != 1) {
959 tcpstat.tcps_invalid_mpcap++;
960 return;
961 }
962
963 /* handle SYN/ACK retransmission by acknowledging with ACK */
964 if (mp_tp->mpt_state >= MPTCPS_ESTABLISHED) {
965 return;
966 }
967
968 /* A SYN/ACK contains peer's key and flags */
969 if (optlen != sizeof(struct mptcp_mpcapable_opt_rsp)) {
970 /* complain */
971 os_log_error(mptcp_log_handle, "%s - %lx: SYN_ACK optlen = %u, sizeof mp opt = %lu \n",
972 __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), optlen,
973 sizeof(struct mptcp_mpcapable_opt_rsp));
974 tcpstat.tcps_invalid_mpcap++;
975 return;
976 }
977
978 /*
979 * If checksum flag is set, enable MPTCP checksum, even if
980 * it was not negotiated on the first SYN.
981 */
982 if (((struct mptcp_mpcapable_opt_common *)cp)->mmco_flags &
983 MPCAP_CHECKSUM_CBIT) {
984 mp_tp->mpt_flags |= MPTCPF_CHECKSUM;
985 }
986
987 if (((struct mptcp_mpcapable_opt_common *)cp)->mmco_flags &
988 MPCAP_UNICAST_IPBIT) {
989 mpte->mpte_flags |= MPTE_UNICAST_IP;
990
991 /* We need an explicit signal for the addresses - zero the existing ones */
992 memset(&mpte->mpte_sub_dst_v4, 0, sizeof(mpte->mpte_sub_dst_v4));
993 memset(&mpte->mpte_sub_dst_v6, 0, sizeof(mpte->mpte_sub_dst_v6));
994 }
995
996 rsp = (struct mptcp_mpcapable_opt_rsp *)cp;
997 mp_tp->mpt_remotekey = rsp->mmc_localkey;
998 /* For now just downgrade to the peer's version */
999 mp_tp->mpt_peer_version = rsp->mmc_common.mmco_version;
1000 if (rsp->mmc_common.mmco_version < mp_tp->mpt_version) {
1001 os_log_error(mptcp_log_handle, "local version: %d > peer version %d", mp_tp->mpt_version, rsp->mmc_common.mmco_version);
1002 mp_tp->mpt_version = rsp->mmc_common.mmco_version;
1003 tcpstat.tcps_mp_verdowngrade++;
1004 return;
1005 }
1006 if (mptcp_init_remote_parms(mp_tp) != 0) {
1007 tcpstat.tcps_invalid_mpcap++;
1008 return;
1009 }
1010 tcp_heuristic_mptcp_success(tp);
1011 tcp_cache_update_mptcp_version(tp, TRUE);
1012 tp->t_mpflags |= (TMPF_SND_KEYS | TMPF_MPTCP_TRUE);
1013 }
1014
1015
1016 static void
mptcp_do_mpjoin_opt(struct tcpcb * tp,u_char * cp,struct tcphdr * th,uint8_t optlen)1017 mptcp_do_mpjoin_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th, uint8_t optlen)
1018 {
1019 #define MPTCP_JOPT_ERROR_PATH(tp) { \
1020 tcpstat.tcps_invalid_joins++; \
1021 if (tp->t_inpcb->inp_socket != NULL) { \
1022 soevent(tp->t_inpcb->inp_socket, \
1023 SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); \
1024 } \
1025 }
1026 int error = 0;
1027 struct mptcp_mpjoin_opt_rsp *join_rsp =
1028 (struct mptcp_mpjoin_opt_rsp *)cp;
1029
1030 /* Only valid on SYN/ACK */
1031 if ((th->th_flags & (TH_SYN | TH_ACK)) != (TH_SYN | TH_ACK)) {
1032 return;
1033 }
1034
1035 if (optlen != sizeof(struct mptcp_mpjoin_opt_rsp)) {
1036 os_log_error(mptcp_log_handle, "%s - %lx: SYN_ACK: unexpected optlen = %u mp option = %lu\n",
1037 __func__, (unsigned long)VM_KERNEL_ADDRPERM(tptomptp(tp)->mpt_mpte),
1038 optlen, sizeof(struct mptcp_mpjoin_opt_rsp));
1039 tp->t_mpflags &= ~TMPF_PREESTABLISHED;
1040 /* send RST and close */
1041 MPTCP_JOPT_ERROR_PATH(tp);
1042 return;
1043 }
1044
1045 mptcp_set_raddr_rand(tp->t_local_aid, tptomptp(tp),
1046 join_rsp->mmjo_addr_id, join_rsp->mmjo_rand);
1047 error = mptcp_validate_join_hmac(tp,
1048 (u_char*)&join_rsp->mmjo_mac, HMAC_TRUNCATED_SYNACK);
1049 if (error) {
1050 os_log_error(mptcp_log_handle, "%s - %lx: SYN_ACK error = %d \n",
1051 __func__, (unsigned long)VM_KERNEL_ADDRPERM(tptomptp(tp)->mpt_mpte),
1052 error);
1053 tp->t_mpflags &= ~TMPF_PREESTABLISHED;
1054 /* send RST and close */
1055 MPTCP_JOPT_ERROR_PATH(tp);
1056 return;
1057 }
1058 tp->t_mpflags |= (TMPF_SENT_JOIN | TMPF_SND_JACK);
1059 }
1060
1061 static int
mptcp_validate_join_hmac(struct tcpcb * tp,u_char * hmac,int mac_len)1062 mptcp_validate_join_hmac(struct tcpcb *tp, u_char* hmac, int mac_len)
1063 {
1064 u_char digest[MAX(SHA1_RESULTLEN, SHA256_DIGEST_LENGTH)] = {0};
1065 struct mptcb *mp_tp = tptomptp(tp);
1066 u_int32_t rem_rand, loc_rand;
1067
1068 rem_rand = loc_rand = 0;
1069
1070 mptcp_get_rands(tp->t_local_aid, mp_tp, &loc_rand, &rem_rand);
1071 if ((rem_rand == 0) || (loc_rand == 0)) {
1072 return -1;
1073 }
1074
1075 if (mp_tp->mpt_version == MPTCP_VERSION_0) {
1076 mptcp_hmac_sha1(mp_tp->mpt_remotekey, mp_tp->mpt_localkey, rem_rand, loc_rand,
1077 digest);
1078 } else {
1079 uint32_t data[2];
1080 data[0] = rem_rand;
1081 data[1] = loc_rand;
1082 mptcp_hmac_sha256(mp_tp->mpt_remotekey, mp_tp->mpt_localkey, (u_char *)data, 8, digest);
1083 }
1084
1085 if (bcmp(digest, hmac, mac_len) == 0) {
1086 return 0; /* matches */
1087 } else {
1088 printf("%s: remote key %llx local key %llx remote rand %x "
1089 "local rand %x \n", __func__, mp_tp->mpt_remotekey, mp_tp->mpt_localkey,
1090 rem_rand, loc_rand);
1091 return -1;
1092 }
1093 }
1094
1095 /*
1096 * Update the mptcb send state variables, but the actual sbdrop occurs
1097 * in MPTCP layer
1098 */
void
mptcp_data_ack_rcvd(struct mptcb *mp_tp, struct tcpcb *tp, u_int64_t full_dack)
{
	/* Number of data-level bytes newly acknowledged by this DATA_ACK. */
	uint64_t acked = full_dack - mp_tp->mpt_snduna;

	VERIFY(acked <= INT_MAX);

	if (acked) {
		struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);

		if (acked > mp_so->so_snd.sb_cc) {
			/*
			 * Acked more than what is in the send buffer.  This is
			 * only legitimate when the extra byte is the DATA_FIN
			 * (acked == sb_cc + 1) and we are at least in
			 * FIN_WAIT_1; anything else is logged as anomalous.
			 */
			if (acked > mp_so->so_snd.sb_cc + 1 ||
			    mp_tp->mpt_state < MPTCPS_FIN_WAIT_1) {
				os_log_error(mptcp_log_handle, "%s - %lx: acked %u, sb_cc %u full %u suna %u state %u\n",
				    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte),
				    (uint32_t)acked, mp_so->so_snd.sb_cc,
				    (uint32_t)full_dack, (uint32_t)mp_tp->mpt_snduna,
				    mp_tp->mpt_state);
			}

			/* Drop everything that is left in the send buffer. */
			sbdrop(&mp_so->so_snd, (int)mp_so->so_snd.sb_cc);
		} else {
			sbdrop(&mp_so->so_snd, (int)acked);
		}

		/* Advance the data-level unacknowledged pointer. */
		mp_tp->mpt_snduna += acked;
		/* In degraded mode, we may get some Data ACKs */
		if ((tp->t_mpflags & TMPF_TCP_FALLBACK) &&
		    !(mp_tp->mpt_flags & MPTCPF_POST_FALLBACK_SYNC) &&
		    MPTCP_SEQ_GT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna)) {
			/* bring back sndnxt to retransmit MPTCP data */
			mp_tp->mpt_sndnxt = mp_tp->mpt_dsn_at_csum_fail;
			mp_tp->mpt_flags |= MPTCPF_POST_FALLBACK_SYNC;
			tp->t_inpcb->inp_socket->so_flags1 |=
			    SOF1_POST_FALLBACK_SYNC;
		}

		/* Newly acked data may make reinject-queue entries obsolete. */
		mptcp_clean_reinjectq(mp_tp->mpt_mpte);

		/* Space freed in the send buffer - wake up writers. */
		sowwakeup(mp_so);
	}
	/* Everything (incl. DATA_FIN) acked while closing: advance the FSM. */
	if (full_dack == mp_tp->mpt_sndmax &&
	    mp_tp->mpt_state >= MPTCPS_FIN_WAIT_1) {
		mptcp_close_fsm(mp_tp, MPCE_RECV_DATA_ACK);
		tp->t_mpflags &= ~TMPF_SEND_DFIN;
	}

	/*
	 * Stop resending the keys once data past the initial DSN has been
	 * acked (the peer demonstrably received them).
	 */
	if ((tp->t_mpflags & TMPF_SND_KEYS) &&
	    MPTCP_SEQ_GT(mp_tp->mpt_snduna, mp_tp->mpt_local_idsn + 1)) {
		tp->t_mpflags &= ~TMPF_SND_KEYS;
	}
}
1151
1152 void
mptcp_update_window_wakeup(struct tcpcb * tp)1153 mptcp_update_window_wakeup(struct tcpcb *tp)
1154 {
1155 struct mptcb *mp_tp = tptomptp(tp);
1156
1157 socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte));
1158
1159 if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) {
1160 mp_tp->mpt_sndwnd = tp->snd_wnd;
1161 mp_tp->mpt_sndwl1 = mp_tp->mpt_rcvnxt;
1162 mp_tp->mpt_sndwl2 = mp_tp->mpt_snduna;
1163 }
1164
1165 sowwakeup(tp->t_inpcb->inp_socket);
1166 }
1167
1168 static void
mptcp_update_window(struct mptcb * mp_tp,u_int64_t ack,u_int64_t seq,u_int32_t tiwin)1169 mptcp_update_window(struct mptcb *mp_tp, u_int64_t ack, u_int64_t seq, u_int32_t tiwin)
1170 {
1171 if (MPTCP_SEQ_LT(mp_tp->mpt_sndwl1, seq) ||
1172 (mp_tp->mpt_sndwl1 == seq &&
1173 (MPTCP_SEQ_LT(mp_tp->mpt_sndwl2, ack) ||
1174 (mp_tp->mpt_sndwl2 == ack && tiwin > mp_tp->mpt_sndwnd)))) {
1175 mp_tp->mpt_sndwnd = tiwin;
1176 mp_tp->mpt_sndwl1 = seq;
1177 mp_tp->mpt_sndwl2 = ack;
1178 }
1179 }
1180
1181 static void
mptcp_do_dss_opt_ack_meat(u_int64_t full_dack,u_int64_t full_dsn,struct tcpcb * tp,u_int32_t tiwin)1182 mptcp_do_dss_opt_ack_meat(u_int64_t full_dack, u_int64_t full_dsn,
1183 struct tcpcb *tp, u_int32_t tiwin)
1184 {
1185 struct mptcb *mp_tp = tptomptp(tp);
1186 int close_notify = 0;
1187
1188 tp->t_mpflags |= TMPF_RCVD_DACK;
1189
1190 if (MPTCP_SEQ_LEQ(full_dack, mp_tp->mpt_sndmax) &&
1191 MPTCP_SEQ_GEQ(full_dack, mp_tp->mpt_snduna)) {
1192 mptcp_data_ack_rcvd(mp_tp, tp, full_dack);
1193 if (mp_tp->mpt_state > MPTCPS_FIN_WAIT_2) {
1194 close_notify = 1;
1195 }
1196 if (mp_tp->mpt_flags & MPTCPF_RCVD_64BITACK) {
1197 mp_tp->mpt_flags &= ~MPTCPF_RCVD_64BITACK;
1198 mp_tp->mpt_flags &= ~MPTCPF_SND_64BITDSN;
1199 }
1200 mptcp_notify_mpready(tp->t_inpcb->inp_socket);
1201 if (close_notify) {
1202 mptcp_notify_close(tp->t_inpcb->inp_socket);
1203 }
1204 }
1205
1206 mptcp_update_window(mp_tp, full_dack, full_dsn, tiwin);
1207 }
1208
1209 static void
mptcp_do_dss_opt_meat(u_char * cp,struct tcpcb * tp,struct tcphdr * th)1210 mptcp_do_dss_opt_meat(u_char *cp, struct tcpcb *tp, struct tcphdr *th)
1211 {
1212 struct mptcp_dss_copt *dss_rsp = (struct mptcp_dss_copt *)cp;
1213 u_int64_t full_dack = 0;
1214 u_int32_t tiwin = th->th_win << tp->snd_scale;
1215 struct mptcb *mp_tp = tptomptp(tp);
1216 int csum_len = 0;
1217
1218 #define MPTCP_DSS_OPT_SZ_CHK(len, expected_len) { \
1219 if (len != expected_len) { \
1220 os_log_error(mptcp_log_handle, "%s - %lx: bad len = %d dss: %x\n",\
1221 __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte), \
1222 len, dss_rsp->mdss_flags); \
1223 return; \
1224 } \
1225 }
1226
1227 if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
1228 csum_len = 2;
1229 }
1230
1231 dss_rsp->mdss_flags &= (MDSS_A | MDSS_a | MDSS_M | MDSS_m);
1232 switch (dss_rsp->mdss_flags) {
1233 case (MDSS_M):
1234 {
1235 /* 32-bit DSS, No Data ACK */
1236 struct mptcp_dsn_opt *dss_rsp1;
1237 dss_rsp1 = (struct mptcp_dsn_opt *)cp;
1238
1239 MPTCP_DSS_OPT_SZ_CHK(dss_rsp1->mdss_copt.mdss_len,
1240 sizeof(struct mptcp_dsn_opt) + csum_len);
1241 if (csum_len == 0) {
1242 mptcp_update_dss_rcv_state(dss_rsp1, tp, 0);
1243 } else {
1244 mptcp_update_dss_rcv_state(dss_rsp1, tp,
1245 *(uint16_t *)(void *)(cp +
1246 (dss_rsp1->mdss_copt.mdss_len - csum_len)));
1247 }
1248 break;
1249 }
1250 case (MDSS_A):
1251 {
1252 /* 32-bit Data ACK, no DSS */
1253 struct mptcp_data_ack_opt *dack_opt;
1254 dack_opt = (struct mptcp_data_ack_opt *)cp;
1255
1256 MPTCP_DSS_OPT_SZ_CHK(dack_opt->mdss_copt.mdss_len,
1257 sizeof(struct mptcp_data_ack_opt));
1258
1259 u_int32_t dack = dack_opt->mdss_ack;
1260 NTOHL(dack);
1261 MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack);
1262 mptcp_do_dss_opt_ack_meat(full_dack, mp_tp->mpt_sndwl1, tp, tiwin);
1263 break;
1264 }
1265 case (MDSS_M | MDSS_A):
1266 {
1267 /* 32-bit Data ACK + 32-bit DSS */
1268 struct mptcp_dss_ack_opt *dss_ack_rsp;
1269 dss_ack_rsp = (struct mptcp_dss_ack_opt *)cp;
1270 u_int64_t full_dsn;
1271 uint16_t csum = 0;
1272
1273 MPTCP_DSS_OPT_SZ_CHK(dss_ack_rsp->mdss_copt.mdss_len,
1274 sizeof(struct mptcp_dss_ack_opt) + csum_len);
1275
1276 u_int32_t dack = dss_ack_rsp->mdss_ack;
1277 NTOHL(dack);
1278 MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack);
1279
1280 NTOHL(dss_ack_rsp->mdss_dsn);
1281 NTOHL(dss_ack_rsp->mdss_subflow_seqn);
1282 NTOHS(dss_ack_rsp->mdss_data_len);
1283 MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt, dss_ack_rsp->mdss_dsn, full_dsn);
1284
1285 mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin);
1286
1287 if (csum_len != 0) {
1288 csum = *(uint16_t *)(void *)(cp + (dss_ack_rsp->mdss_copt.mdss_len - csum_len));
1289 }
1290
1291 mptcp_update_rcv_state_meat(mp_tp, tp,
1292 full_dsn,
1293 dss_ack_rsp->mdss_subflow_seqn,
1294 dss_ack_rsp->mdss_data_len,
1295 csum);
1296 break;
1297 }
1298 case (MDSS_M | MDSS_m):
1299 {
1300 /* 64-bit DSS , No Data ACK */
1301 struct mptcp_dsn64_opt *dsn64;
1302 dsn64 = (struct mptcp_dsn64_opt *)cp;
1303 u_int64_t full_dsn;
1304 uint16_t csum = 0;
1305
1306 MPTCP_DSS_OPT_SZ_CHK(dsn64->mdss_copt.mdss_len,
1307 sizeof(struct mptcp_dsn64_opt) + csum_len);
1308
1309 mp_tp->mpt_flags |= MPTCPF_SND_64BITACK;
1310
1311 full_dsn = mptcp_ntoh64(dsn64->mdss_dsn);
1312 NTOHL(dsn64->mdss_subflow_seqn);
1313 NTOHS(dsn64->mdss_data_len);
1314
1315 if (csum_len != 0) {
1316 csum = *(uint16_t *)(void *)(cp + dsn64->mdss_copt.mdss_len - csum_len);
1317 }
1318
1319 mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1320 dsn64->mdss_subflow_seqn,
1321 dsn64->mdss_data_len,
1322 csum);
1323 break;
1324 }
1325 case (MDSS_A | MDSS_a):
1326 {
1327 /* 64-bit Data ACK, no DSS */
1328 struct mptcp_data_ack64_opt *dack64;
1329 dack64 = (struct mptcp_data_ack64_opt *)cp;
1330
1331 MPTCP_DSS_OPT_SZ_CHK(dack64->mdss_copt.mdss_len,
1332 sizeof(struct mptcp_data_ack64_opt));
1333
1334 mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK;
1335
1336 full_dack = mptcp_ntoh64(dack64->mdss_ack);
1337 mptcp_do_dss_opt_ack_meat(full_dack, mp_tp->mpt_sndwl1, tp, tiwin);
1338 break;
1339 }
1340 case (MDSS_M | MDSS_m | MDSS_A):
1341 {
1342 /* 64-bit DSS + 32-bit Data ACK */
1343 struct mptcp_dss64_ack32_opt *dss_ack_rsp;
1344 dss_ack_rsp = (struct mptcp_dss64_ack32_opt *)cp;
1345 u_int64_t full_dsn;
1346 uint16_t csum = 0;
1347
1348 MPTCP_DSS_OPT_SZ_CHK(dss_ack_rsp->mdss_copt.mdss_len,
1349 sizeof(struct mptcp_dss64_ack32_opt) + csum_len);
1350
1351 u_int32_t dack = dss_ack_rsp->mdss_ack;
1352 NTOHL(dack);
1353 mp_tp->mpt_flags |= MPTCPF_SND_64BITACK;
1354 MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack);
1355
1356 full_dsn = mptcp_ntoh64(dss_ack_rsp->mdss_dsn);
1357 NTOHL(dss_ack_rsp->mdss_subflow_seqn);
1358 NTOHS(dss_ack_rsp->mdss_data_len);
1359
1360 mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin);
1361
1362 if (csum_len != 0) {
1363 csum = *(uint16_t *)(void *)(cp + dss_ack_rsp->mdss_copt.mdss_len - csum_len);
1364 }
1365
1366 mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1367 dss_ack_rsp->mdss_subflow_seqn,
1368 dss_ack_rsp->mdss_data_len,
1369 csum);
1370
1371 break;
1372 }
1373 case (MDSS_M | MDSS_A | MDSS_a):
1374 {
1375 /* 32-bit DSS + 64-bit Data ACK */
1376 struct mptcp_dss32_ack64_opt *dss32_ack64_opt;
1377 dss32_ack64_opt = (struct mptcp_dss32_ack64_opt *)cp;
1378 u_int64_t full_dsn;
1379
1380 MPTCP_DSS_OPT_SZ_CHK(
1381 dss32_ack64_opt->mdss_copt.mdss_len,
1382 sizeof(struct mptcp_dss32_ack64_opt) + csum_len);
1383
1384 full_dack = mptcp_ntoh64(dss32_ack64_opt->mdss_ack);
1385 NTOHL(dss32_ack64_opt->mdss_dsn);
1386 mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK;
1387 MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt,
1388 dss32_ack64_opt->mdss_dsn, full_dsn);
1389 NTOHL(dss32_ack64_opt->mdss_subflow_seqn);
1390 NTOHS(dss32_ack64_opt->mdss_data_len);
1391
1392 mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin);
1393 if (csum_len == 0) {
1394 mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1395 dss32_ack64_opt->mdss_subflow_seqn,
1396 dss32_ack64_opt->mdss_data_len, 0);
1397 } else {
1398 mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1399 dss32_ack64_opt->mdss_subflow_seqn,
1400 dss32_ack64_opt->mdss_data_len,
1401 *(uint16_t *)(void *)(cp +
1402 dss32_ack64_opt->mdss_copt.mdss_len -
1403 csum_len));
1404 }
1405 break;
1406 }
1407 case (MDSS_M | MDSS_m | MDSS_A | MDSS_a):
1408 {
1409 /* 64-bit DSS + 64-bit Data ACK */
1410 struct mptcp_dss64_ack64_opt *dss64_ack64;
1411 dss64_ack64 = (struct mptcp_dss64_ack64_opt *)cp;
1412 u_int64_t full_dsn;
1413
1414 MPTCP_DSS_OPT_SZ_CHK(dss64_ack64->mdss_copt.mdss_len,
1415 sizeof(struct mptcp_dss64_ack64_opt) + csum_len);
1416
1417 mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK;
1418 mp_tp->mpt_flags |= MPTCPF_SND_64BITACK;
1419 full_dsn = mptcp_ntoh64(dss64_ack64->mdss_dsn);
1420 full_dack = mptcp_ntoh64(dss64_ack64->mdss_dsn);
1421 mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin);
1422 NTOHL(dss64_ack64->mdss_subflow_seqn);
1423 NTOHS(dss64_ack64->mdss_data_len);
1424 if (csum_len == 0) {
1425 mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1426 dss64_ack64->mdss_subflow_seqn,
1427 dss64_ack64->mdss_data_len, 0);
1428 } else {
1429 mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1430 dss64_ack64->mdss_subflow_seqn,
1431 dss64_ack64->mdss_data_len,
1432 *(uint16_t *)(void *)(cp +
1433 dss64_ack64->mdss_copt.mdss_len -
1434 csum_len));
1435 }
1436 break;
1437 }
1438 default:
1439 break;
1440 }
1441 }
1442
1443 static void
mptcp_do_dss_opt(struct tcpcb * tp,u_char * cp,struct tcphdr * th)1444 mptcp_do_dss_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th)
1445 {
1446 struct mptcp_dss_copt *dss_rsp = (struct mptcp_dss_copt *)cp;
1447 struct mptcb *mp_tp = tptomptp(tp);
1448
1449 if (!mp_tp) {
1450 return;
1451 }
1452
1453 if (dss_rsp->mdss_subtype == MPO_DSS) {
1454 if (dss_rsp->mdss_flags & MDSS_F) {
1455 tp->t_rcv_map.mpt_dfin = 1;
1456 } else {
1457 tp->t_rcv_map.mpt_dfin = 0;
1458 }
1459
1460 mptcp_do_dss_opt_meat(cp, tp, th);
1461 }
1462 }
1463
1464 static void
mptcp_do_fastclose_opt(struct tcpcb * tp,u_char * cp,struct tcphdr * th)1465 mptcp_do_fastclose_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th)
1466 {
1467 struct mptcb *mp_tp = NULL;
1468 struct mptcp_fastclose_opt *fc_opt = (struct mptcp_fastclose_opt *)cp;
1469
1470 if (th->th_flags != TH_ACK) {
1471 return;
1472 }
1473
1474 if (fc_opt->mfast_len != sizeof(struct mptcp_fastclose_opt)) {
1475 tcpstat.tcps_invalid_opt++;
1476 return;
1477 }
1478
1479 mp_tp = tptomptp(tp);
1480 if (!mp_tp) {
1481 return;
1482 }
1483
1484 if (fc_opt->mfast_key != mp_tp->mpt_localkey) {
1485 tcpstat.tcps_invalid_opt++;
1486 return;
1487 }
1488
1489 /*
1490 * fastclose could make us more vulnerable to attacks, hence
1491 * accept only those that are at the next expected sequence number.
1492 */
1493 if (th->th_seq != tp->rcv_nxt) {
1494 tcpstat.tcps_invalid_opt++;
1495 return;
1496 }
1497
1498 /* Reset this flow */
1499 tp->t_mpflags |= TMPF_FASTCLOSERCV;
1500
1501 if (tp->t_inpcb->inp_socket != NULL) {
1502 soevent(tp->t_inpcb->inp_socket,
1503 SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
1504 }
1505 }
1506
1507
1508 static void
mptcp_do_mpfail_opt(struct tcpcb * tp,u_char * cp,struct tcphdr * th)1509 mptcp_do_mpfail_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th)
1510 {
1511 struct mptcp_mpfail_opt *fail_opt = (struct mptcp_mpfail_opt *)cp;
1512 u_int32_t mdss_subflow_seqn = 0;
1513 struct mptcb *mp_tp;
1514 int error = 0;
1515
1516 /*
1517 * mpfail could make us more vulnerable to attacks. Hence accept
1518 * only those that are the next expected sequence number.
1519 */
1520 if (th->th_seq != tp->rcv_nxt) {
1521 tcpstat.tcps_invalid_opt++;
1522 return;
1523 }
1524
1525 /* A packet without RST, must atleast have the ACK bit set */
1526 if ((th->th_flags != TH_ACK) && (th->th_flags != TH_RST)) {
1527 return;
1528 }
1529
1530 if (fail_opt->mfail_len != sizeof(struct mptcp_mpfail_opt)) {
1531 return;
1532 }
1533
1534 mp_tp = tptomptp(tp);
1535
1536 mp_tp->mpt_flags |= MPTCPF_RECVD_MPFAIL;
1537 mp_tp->mpt_dsn_at_csum_fail = mptcp_hton64(fail_opt->mfail_dsn);
1538 error = mptcp_get_map_for_dsn(tp->t_inpcb->inp_socket,
1539 mp_tp->mpt_dsn_at_csum_fail, &mdss_subflow_seqn);
1540 if (error == 0) {
1541 mp_tp->mpt_ssn_at_csum_fail = mdss_subflow_seqn;
1542 }
1543
1544 mptcp_notify_mpfail(tp->t_inpcb->inp_socket);
1545 }
1546
1547 static boolean_t
mptcp_validate_add_addr_hmac(struct tcpcb * tp,u_char * hmac,u_char * msg,uint16_t msg_len,uint16_t mac_len)1548 mptcp_validate_add_addr_hmac(struct tcpcb *tp, u_char *hmac,
1549 u_char *msg, uint16_t msg_len, uint16_t mac_len)
1550 {
1551 u_char digest[SHA256_DIGEST_LENGTH] = {0};
1552 struct mptcb *mp_tp = tptomptp(tp);
1553
1554 VERIFY(mac_len <= SHA256_DIGEST_LENGTH);
1555 mptcp_hmac_sha256(mp_tp->mpt_remotekey, mp_tp->mpt_localkey, msg, msg_len, digest);
1556
1557 if (bcmp(digest + SHA256_DIGEST_LENGTH - mac_len, hmac, mac_len) == 0) {
1558 return true; /* matches */
1559 } else {
1560 return false;
1561 }
1562 }
1563
/*
 * Handle an MPTCPv1 ADD_ADDR option: validate length, echo-bit, the
 * advertised address, and the truncated HMAC; then store the address as
 * the next subflow destination and schedule subflow creation.
 */
static void
mptcp_do_add_addr_opt_v1(struct tcpcb *tp, u_char *cp)
{
	struct mptcb *mp_tp = tptomptp(tp);
	struct mptses *mpte = mp_tp->mpt_mpte;

	struct mptcp_add_addr_opt *addr_opt = (struct mptcp_add_addr_opt *)cp;

	/* Valid lengths: v4/v6, each optionally +2 for an explicit port. */
	if (addr_opt->maddr_len != MPTCP_V1_ADD_ADDR_OPT_LEN_V4 &&
	    addr_opt->maddr_len != MPTCP_V1_ADD_ADDR_OPT_LEN_V4 + 2 &&
	    addr_opt->maddr_len != MPTCP_V1_ADD_ADDR_OPT_LEN_V6 &&
	    addr_opt->maddr_len != MPTCP_V1_ADD_ADDR_OPT_LEN_V6 + 2) {
		os_log_info(mptcp_log_handle, "%s - %lx: Wrong ADD_ADDR length %u\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
		    addr_opt->maddr_len);

		return;
	}

	/* We only process announcements, not echoes of our own ADD_ADDR. */
	if ((addr_opt->maddr_flags & MPTCP_V1_ADD_ADDR_ECHO) != 0) {
		os_log_info(mptcp_log_handle, "%s - %lx: Received ADD_ADDR with echo bit\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte));

		return;
	}

	/* Anything shorter than a v6 option must be a v4 announcement. */
	if (addr_opt->maddr_len < MPTCP_V1_ADD_ADDR_OPT_LEN_V6) {
		struct sockaddr_in *dst = &mpte->mpte_sub_dst_v4;
		struct in_addr *addr = &addr_opt->maddr_u.maddr_addrv4;
		in_addr_t haddr = ntohl(addr->s_addr);

		/* Reject non-routable / special-purpose IPv4 addresses. */
		if (IN_ZERONET(haddr) ||
		    IN_LOOPBACK(haddr) ||
		    IN_LINKLOCAL(haddr) ||
		    IN_DS_LITE(haddr) ||
		    IN_6TO4_RELAY_ANYCAST(haddr) ||
		    IN_MULTICAST(haddr) ||
		    INADDR_BROADCAST == haddr ||
		    IN_PRIVATE(haddr) ||
		    IN_SHARED_ADDRESS_SPACE(haddr)) {
			os_log_info(mptcp_log_handle, "%s - %lx: ADD_ADDR invalid addr: %x\n",
			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
			    addr->s_addr);

			return;
		}

		/* The truncated HMAC occupies the last bytes of the option. */
		u_char *hmac = (void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR);
		uint16_t msg_len = sizeof(struct mptcp_add_addr_hmac_msg_v4);
		struct mptcp_add_addr_hmac_msg_v4 msg = {0};
		msg.maddr_addrid = addr_opt->maddr_addrid;
		msg.maddr_addr = addr_opt->maddr_u.maddr_addrv4;
		/* If a port was announced, it sits just before the HMAC. */
		if (addr_opt->maddr_len > MPTCP_V1_ADD_ADDR_OPT_LEN_V4) {
			msg.maddr_port = *(uint16_t *)(void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR - 2);
		}
		if (!mptcp_validate_add_addr_hmac(tp, hmac, (u_char *)&msg, msg_len, HMAC_TRUNCATED_ADD_ADDR)) {
			os_log_info(mptcp_log_handle, "%s - %lx: ADD_ADDR addr: %x invalid HMAC\n",
			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
			    addr->s_addr);
			return;
		}

		/* Store validated address as the v4 subflow destination. */
		dst->sin_len = sizeof(*dst);
		dst->sin_family = AF_INET;
		if (addr_opt->maddr_len > MPTCP_V1_ADD_ADDR_OPT_LEN_V4) {
			dst->sin_port = *(uint16_t *)(void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR - 2);
		} else {
			/* No explicit port: reuse the connection's port. */
			dst->sin_port = mpte->__mpte_dst_v4.sin_port;
		}
		dst->sin_addr.s_addr = addr->s_addr;
		mpte->sub_dst_addr_id_v4 = addr_opt->maddr_addrid;
		mpte->mpte_last_added_addr_is_v4 = TRUE;
	} else {
		struct sockaddr_in6 *dst = &mpte->mpte_sub_dst_v6;
		struct in6_addr *addr = &addr_opt->maddr_u.maddr_addrv6;

		/* Reject non-routable / special-purpose IPv6 addresses. */
		if (IN6_IS_ADDR_LINKLOCAL(addr) ||
		    IN6_IS_ADDR_MULTICAST(addr) ||
		    IN6_IS_ADDR_UNSPECIFIED(addr) ||
		    IN6_IS_ADDR_LOOPBACK(addr) ||
		    IN6_IS_ADDR_V4COMPAT(addr) ||
		    IN6_IS_ADDR_V4MAPPED(addr)) {
			char dbuf[MAX_IPv6_STR_LEN];

			inet_ntop(AF_INET6, addr, dbuf, sizeof(dbuf));
			os_log_info(mptcp_log_handle, "%s - %lx: ADD_ADDRv6 invalid addr: %s\n",
			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
			    dbuf);

			return;
		}

		/* The truncated HMAC occupies the last bytes of the option. */
		u_char *hmac = (void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR);
		uint16_t msg_len = sizeof(struct mptcp_add_addr_hmac_msg_v6);
		struct mptcp_add_addr_hmac_msg_v6 msg = {0};
		msg.maddr_addrid = addr_opt->maddr_addrid;
		msg.maddr_addr = addr_opt->maddr_u.maddr_addrv6;
		/* If a port was announced, it sits just before the HMAC. */
		if (addr_opt->maddr_len > MPTCP_V1_ADD_ADDR_OPT_LEN_V6) {
			msg.maddr_port = *(uint16_t *)(void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR - 2);
		}
		if (!mptcp_validate_add_addr_hmac(tp, hmac, (u_char *)&msg, msg_len, HMAC_TRUNCATED_ADD_ADDR)) {
			char dbuf[MAX_IPv6_STR_LEN];

			inet_ntop(AF_INET6, addr, dbuf, sizeof(dbuf));
			os_log_info(mptcp_log_handle, "%s - %lx: ADD_ADDR addr: %s invalid HMAC\n",
			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
			    dbuf);
			return;
		}

		/* Store validated address as the v6 subflow destination. */
		dst->sin6_len = sizeof(*dst);
		dst->sin6_family = AF_INET6;
		if (addr_opt->maddr_len > MPTCP_V1_ADD_ADDR_OPT_LEN_V6) {
			dst->sin6_port = *(uint16_t *)(void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR - 2);
		} else {
			/* No explicit port: reuse the connection's port. */
			dst->sin6_port = mpte->__mpte_dst_v6.sin6_port;
		}
		memcpy(&dst->sin6_addr, addr, sizeof(*addr));
		mpte->sub_dst_addr_id_v6 = addr_opt->maddr_addrid;
		mpte->mpte_last_added_addr_is_v4 = FALSE;
	}

	/* NOTE(review): logs maddr_flags, not an IP version number. */
	os_log_info(mptcp_log_handle, "%s - %lx: Received ADD_ADDRv%u\n",
	    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
	    addr_opt->maddr_flags);

	/* v1 requires us to echo the ADD_ADDR back to the peer. */
	tp->t_mpflags |= TMPF_MPTCP_ECHO_ADDR;
	mptcp_sched_create_subflows(mpte);
}
1693
/*
 * Handle an MPTCPv0 ADD_ADDR option: validate length and the IP-version
 * field (no HMAC in v0), check the announced address is usable, then
 * store it as the next subflow destination and schedule subflow creation.
 */
static void
mptcp_do_add_addr_opt_v0(struct mptses *mpte, u_char *cp)
{
	struct mptcp_add_addr_opt *addr_opt = (struct mptcp_add_addr_opt *)cp;

	/* Only the fixed v4 and v6 lengths are valid in v0 (no port). */
	if (addr_opt->maddr_len != MPTCP_V0_ADD_ADDR_OPT_LEN_V4 &&
	    addr_opt->maddr_len != MPTCP_V0_ADD_ADDR_OPT_LEN_V6) {
		os_log_info(mptcp_log_handle, "%s - %lx: Wrong ADD_ADDR length %u\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
		    addr_opt->maddr_len);

		return;
	}

	/* In v0 the flags field carries the IP version; it must match. */
	if (addr_opt->maddr_len == MPTCP_V0_ADD_ADDR_OPT_LEN_V4 &&
	    addr_opt->maddr_flags != MPTCP_V0_ADD_ADDR_IPV4) {
		os_log_info(mptcp_log_handle, "%s - %lx: ADD_ADDR length for v4 but version is %u\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
		    addr_opt->maddr_flags);

		return;
	}

	if (addr_opt->maddr_len == MPTCP_V0_ADD_ADDR_OPT_LEN_V6 &&
	    addr_opt->maddr_flags != MPTCP_V0_ADD_ADDR_IPV6) {
		os_log_info(mptcp_log_handle, "%s - %lx: ADD_ADDR length for v6 but version is %u\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
		    addr_opt->maddr_flags);

		return;
	}

	if (addr_opt->maddr_len == MPTCP_V0_ADD_ADDR_OPT_LEN_V4) {
		struct sockaddr_in *dst = &mpte->mpte_sub_dst_v4;
		struct in_addr *addr = &addr_opt->maddr_u.maddr_addrv4;
		in_addr_t haddr = ntohl(addr->s_addr);

		/* Reject non-routable / special-purpose IPv4 addresses. */
		if (IN_ZERONET(haddr) ||
		    IN_LOOPBACK(haddr) ||
		    IN_LINKLOCAL(haddr) ||
		    IN_DS_LITE(haddr) ||
		    IN_6TO4_RELAY_ANYCAST(haddr) ||
		    IN_MULTICAST(haddr) ||
		    INADDR_BROADCAST == haddr ||
		    IN_PRIVATE(haddr) ||
		    IN_SHARED_ADDRESS_SPACE(haddr)) {
			os_log_info(mptcp_log_handle, "%s - %lx: ADD_ADDR invalid addr: %x\n",
			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
			    addr->s_addr);

			return;
		}

		/* Store as the v4 subflow destination; reuse our port. */
		dst->sin_len = sizeof(*dst);
		dst->sin_family = AF_INET;
		dst->sin_port = mpte->__mpte_dst_v4.sin_port;
		dst->sin_addr.s_addr = addr->s_addr;
		mpte->mpte_last_added_addr_is_v4 = TRUE;
	} else {
		struct sockaddr_in6 *dst = &mpte->mpte_sub_dst_v6;
		struct in6_addr *addr = &addr_opt->maddr_u.maddr_addrv6;

		/* Reject non-routable / special-purpose IPv6 addresses. */
		if (IN6_IS_ADDR_LINKLOCAL(addr) ||
		    IN6_IS_ADDR_MULTICAST(addr) ||
		    IN6_IS_ADDR_UNSPECIFIED(addr) ||
		    IN6_IS_ADDR_LOOPBACK(addr) ||
		    IN6_IS_ADDR_V4COMPAT(addr) ||
		    IN6_IS_ADDR_V4MAPPED(addr)) {
			char dbuf[MAX_IPv6_STR_LEN];

			inet_ntop(AF_INET6, addr, dbuf, sizeof(dbuf));
			os_log_info(mptcp_log_handle, "%s - %lx: ADD_ADDRv6 invalid addr: %s\n",
			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
			    dbuf);

			return;
		}

		/* Store as the v6 subflow destination; reuse our port. */
		dst->sin6_len = sizeof(*dst);
		dst->sin6_family = AF_INET6;
		dst->sin6_port = mpte->__mpte_dst_v6.sin6_port;
		dst->sin6_addr = *addr;
		mpte->mpte_last_added_addr_is_v4 = FALSE;
	}

	/* In v0, maddr_flags holds the IP version (4 or 6). */
	os_log_info(mptcp_log_handle, "%s - %lx: Received ADD_ADDRv%u\n",
	    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
	    addr_opt->maddr_flags);

	mptcp_sched_create_subflows(mpte);
}
1785
1786 void
tcp_do_mptcp_options(struct tcpcb * tp,u_char * cp,struct tcphdr * th,struct tcpopt * to,uint8_t optlen)1787 tcp_do_mptcp_options(struct tcpcb *tp, u_char *cp, struct tcphdr *th,
1788 struct tcpopt *to, uint8_t optlen)
1789 {
1790 int mptcp_subtype;
1791 struct mptcb *mp_tp = tptomptp(tp);
1792
1793 if (mp_tp == NULL) {
1794 return;
1795 }
1796
1797 socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte));
1798
1799 /* All MPTCP options have atleast 4 bytes */
1800 if (optlen < 4) {
1801 return;
1802 }
1803
1804 mptcp_subtype = (cp[2] >> 4);
1805
1806 if (mptcp_sanitize_option(tp, mptcp_subtype) == 0) {
1807 return;
1808 }
1809
1810 switch (mptcp_subtype) {
1811 case MPO_CAPABLE:
1812 mptcp_do_mpcapable_opt(tp, cp, th, optlen);
1813 break;
1814 case MPO_JOIN:
1815 mptcp_do_mpjoin_opt(tp, cp, th, optlen);
1816 break;
1817 case MPO_DSS:
1818 mptcp_do_dss_opt(tp, cp, th);
1819 break;
1820 case MPO_FASTCLOSE:
1821 mptcp_do_fastclose_opt(tp, cp, th);
1822 break;
1823 case MPO_FAIL:
1824 mptcp_do_mpfail_opt(tp, cp, th);
1825 break;
1826 case MPO_ADD_ADDR:
1827 if (mp_tp->mpt_version == MPTCP_VERSION_0) {
1828 mptcp_do_add_addr_opt_v0(mp_tp->mpt_mpte, cp);
1829 } else {
1830 mptcp_do_add_addr_opt_v1(tp, cp);
1831 }
1832 break;
1833 case MPO_REMOVE_ADDR: /* fall through */
1834 case MPO_PRIO:
1835 to->to_flags |= TOF_MPTCP;
1836 break;
1837 default:
1838 break;
1839 }
1840 return;
1841 }
1842
1843 /* REMOVE_ADDR option is sent when a source address goes away */
1844 static void
mptcp_send_remaddr_opt(struct tcpcb * tp,struct mptcp_remaddr_opt * opt)1845 mptcp_send_remaddr_opt(struct tcpcb *tp, struct mptcp_remaddr_opt *opt)
1846 {
1847 bzero(opt, sizeof(*opt));
1848 opt->mr_kind = TCPOPT_MULTIPATH;
1849 opt->mr_len = sizeof(*opt);
1850 opt->mr_subtype = MPO_REMOVE_ADDR;
1851 opt->mr_addr_id = tp->t_rem_aid;
1852 tp->t_mpflags &= ~TMPF_SND_REM_ADDR;
1853 }
1854
1855 static int
mptcp_echo_add_addr(struct tcpcb * tp,u_char * cp,unsigned int optlen)1856 mptcp_echo_add_addr(struct tcpcb *tp, u_char *cp, unsigned int optlen)
1857 {
1858 struct mptcp_add_addr_opt mpaddr;
1859 struct mptcb *mp_tp = tptomptp(tp);
1860 struct mptses *mpte = mp_tp->mpt_mpte;
1861
1862 // MPTCP v0 doesn't require echoing add_addr
1863 if (mp_tp->mpt_version == MPTCP_VERSION_0) {
1864 return optlen;
1865 }
1866
1867 size_t mpaddr_size = mpte->mpte_last_added_addr_is_v4 ? MPTCP_V1_ADD_ADDR_ECHO_OPT_LEN_V4 : MPTCP_V1_ADD_ADDR_ECHO_OPT_LEN_V6;
1868 if ((MAX_TCPOPTLEN - optlen) < mpaddr_size) {
1869 return optlen;
1870 }
1871
1872 bzero(&mpaddr, sizeof(mpaddr));
1873 mpaddr.maddr_kind = TCPOPT_MULTIPATH;
1874 mpaddr.maddr_len = (uint8_t)mpaddr_size;
1875 mpaddr.maddr_subtype = MPO_ADD_ADDR;
1876 mpaddr.maddr_flags = MPTCP_V1_ADD_ADDR_ECHO;
1877 if (mpte->mpte_last_added_addr_is_v4) {
1878 mpaddr.maddr_u.maddr_addrv4.s_addr = mpte->mpte_sub_dst_v4.sin_addr.s_addr;
1879 mpaddr.maddr_addrid = mpte->sub_dst_addr_id_v4;
1880 } else {
1881 mpaddr.maddr_u.maddr_addrv6 = mpte->mpte_sub_dst_v6.sin6_addr;
1882 mpaddr.maddr_addrid = mpte->sub_dst_addr_id_v6;
1883 }
1884
1885 memcpy(cp + optlen, &mpaddr, mpaddr_size);
1886 optlen += mpaddr_size;
1887 tp->t_mpflags &= ~TMPF_MPTCP_ECHO_ADDR;
1888 return optlen;
1889 }
1890
1891 /* We send MP_PRIO option based on the values set by the SIOCSCONNORDER ioctl */
1892 static int
mptcp_snd_mpprio(struct tcpcb * tp,u_char * cp,int optlen)1893 mptcp_snd_mpprio(struct tcpcb *tp, u_char *cp, int optlen)
1894 {
1895 struct mptcp_mpprio_addr_opt mpprio;
1896 struct mptcb *mp_tp = tptomptp(tp);
1897 size_t mpprio_size = sizeof(mpprio);
1898 // MP_PRIO of MPTCPv1 doesn't include AddrID
1899 if (mp_tp->mpt_version == MPTCP_VERSION_1) {
1900 mpprio_size -= sizeof(uint8_t);
1901 }
1902
1903 if (tp->t_state != TCPS_ESTABLISHED) {
1904 tp->t_mpflags &= ~TMPF_SND_MPPRIO;
1905 return optlen;
1906 }
1907
1908 if ((MAX_TCPOPTLEN - optlen) < (int)mpprio_size) {
1909 return optlen;
1910 }
1911
1912 bzero(&mpprio, sizeof(mpprio));
1913 mpprio.mpprio_kind = TCPOPT_MULTIPATH;
1914 mpprio.mpprio_len = (uint8_t)mpprio_size;
1915 mpprio.mpprio_subtype = MPO_PRIO;
1916 if (tp->t_mpflags & TMPF_BACKUP_PATH) {
1917 mpprio.mpprio_flags |= MPTCP_MPPRIO_BKP;
1918 }
1919 mpprio.mpprio_addrid = tp->t_local_aid;
1920 memcpy(cp + optlen, &mpprio, mpprio_size);
1921 optlen += mpprio_size;
1922 tp->t_mpflags &= ~TMPF_SND_MPPRIO;
1923 return optlen;
1924 }
1925