xref: /xnu-11417.140.69/bsd/netinet/mptcp_opt.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2012-2017 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/sysctl.h>
31 #include <netinet/in_systm.h>
32 #include <sys/socket.h>
33 #include <sys/socketvar.h>
34 #include <sys/syslog.h>
35 #include <net/route.h>
36 #include <netinet/in.h>
37 #include <net/if.h>
38 
39 #include <netinet/ip.h>
40 #include <netinet/ip_var.h>
41 #include <netinet/in_var.h>
42 #include <netinet/tcp.h>
43 #include <netinet/tcp_cache.h>
44 #include <netinet/tcp_seq.h>
45 #include <netinet/tcpip.h>
46 #include <netinet/tcp_fsm.h>
47 #include <netinet/mptcp_var.h>
48 #include <netinet/mptcp.h>
49 #include <netinet/mptcp_opt.h>
50 #include <netinet/mptcp_seq.h>
51 
52 #include <libkern/crypto/sha1.h>
53 #include <libkern/crypto/sha2.h>
54 #include <netinet/mptcp_timer.h>
55 
56 #include <mach/sdt.h>
57 
58 static int mptcp_validate_join_hmac(struct tcpcb *, u_char* __sized_by(maclen), int maclen);
59 static int mptcp_snd_mpprio(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend, int optlen);
60 static void mptcp_send_remaddr_opt(struct tcpcb *, struct mptcp_remaddr_opt *);
61 static int mptcp_echo_add_addr(struct tcpcb *, u_char * __ended_by(optend), u_char *optend, unsigned int);
62 
63 /*
64  * MPTCP Options Output Processing
65  */
66 
67 static unsigned
mptcp_setup_first_subflow_syn_opts(struct socket * so,u_char * opt __ended_by (optend),u_char * optend __unused,unsigned optlen)68 mptcp_setup_first_subflow_syn_opts(struct socket *so, u_char *opt __ended_by(optend), u_char *optend __unused, unsigned optlen)
69 {
70 	struct mptcp_mpcapable_opt_rsp mptcp_opt;
71 	struct tcpcb *tp = sototcpcb(so);
72 	struct mptcb *mp_tp = tptomptp(tp);
73 	struct mptses *mpte = mp_tp->mpt_mpte;
74 	int ret;
75 
76 	uint8_t mmco_len = mp_tp->mpt_version == MPTCP_VERSION_0 ?
77 	    sizeof(struct mptcp_mpcapable_opt_rsp) :
78 	    sizeof(struct mptcp_mpcapable_opt_common);
79 
80 	ret = tcp_heuristic_do_mptcp(tp);
81 	if (ret > 0) {
82 		os_log(mptcp_log_handle, "%s - %lx: Not doing MPTCP due to heuristics",
83 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte));
84 		mp_tp->mpt_flags |= MPTCPF_FALLBACK_HEURISTIC;
85 		return optlen;
86 	}
87 
88 	/*
89 	 * Avoid retransmitting the MP_CAPABLE option.
90 	 */
91 	if (ret == 0 &&
92 	    tp->t_rxtshift > mptcp_mpcap_retries &&
93 	    !(mpte->mpte_flags & MPTE_FORCE_ENABLE)) {
94 		if (!(mp_tp->mpt_flags & (MPTCPF_FALLBACK_HEURISTIC | MPTCPF_HEURISTIC_TRAC))) {
95 			mp_tp->mpt_flags |= MPTCPF_HEURISTIC_TRAC;
96 			tcp_heuristic_mptcp_loss(tp);
97 		}
98 		return optlen;
99 	}
100 
101 	bzero(&mptcp_opt, sizeof(struct mptcp_mpcapable_opt_rsp));
102 
103 	mptcp_opt.mmc_common.mmco_kind = TCPOPT_MULTIPATH;
104 	mptcp_opt.mmc_common.mmco_len = mmco_len;
105 	mptcp_opt.mmc_common.mmco_subtype = MPO_CAPABLE;
106 	mptcp_opt.mmc_common.mmco_version = mp_tp->mpt_version;
107 	mptcp_opt.mmc_common.mmco_flags |= MPCAP_PROPOSAL_SBIT;
108 	if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
109 		mptcp_opt.mmc_common.mmco_flags |= MPCAP_CHECKSUM_CBIT;
110 	}
111 	mptcp_opt.mmc_localkey = mp_tp->mpt_localkey;
112 
113 	memcpy(opt + optlen, &mptcp_opt, mmco_len);
114 	optlen += mmco_len;
115 
116 	return optlen;
117 }
118 
119 static unsigned
mptcp_setup_join_subflow_syn_opts(struct socket * so,u_char * opt __ended_by (optend),u_char * optend __unused,unsigned optlen)120 mptcp_setup_join_subflow_syn_opts(struct socket *so, u_char *opt __ended_by(optend), u_char *optend __unused, unsigned optlen)
121 {
122 	struct mptcp_mpjoin_opt_req mpjoin_req;
123 	struct inpcb *inp = sotoinpcb(so);
124 	struct tcpcb *tp = NULL;
125 	struct mptsub *mpts;
126 
127 	if (!inp) {
128 		return optlen;
129 	}
130 
131 	tp = intotcpcb(inp);
132 	if (!tp) {
133 		return optlen;
134 	}
135 
136 	mpts = tp->t_mpsub;
137 
138 	bzero(&mpjoin_req, sizeof(mpjoin_req));
139 	mpjoin_req.mmjo_kind = TCPOPT_MULTIPATH;
140 	mpjoin_req.mmjo_len = sizeof(mpjoin_req);
141 	mpjoin_req.mmjo_subtype_bkp = MPO_JOIN << 4;
142 
143 	if (tp->t_mpflags & TMPF_BACKUP_PATH) {
144 		mpjoin_req.mmjo_subtype_bkp |= MPTCP_BACKUP;
145 	} else if (inp->inp_boundifp && IFNET_IS_CELLULAR(inp->inp_boundifp) &&
146 	    mptcp_subflows_need_backup_flag(mpts->mpts_mpte)) {
147 		mpjoin_req.mmjo_subtype_bkp |= MPTCP_BACKUP;
148 		tp->t_mpflags |= TMPF_BACKUP_PATH;
149 	} else {
150 		mpts->mpts_flags |= MPTSF_PREFERRED;
151 	}
152 
153 	mpjoin_req.mmjo_addr_id = tp->t_local_aid;
154 	mpjoin_req.mmjo_peer_token = tptomptp(tp)->mpt_remotetoken;
155 	mptcp_get_rands(tp->t_local_aid, tptomptp(tp),
156 	    &mpjoin_req.mmjo_rand, NULL);
157 	memcpy(opt + optlen, &mpjoin_req, mpjoin_req.mmjo_len);
158 	optlen += mpjoin_req.mmjo_len;
159 
160 	return optlen;
161 }
162 
163 unsigned
mptcp_setup_join_ack_opts(struct tcpcb * tp,u_char * opt __ended_by (optend),u_char * optend __unused,unsigned optlen)164 mptcp_setup_join_ack_opts(struct tcpcb *tp, u_char *opt __ended_by(optend), u_char *optend __unused, unsigned optlen)
165 {
166 	unsigned new_optlen;
167 	struct mptcp_mpjoin_opt_rsp2 join_rsp2;
168 
169 	if ((MAX_TCPOPTLEN - optlen) < sizeof(struct mptcp_mpjoin_opt_rsp2)) {
170 		printf("%s: no space left %d \n", __func__, optlen);
171 		return optlen;
172 	}
173 
174 	bzero(&join_rsp2, sizeof(struct mptcp_mpjoin_opt_rsp2));
175 	join_rsp2.mmjo_kind = TCPOPT_MULTIPATH;
176 	join_rsp2.mmjo_len = sizeof(struct mptcp_mpjoin_opt_rsp2);
177 	join_rsp2.mmjo_subtype = MPO_JOIN;
178 	mptcp_get_mpjoin_hmac(tp->t_local_aid, tptomptp(tp),
179 	    (u_char*)&join_rsp2.mmjo_mac, HMAC_TRUNCATED_ACK);
180 	memcpy(opt + optlen, &join_rsp2, join_rsp2.mmjo_len);
181 	new_optlen = optlen + join_rsp2.mmjo_len;
182 	return new_optlen;
183 }
184 
185 unsigned
mptcp_setup_syn_opts(struct socket * so,u_char * opt __ended_by (optend),u_char * optend,unsigned optlen)186 mptcp_setup_syn_opts(struct socket *so, u_char *opt __ended_by(optend), u_char *optend, unsigned optlen)
187 {
188 	unsigned new_optlen;
189 
190 	if (!(so->so_flags & SOF_MP_SEC_SUBFLOW)) {
191 		new_optlen = mptcp_setup_first_subflow_syn_opts(so, opt, optend, optlen);
192 	} else {
193 		new_optlen = mptcp_setup_join_subflow_syn_opts(so, opt, optend, optlen);
194 	}
195 
196 	return new_optlen;
197 }
198 
199 static int
mptcp_send_mpfail(struct tcpcb * tp,u_char * opt __ended_by (optend),u_char * optend,unsigned int optlen)200 mptcp_send_mpfail(struct tcpcb *tp, u_char *opt __ended_by(optend), u_char *optend, unsigned int optlen)
201 {
202 #pragma unused(tp, opt, optend, optlen)
203 
204 	struct mptcb *mp_tp = NULL;
205 	struct mptcp_mpfail_opt fail_opt;
206 	uint64_t dsn;
207 	uint8_t len = sizeof(struct mptcp_mpfail_opt);
208 
209 	mp_tp = tptomptp(tp);
210 	if (mp_tp == NULL) {
211 		tp->t_mpflags &= ~TMPF_SND_MPFAIL;
212 		return optlen;
213 	}
214 
215 	/* if option space low give up */
216 	if ((MAX_TCPOPTLEN - optlen) < sizeof(struct mptcp_mpfail_opt)) {
217 		tp->t_mpflags &= ~TMPF_SND_MPFAIL;
218 		return optlen;
219 	}
220 
221 	dsn = mp_tp->mpt_rcvnxt;
222 
223 	bzero(&fail_opt, sizeof(fail_opt));
224 	fail_opt.mfail_kind = TCPOPT_MULTIPATH;
225 	fail_opt.mfail_len = len;
226 	fail_opt.mfail_subtype = MPO_FAIL;
227 	fail_opt.mfail_dsn = mptcp_hton64(dsn);
228 	memcpy(opt + optlen, &fail_opt, len);
229 	optlen += len;
230 	tp->t_mpflags &= ~TMPF_SND_MPFAIL;
231 	return optlen;
232 }
233 
/*
 * Append an infinite-mapping DSS option (data_len == 0) to signal
 * fallback to regular TCP while keeping the subflow alive.
 *
 * The DSN/subflow-sequence pair either points at the spot where a
 * checksum failure was recorded (MPTCPF_RECVD_MPFAIL), or at the
 * current data-level snd_una.  Sets TMPF_INFIN_SENT on success.
 * Returns the updated TCP-option length; returns it unchanged when
 * the option cannot (yet) be sent.
 */
static int
mptcp_send_infinite_mapping(struct tcpcb *tp, u_char *opt __ended_by(optend), u_char *optend __unused, unsigned int optlen)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	uint8_t len = sizeof(struct mptcp_dsn_opt);
	struct mptcp_dsn_opt infin_opt;
	struct mptcb *mp_tp = NULL;
	uint8_t csum_len = 0;

	if (!so) {
		return optlen;
	}

	mp_tp = tptomptp(tp);
	if (mp_tp == NULL) {
		return optlen;
	}

	/* Checksummed connections carry a trailing 16-bit checksum field. */
	if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
		csum_len = 2;
	}

	/* try later */
	if ((MAX_TCPOPTLEN - optlen) < (len + csum_len)) {
		return optlen;
	}

	bzero(&infin_opt, sizeof(infin_opt));
	infin_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
	infin_opt.mdss_copt.mdss_len = len + csum_len;
	infin_opt.mdss_copt.mdss_subtype = MPO_DSS;
	infin_opt.mdss_copt.mdss_flags |= MDSS_M;
	if (mp_tp->mpt_flags & MPTCPF_RECVD_MPFAIL) {
		/* Peer reported a checksum failure: map from the failure point. */
		infin_opt.mdss_dsn = (u_int32_t)
		    MPTCP_DATASEQ_LOW32(mp_tp->mpt_dsn_at_csum_fail);
		infin_opt.mdss_subflow_seqn = mp_tp->mpt_ssn_at_csum_fail;
	} else {
		/*
		 * If MPTCP fallback happens, but TFO succeeds, the data on the
		 * SYN does not belong to the MPTCP data sequence space.
		 */
		if ((tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
		    ((mp_tp->mpt_local_idsn + 1) == mp_tp->mpt_snduna)) {
			infin_opt.mdss_subflow_seqn = 1;
		} else {
			infin_opt.mdss_subflow_seqn = tp->snd_una - tp->t_mpsub->mpts_iss;
		}
		infin_opt.mdss_dsn = (u_int32_t)
		    MPTCP_DATASEQ_LOW32(mp_tp->mpt_snduna);
	}

	/* A zero DSN or subflow sequence cannot be mapped; retry later. */
	if ((infin_opt.mdss_dsn == 0) || (infin_opt.mdss_subflow_seqn == 0)) {
		return optlen;
	}
	infin_opt.mdss_dsn = htonl(infin_opt.mdss_dsn);
	infin_opt.mdss_subflow_seqn = htonl(infin_opt.mdss_subflow_seqn);
	infin_opt.mdss_data_len = 0;

	memcpy(opt + optlen, &infin_opt, len);
	optlen += len;
	if (csum_len != 0) {
		/* The checksum field is set to 0 for infinite mapping */
		uint16_t csum = 0;
		memcpy(opt + optlen, &csum, csum_len);
		optlen += csum_len;
	}

	tp->t_mpflags |= TMPF_INFIN_SENT;
	tcpstat.tcps_estab_fallback++;
	return optlen;
}
305 
306 
/*
 * Decide whether a DATA_FIN may be piggy-backed on this mapping:
 * returns 1 when `dsn' + `datalen' lands exactly on the data-level
 * send-max (i.e. the mapping covers the last byte of the stream),
 * 0 otherwise.
 */
static int
mptcp_ok_to_fin(struct tcpcb *tp, u_int64_t dsn, u_int32_t datalen)
{
	struct mptcb *mp_tp = tptomptp(tp);

	/*
	 * Widen `dsn' with bits taken from mpt_sndmax before comparing.
	 * NOTE(review): the mask used here is MPTCP_DATASEQ_LOW32_MASK,
	 * i.e. the *low* 32 bits of mpt_sndmax are OR-ed in.  Rebuilding a
	 * full 64-bit DSN from a 32-bit value would normally take the HIGH32
	 * bits of mpt_sndmax instead — confirm this is intentional.
	 */
	dsn = (mp_tp->mpt_sndmax & MPTCP_DATASEQ_LOW32_MASK) | dsn;
	if ((dsn + datalen) == mp_tp->mpt_sndmax) {
		return 1;
	}

	return 0;
}
319 
/*
 * Build the MPTCP options for a non-SYN segment: MP_CAPABLE ACK,
 * MP_JOIN ACK, REMOVE_ADDR, ADD_ADDR echo, MP_PRIO, and the various
 * DSS / DATA_ACK combinations (32/64-bit DSN crossed with 32/64-bit ACK),
 * plus the DATA_FIN.  Returns the updated TCP-option length.
 *
 * `off'             - offset of this segment within the send buffer
 * `opt'/`optend'    - TCP option buffer already holding `optlen' bytes
 * `flags'           - TCP header flags of the segment being built
 * `len'             - payload length of the segment
 * `p_mptcp_acknow'  - in/out: reset to FALSE when none of the pending
 *                     signal flags were acted upon (see comment at
 *                     ret_optlen below)
 * `do_not_compress' - out: set to TRUE whenever an MPTCP signal is
 *                     placed in (or pending for) this segment
 */
unsigned int
mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt __ended_by(optend), u_char *optend,
    unsigned int optlen, int flags, int len,
    boolean_t *p_mptcp_acknow, boolean_t *do_not_compress)
{
	struct inpcb *inp = (struct inpcb *)tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct mptcb *mp_tp = tptomptp(tp);
	boolean_t do_csum = FALSE;
	boolean_t send_64bit_dsn = FALSE;
	boolean_t send_64bit_ack = FALSE;
	/* Snapshot of the pending signal flags, compared at ret_optlen. */
	uint32_t old_mpt_flags = tp->t_mpflags & TMPF_MPTCP_SIGNALS;
	boolean_t initial_data = FALSE;

	/* There is a case where offset can become negative. tcp_output()
	 * gracefully handles this. So, let's make MPTCP more robust as well.
	 */
	if (off < 0) {
		off = 0;
	}

	if (mptcp_enable == 0 || mp_tp == NULL || tp->t_state == TCPS_CLOSED) {
		/* do nothing */
		goto ret_optlen;
	}

	socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte));

	if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
		do_csum = TRUE;
	}

	/* tcp_output handles the SYN path separately */
	if (flags & TH_SYN) {
		goto ret_optlen;
	}

	/* Even the smallest MPTCP option must fit in the remaining space. */
	if ((MAX_TCPOPTLEN - optlen) <
	    sizeof(struct mptcp_mpcapable_opt_common)) {
		os_log_error(mptcp_log_handle, "%s - %lx: no space left %d flags %x tp->t_mpflags %x len %d\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte),
		    optlen, flags, tp->t_mpflags, len);
		goto ret_optlen;
	}

	/* After fallback, only MP_FAIL or a one-shot infinite mapping is sent. */
	if (tp->t_mpflags & TMPF_TCP_FALLBACK) {
		if (tp->t_mpflags & TMPF_SND_MPFAIL) {
			optlen = mptcp_send_mpfail(tp, opt, optend, optlen);
		} else if (!(tp->t_mpflags & TMPF_INFIN_SENT)) {
			optlen = mptcp_send_infinite_mapping(tp, opt, optend, optlen);
		}

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* Does this segment start with the very first data-level byte? */
	if (len > 0 && off == 0 && tp->t_mpflags & TMPF_SEND_DSN && tp->t_mpflags & TMPF_SND_KEYS) {
		uint64_t dsn = 0;
		uint32_t relseq = 0;
		uint16_t data_len = 0, dss_csum = 0;
		mptcp_output_getm_dsnmap64(so, off, &dsn, &relseq, &data_len, &dss_csum);
		if (dsn == mp_tp->mpt_local_idsn + 1) {
			initial_data = TRUE;
		}
	}

	/* send MP_CAPABLE when it's the INITIAL ACK or data */
	if (tp->t_mpflags & TMPF_SND_KEYS &&
	    (mp_tp->mpt_version == MPTCP_VERSION_0 || initial_data ||
	    (mp_tp->mpt_sndnxt == mp_tp->mpt_local_idsn + 1 && len == 0))) {
		struct mptcp_mpcapable_opt_rsp2 mptcp_opt;
		boolean_t send_data_level_details = tp->t_mpflags & TMPF_SEND_DSN ? TRUE : FALSE;

		/* Base is keys-only; add 2 bytes data_len, plus 2 for the checksum. */
		uint8_t mmco_len = sizeof(struct mptcp_mpcapable_opt_rsp1);
		if (send_data_level_details) {
			mmco_len += 2;
			if (do_csum) {
				mmco_len += 2;
			}
		}
		if ((MAX_TCPOPTLEN - optlen) < mmco_len) {
			os_log_error(mptcp_log_handle, "%s - %lx: not enough space in TCP option, "
			    "optlen: %u, mmco_len: %d\n", __func__,
			    (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte),
			    optlen, mmco_len);
			goto ret_optlen;
		}

		bzero(&mptcp_opt, sizeof(struct mptcp_mpcapable_opt_rsp2));
		mptcp_opt.mmc_rsp1.mmc_common.mmco_kind = TCPOPT_MULTIPATH;
		mptcp_opt.mmc_rsp1.mmc_common.mmco_len = mmco_len;
		mptcp_opt.mmc_rsp1.mmc_common.mmco_subtype = MPO_CAPABLE;
		mptcp_opt.mmc_rsp1.mmc_common.mmco_version = mp_tp->mpt_version;
		mptcp_opt.mmc_rsp1.mmc_common.mmco_flags |= MPCAP_PROPOSAL_SBIT;
		if (do_csum) {
			mptcp_opt.mmc_rsp1.mmc_common.mmco_flags |= MPCAP_CHECKSUM_CBIT;
		}
		mptcp_opt.mmc_rsp1.mmc_localkey = mp_tp->mpt_localkey;
		mptcp_opt.mmc_rsp1.mmc_remotekey = mp_tp->mpt_remotekey;
		if (send_data_level_details) {
			mptcp_output_getm_data_level_details(so, off, &mptcp_opt.data_len, &mptcp_opt.csum);
			mptcp_opt.data_len = htons(mptcp_opt.data_len);
		}
		memcpy(opt + optlen, &mptcp_opt, mmco_len);

		/* v0 stops after one send; later versions keep the flag set. */
		if (mp_tp->mpt_version == MPTCP_VERSION_0) {
			tp->t_mpflags &= ~TMPF_SND_KEYS;
		}
		optlen += mmco_len;

		if (!tp->t_mpuna) {
			tp->t_mpuna = tp->snd_una;
		} else {
			/* its a retransmission of the MP_CAPABLE ACK */
		}

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* MP_JOIN third ACK, with a retransmit timer for it. */
	if (tp->t_mpflags & TMPF_SND_JACK) {
		*do_not_compress = TRUE;
		optlen = mptcp_setup_join_ack_opts(tp, opt, optend, optlen);
		if (!tp->t_mpuna) {
			tp->t_mpuna = tp->snd_una;
		}
		/* Start a timer to retransmit the ACK */
		tp->t_timer[TCPT_JACK_RXMT] =
		    OFFSET_FROM_START(tp, tcp_jack_rxmt);

		tp->t_mpflags &= ~TMPF_SND_JACK;
		goto ret_optlen;
	}

	if (!(tp->t_mpflags & (TMPF_MPTCP_TRUE | TMPF_PREESTABLISHED))) {
		goto ret_optlen;
	}
	/*
	 * From here on, all options are sent only if MPTCP_TRUE
	 * or when data is sent early on as in Fast Join
	 */

	/* REMOVE_ADDR: dropped (not deferred) if it does not fit. */
	if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
	    (tp->t_mpflags & TMPF_SND_REM_ADDR)) {
		int rem_opt_len = sizeof(struct mptcp_remaddr_opt);
		if (optlen + rem_opt_len <= MAX_TCPOPTLEN) {
			mptcp_send_remaddr_opt(tp,
			    (struct mptcp_remaddr_opt *)(opt + optlen));
			optlen += rem_opt_len;
		} else {
			tp->t_mpflags &= ~TMPF_SND_REM_ADDR;
		}

		*do_not_compress = TRUE;
	}

	/* Echo a received ADD_ADDR back to the peer. */
	if (tp->t_mpflags & TMPF_MPTCP_ECHO_ADDR) {
		optlen = mptcp_echo_add_addr(tp, opt, optend, optlen);
	}

	if (tp->t_mpflags & TMPF_SND_MPPRIO) {
		optlen = mptcp_snd_mpprio(tp, opt, optend, optlen);

		*do_not_compress = TRUE;
	}

	if (mp_tp->mpt_flags & MPTCPF_SND_64BITDSN) {
		send_64bit_dsn = TRUE;
	}
	if (mp_tp->mpt_flags & MPTCPF_SND_64BITACK) {
		send_64bit_ack = TRUE;
	}

/* Bail out to ret_optlen when `dssoptlen' no longer fits. */
#define CHECK_OPTLEN    {                                                                   \
	if (MAX_TCPOPTLEN - optlen < dssoptlen) {                                         \
	        os_log_error(mptcp_log_handle, "%s: dssoptlen %d optlen %d \n", __func__,   \
	            dssoptlen, optlen);                                                     \
	            goto ret_optlen;                                                        \
	}                                                                                   \
}

/* Piggy-back the DATA_FIN on `dsn_opt' when this mapping ends the stream. */
#define DO_FIN(dsn_opt) {                                               \
	int sndfin = 0;                                                 \
	sndfin = mptcp_ok_to_fin(tp, dsn_opt.mdss_dsn, len);            \
	if (sndfin) {                                                   \
	        dsn_opt.mdss_copt.mdss_flags |= MDSS_F;                 \
	        dsn_opt.mdss_copt.mdss_data_len += 1;                   \
	        if (do_csum)                                            \
	                dss_csum = in_addword(dss_csum, 1);             \
	}                                                               \
}

/* Shrink `len' so payload + options never exceed t_maxopd. */
#define CHECK_DATALEN {                                                             \
	/* MPTCP socket does not support IP options */                              \
	if ((len + optlen + dssoptlen) > tp->t_maxopd) {                            \
	        os_log_error(mptcp_log_handle, "%s: nosp %d len %d opt %d %d %d\n", \
	            __func__, len, dssoptlen, optlen,                               \
	            tp->t_maxseg, tp->t_maxopd);                                    \
	/* remove option length from payload len */                         \
	        len = tp->t_maxopd - optlen - dssoptlen;                            \
	}                                                                           \
}

	/* 64-bit DSN + 32-bit DATA_ACK. */
	if ((tp->t_mpflags & TMPF_SEND_DSN) &&
	    (send_64bit_dsn)) {
		/*
		 * If there was the need to send 64-bit Data ACK along
		 * with 64-bit DSN, then 26 or 28 bytes would be used.
		 * With timestamps and NOOP padding that will cause
		 * overflow. Hence, in the rare event that both 64-bit
		 * DSN and 64-bit ACK have to be sent, delay the send of
		 * 64-bit ACK until our 64-bit DSN is acked with a 64-bit ack.
		 * XXX If this delay causes issue, remove the 2-byte padding.
		 */
		struct mptcp_dss64_ack32_opt dsn_ack_opt;
		uint8_t dssoptlen = sizeof(dsn_ack_opt);
		uint16_t dss_csum;

		if (do_csum) {
			dssoptlen += 2;
		}

		CHECK_OPTLEN;

		bzero(&dsn_ack_opt, sizeof(dsn_ack_opt));
		dsn_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dsn_ack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dsn_ack_opt.mdss_copt.mdss_len = dssoptlen;
		dsn_ack_opt.mdss_copt.mdss_flags |=
		    MDSS_M | MDSS_m | MDSS_A;

		CHECK_DATALEN;

		mptcp_output_getm_dsnmap64(so, off,
		    &dsn_ack_opt.mdss_dsn,
		    &dsn_ack_opt.mdss_subflow_seqn,
		    &dsn_ack_opt.mdss_data_len,
		    &dss_csum);

		/* An unmapped (zero) DSN or length means nothing to announce. */
		if ((dsn_ack_opt.mdss_data_len == 0) ||
		    (dsn_ack_opt.mdss_dsn == 0)) {
			goto ret_optlen;
		}

		if (tp->t_mpflags & TMPF_SEND_DFIN) {
			DO_FIN(dsn_ack_opt);
		}

		dsn_ack_opt.mdss_ack =
		    htonl(MPTCP_DATAACK_LOW32(mp_tp->mpt_rcvnxt));

		dsn_ack_opt.mdss_dsn = mptcp_hton64(dsn_ack_opt.mdss_dsn);
		dsn_ack_opt.mdss_subflow_seqn = htonl(
			dsn_ack_opt.mdss_subflow_seqn);
		dsn_ack_opt.mdss_data_len = htons(
			dsn_ack_opt.mdss_data_len);

		memcpy(opt + optlen, &dsn_ack_opt, sizeof(dsn_ack_opt));
		if (do_csum) {
			/* Checksum trails the fixed-size option body. */
			*((uint16_t *)(void *)(opt + optlen + sizeof(dsn_ack_opt))) = dss_csum;
		}

		optlen += dssoptlen;

		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* 32-bit DSN without a Data ACK. */
	if ((tp->t_mpflags & TMPF_SEND_DSN) &&
	    (!send_64bit_dsn) &&
	    !(tp->t_mpflags & TMPF_MPTCP_ACKNOW)) {
		struct mptcp_dsn_opt dsn_opt;
		uint8_t dssoptlen = sizeof(struct mptcp_dsn_opt);
		uint16_t dss_csum;

		if (do_csum) {
			dssoptlen += 2;
		}

		CHECK_OPTLEN;

		bzero(&dsn_opt, sizeof(dsn_opt));
		dsn_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dsn_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dsn_opt.mdss_copt.mdss_len = dssoptlen;
		dsn_opt.mdss_copt.mdss_flags |= MDSS_M;

		CHECK_DATALEN;

		mptcp_output_getm_dsnmap32(so, off, &dsn_opt.mdss_dsn,
		    &dsn_opt.mdss_subflow_seqn,
		    &dsn_opt.mdss_data_len,
		    &dss_csum);

		if ((dsn_opt.mdss_data_len == 0) ||
		    (dsn_opt.mdss_dsn == 0)) {
			goto ret_optlen;
		}

		if (tp->t_mpflags & TMPF_SEND_DFIN) {
			DO_FIN(dsn_opt);
		}

		dsn_opt.mdss_dsn = htonl(dsn_opt.mdss_dsn);
		dsn_opt.mdss_subflow_seqn = htonl(dsn_opt.mdss_subflow_seqn);
		dsn_opt.mdss_data_len = htons(dsn_opt.mdss_data_len);
		memcpy(opt + optlen, &dsn_opt, sizeof(dsn_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dsn_opt))) = dss_csum;
		}

		optlen += dssoptlen;
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* 32-bit Data ACK option */
	if ((tp->t_mpflags & TMPF_MPTCP_ACKNOW) &&
	    (!send_64bit_ack) &&
	    !(tp->t_mpflags & TMPF_SEND_DSN) &&
	    !(tp->t_mpflags & TMPF_SEND_DFIN)) {
		struct mptcp_data_ack_opt dack_opt;
		uint8_t dssoptlen = 0;
do_ack32_only:
		dssoptlen = sizeof(dack_opt);

		CHECK_OPTLEN;

		bzero(&dack_opt, dssoptlen);
		dack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dack_opt.mdss_copt.mdss_len = dssoptlen;
		dack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dack_opt.mdss_copt.mdss_flags |= MDSS_A;
		dack_opt.mdss_ack =
		    htonl(MPTCP_DATAACK_LOW32(mp_tp->mpt_rcvnxt));
		memcpy(opt + optlen, &dack_opt, dssoptlen);
		optlen += dssoptlen;
		VERIFY(optlen <= MAX_TCPOPTLEN);
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;
		goto ret_optlen;
	}

	/* 64-bit Data ACK option */
	if ((tp->t_mpflags & TMPF_MPTCP_ACKNOW) &&
	    (send_64bit_ack) &&
	    !(tp->t_mpflags & TMPF_SEND_DSN) &&
	    !(tp->t_mpflags & TMPF_SEND_DFIN)) {
		struct mptcp_data_ack64_opt dack_opt;
		uint8_t dssoptlen = 0;
do_ack64_only:
		dssoptlen = sizeof(dack_opt);

		CHECK_OPTLEN;

		bzero(&dack_opt, dssoptlen);
		dack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dack_opt.mdss_copt.mdss_len = dssoptlen;
		dack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dack_opt.mdss_copt.mdss_flags |= (MDSS_A | MDSS_a);
		dack_opt.mdss_ack = mptcp_hton64(mp_tp->mpt_rcvnxt);
		/*
		 * The other end should retransmit 64-bit DSN until it
		 * receives a 64-bit ACK.
		 */
		mp_tp->mpt_flags &= ~MPTCPF_SND_64BITACK;
		memcpy(opt + optlen, &dack_opt, dssoptlen);
		optlen += dssoptlen;
		VERIFY(optlen <= MAX_TCPOPTLEN);
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;
		goto ret_optlen;
	}

	/* 32-bit DSS+Data ACK option */
	if ((tp->t_mpflags & TMPF_SEND_DSN) &&
	    (!send_64bit_dsn) &&
	    (!send_64bit_ack) &&
	    (tp->t_mpflags & TMPF_MPTCP_ACKNOW)) {
		struct mptcp_dss_ack_opt dss_ack_opt;
		uint8_t dssoptlen = sizeof(dss_ack_opt);
		uint16_t dss_csum;

		if (do_csum) {
			dssoptlen += 2;
		}

		CHECK_OPTLEN;

		bzero(&dss_ack_opt, sizeof(dss_ack_opt));
		dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dss_ack_opt.mdss_copt.mdss_len = dssoptlen;
		dss_ack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dss_ack_opt.mdss_copt.mdss_flags |= MDSS_A | MDSS_M;
		dss_ack_opt.mdss_ack =
		    htonl(MPTCP_DATAACK_LOW32(mp_tp->mpt_rcvnxt));

		CHECK_DATALEN;

		mptcp_output_getm_dsnmap32(so, off, &dss_ack_opt.mdss_dsn,
		    &dss_ack_opt.mdss_subflow_seqn,
		    &dss_ack_opt.mdss_data_len,
		    &dss_csum);

		/* No mapping available: fall back to a pure 32-bit Data ACK. */
		if ((dss_ack_opt.mdss_data_len == 0) ||
		    (dss_ack_opt.mdss_dsn == 0)) {
			goto do_ack32_only;
		}

		if (tp->t_mpflags & TMPF_SEND_DFIN) {
			DO_FIN(dss_ack_opt);
		}

		dss_ack_opt.mdss_dsn = htonl(dss_ack_opt.mdss_dsn);
		dss_ack_opt.mdss_subflow_seqn =
		    htonl(dss_ack_opt.mdss_subflow_seqn);
		dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len);
		memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum;
		}

		optlen += dssoptlen;

		if (optlen > MAX_TCPOPTLEN) {
			panic("optlen too large");
		}
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;
		goto ret_optlen;
	}

	/* 32-bit DSS + 64-bit DACK option */
	if ((tp->t_mpflags & TMPF_SEND_DSN) &&
	    (!send_64bit_dsn) &&
	    (send_64bit_ack) &&
	    (tp->t_mpflags & TMPF_MPTCP_ACKNOW)) {
		struct mptcp_dss32_ack64_opt dss_ack_opt;
		uint8_t dssoptlen = sizeof(dss_ack_opt);
		uint16_t dss_csum;

		if (do_csum) {
			dssoptlen += 2;
		}

		CHECK_OPTLEN;

		bzero(&dss_ack_opt, sizeof(dss_ack_opt));
		dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dss_ack_opt.mdss_copt.mdss_len = dssoptlen;
		dss_ack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dss_ack_opt.mdss_copt.mdss_flags |= MDSS_M | MDSS_A | MDSS_a;
		dss_ack_opt.mdss_ack =
		    mptcp_hton64(mp_tp->mpt_rcvnxt);

		CHECK_DATALEN;

		mptcp_output_getm_dsnmap32(so, off, &dss_ack_opt.mdss_dsn,
		    &dss_ack_opt.mdss_subflow_seqn,
		    &dss_ack_opt.mdss_data_len,
		    &dss_csum);

		/* No mapping available: fall back to a pure 64-bit Data ACK. */
		if ((dss_ack_opt.mdss_data_len == 0) ||
		    (dss_ack_opt.mdss_dsn == 0)) {
			goto do_ack64_only;
		}

		if (tp->t_mpflags & TMPF_SEND_DFIN) {
			DO_FIN(dss_ack_opt);
		}

		dss_ack_opt.mdss_dsn = htonl(dss_ack_opt.mdss_dsn);
		dss_ack_opt.mdss_subflow_seqn =
		    htonl(dss_ack_opt.mdss_subflow_seqn);
		dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len);
		memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum;
		}

		optlen += dssoptlen;

		if (optlen > MAX_TCPOPTLEN) {
			panic("optlen too large");
		}
		tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW;

		*do_not_compress = TRUE;

		goto ret_optlen;
	}

	/* Stand-alone DATA_FIN: a 1-byte mapping covering the FIN itself. */
	if (tp->t_mpflags & TMPF_SEND_DFIN) {
		uint8_t dssoptlen = sizeof(struct mptcp_dss_ack_opt);
		struct mptcp_dss_ack_opt dss_ack_opt;
		uint16_t dss_csum;

		if (do_csum) {
			/* Checksum over the pseudo-header of the 1-byte FIN mapping. */
			uint64_t dss_val = mptcp_hton64(mp_tp->mpt_sndmax - 1);
			uint16_t dlen = htons(1);
			uint32_t sseq = 0;
			uint32_t sum;


			dssoptlen += 2;

			sum = in_pseudo64(dss_val, sseq, dlen);
			ADDCARRY(sum);
			dss_csum = ~sum & 0xffff;
		}

		CHECK_OPTLEN;

		bzero(&dss_ack_opt, sizeof(dss_ack_opt));

		/*
		 * Data FIN occupies one sequence space.
		 * Don't send it if it has been Acked.
		 */
		if ((mp_tp->mpt_sndnxt + 1 != mp_tp->mpt_sndmax) ||
		    (mp_tp->mpt_snduna == mp_tp->mpt_sndmax)) {
			goto ret_optlen;
		}

		dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH;
		dss_ack_opt.mdss_copt.mdss_len = dssoptlen;
		dss_ack_opt.mdss_copt.mdss_subtype = MPO_DSS;
		dss_ack_opt.mdss_copt.mdss_flags |= MDSS_A | MDSS_M | MDSS_F;
		dss_ack_opt.mdss_ack =
		    htonl(MPTCP_DATAACK_LOW32(mp_tp->mpt_rcvnxt));
		dss_ack_opt.mdss_dsn =
		    htonl(MPTCP_DATASEQ_LOW32(mp_tp->mpt_sndmax - 1));
		dss_ack_opt.mdss_subflow_seqn = 0;
		dss_ack_opt.mdss_data_len = 1;
		dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len);
		memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt));
		if (do_csum) {
			*((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum;
		}

		optlen += dssoptlen;

		*do_not_compress = TRUE;
	}

ret_optlen:
	if (TRUE == *p_mptcp_acknow) {
		uint32_t new_mpt_flags = tp->t_mpflags & TMPF_MPTCP_SIGNALS;

		/*
		 * If none of the above mpflags were acted on by
		 * this routine, reset these flags and set p_mptcp_acknow
		 * to false.
		 *
		 * XXX The reset value of p_mptcp_acknow can be used
		 * to communicate tcp_output to NOT send a pure ack without any
		 * MPTCP options as it will be treated as a dup ack.
		 * Since the instances of mptcp_setup_opts not acting on
		 * these options are mostly corner cases and sending a dup
		 * ack here would only have an impact if the system
		 * has sent consecutive dup acks before this false one,
		 * we haven't modified the logic in tcp_output to avoid
		 * that.
		 */
		if (old_mpt_flags == new_mpt_flags) {
			tp->t_mpflags &= ~TMPF_MPTCP_SIGNALS;
			*p_mptcp_acknow = FALSE;
		}
	}

	return optlen;
}
897 
898 /*
899  * MPTCP Options Input Processing
900  */
901 
902 /*
903  * In most cases, option can be parsed by performing the cast
904  *
905  *       opt_type *opt = (opt_type*)optp;
906  *
 907  * However, in some cases there will be fewer bytes on the wire
 908  * than the size of the corresponding C struct, i.e.:
909  *
910  *              (optend - optp) < sizeof(opt_type)
911  *
912  * In such cases, the bounds of `opt' will be smaller than
913  * the size of its declared pointee type. Any attempt to
914  * dereference `opt' (or to access its fields)
915  * will lead to an `-fbounds-safety' trap.
916  *
917  * To prevent such undesirable situation, we are using
918  * the "shadow storage" pattern:
919  * - If there are enough bytes so that the cast expression
920  *       opt_type *opt = (opt_type*)optp;
921  *   will produce a "valid" pointer, we will perform a cast.
922  * - Otherwise, we will copy the bytes into a stack allocated
923  *   structure, and return a pointer to that structure.
924  *
925  * If the `VERBOSE_OPTION_PARSING_LOGGING' is set to 1,
926  * the code will produce additional logging at the detriment
927  * of performance. This is off by default, but the code is kept for now.
928  */
#define VERBOSE_OPTION_PARSING_LOGGING 0
#if VERBOSE_OPTION_PARSING_LOGGING

/* Log when the option's declared length exceeds the shadow storage;
 * the excess bytes are simply not parsed. */
#define MPTCP_OPT_CHECK_UNDERRUN(shadow_opt, optlen)  do {                                  \
	if (__improbable(sizeof((shadow_opt)) < (optlen))) {                                    \
	        size_t ignored = (optlen) - sizeof((shadow_opt));                               \
	        os_log(mptcp_log_handle,                                                        \
	                "%s - option length exceeds the size of underlying storage "            \
	                "(optlen=%lu, storage size=%lu) %lu bytes will be ignored\n",           \
	                __func__, (size_t)(optlen), sizeof((shadow_opt)), ignored);             \
	}                                                                                       \
} while(0)

/* Log when the copy-to-shadow-storage path (rather than the cast path)
 * is taken because too few bytes remain on the wire. */
#define MPTCP_OPT_REPORT_COPY(shadow_opt, available)  do {                                  \
	os_log(mptcp_log_handle,                                                                \
	        "%s - insufficent input to use cast-parsing (required=%lu; available=%ld); "    \
	        " option data will be copied to local storage\n",                               \
	                __func__, sizeof((shadow_opt)), available);                             \
                                                                                            \
} while(0)

#else /* !VERBOSE_OPTION_PARSING_LOGGING*/

#define MPTCP_OPT_CHECK_UNDERRUN(shadow_opt, optlen)  do {                                 \
	(void)(optlen);                                                                        \
} while(0)

#define MPTCP_OPT_REPORT_COPY(shadow_opt, optlen)     do {                                 \
	(void)(optlen);                                                                        \
} while(0)
#endif /* !VERBOSE_OPTION_PARSING_LOGGING */


/*
 * Return a pointer suitable for parsing the option at `optp' as a
 * `__typeof__(shadow_opt)'.  Implements the "shadow storage" pattern
 * described above: when at least sizeof(shadow_opt) bytes are available
 * before `optend', the wire bytes are used in place (cast path);
 * otherwise the available bytes are copied into `shadow_opt' (with the
 * remainder zero-filled) and a pointer to that local copy is returned,
 * so that `-fbounds-safety' never sees an undersized pointee.
 */
#define MPTCP_OPT_GET(shadow_opt, optp, optend, optlen)   ({                                \
	__typeof__((shadow_opt)) * __single opt_ptr;                                            \
                                                                                            \
	ptrdiff_t available = (optend) - (optp);                                                \
                                                                                            \
    MPTCP_OPT_CHECK_UNDERRUN(shadow_opt, optlen);                                           \
                                                                                            \
	if (__improbable(available < sizeof((shadow_opt)))) {                                   \
	        MPTCP_OPT_REPORT_COPY(shadow_opt, available);                                   \
	        memset((caddr_t)&(shadow_opt) + available,                                      \
	                0, sizeof((shadow_opt)) - available);                                   \
	        memcpy(&(shadow_opt), (optp), available);                                       \
	        opt_ptr = &(shadow_opt);                                                        \
	} else {                                                                                \
	        opt_ptr = __unsafe_forge_single(__typeof__((shadow_opt))*, (optp));             \
	}                                                                                       \
	opt_ptr;                                                                                \
})
980 
981 static int
mptcp_sanitize_option(struct tcpcb * tp,int mptcp_subtype)982 mptcp_sanitize_option(struct tcpcb *tp, int mptcp_subtype)
983 {
984 	struct mptcb *mp_tp = tptomptp(tp);
985 	int ret = 1;
986 
987 	switch (mptcp_subtype) {
988 	case MPO_CAPABLE:
989 		break;
990 	case MPO_JOIN:                  /* fall through */
991 	case MPO_DSS:                   /* fall through */
992 	case MPO_FASTCLOSE:             /* fall through */
993 	case MPO_FAIL:                  /* fall through */
994 	case MPO_REMOVE_ADDR:           /* fall through */
995 	case MPO_ADD_ADDR:              /* fall through */
996 	case MPO_PRIO:                  /* fall through */
997 		if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) {
998 			ret = 0;
999 		}
1000 		break;
1001 	default:
1002 		ret = 0;
1003 		os_log_error(mptcp_log_handle, "%s - %lx: type = %d \n", __func__,
1004 		    (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte), mptcp_subtype);
1005 		break;
1006 	}
1007 	return ret;
1008 }
1009 
1010 static int
mptcp_valid_mpcapable_common_opt(struct mptcp_mpcapable_opt_common * crsp)1011 mptcp_valid_mpcapable_common_opt(struct mptcp_mpcapable_opt_common  *crsp)
1012 {
1013 	/* mmco_kind, mmco_len and mmco_subtype are validated before */
1014 
1015 	if (!(crsp->mmco_flags & MPCAP_PROPOSAL_SBIT)) {
1016 		return 0;
1017 	}
1018 
1019 	if (crsp->mmco_flags & (MPCAP_BBIT | MPCAP_DBIT |
1020 	    MPCAP_EBIT | MPCAP_FBIT | MPCAP_GBIT)) {
1021 		return 0;
1022 	}
1023 
1024 	return 1;
1025 }
1026 
1027 
/*
 * Process an inbound MP_CAPABLE option on a SYN/ACK (the only segment it
 * is accepted on here).  Validates the option's flags and length, stores
 * the peer's key, negotiates the MPTCP version (downgrading to the
 * peer's if lower), derives the remote parameters, and on success marks
 * the flow as fully MPTCP (TMPF_MPTCP_TRUE) with keys still to be sent
 * (TMPF_SND_KEYS).
 */
static void
mptcp_do_mpcapable_opt(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend, struct tcphdr *th,
    uint8_t optlen)
{
	struct mptcp_mpcapable_opt_common   crsp_s, *crsp;
	crsp = MPTCP_OPT_GET(crsp_s, cp, optend, optlen);
	struct mptcp_mpcapable_opt_rsp rsp_s, *rsp = NULL;
	struct mptcb *mp_tp = tptomptp(tp);
	struct mptses *mpte = mp_tp->mpt_mpte;

	/* Only valid on SYN/ACK */
	if ((th->th_flags & (TH_SYN | TH_ACK)) != (TH_SYN | TH_ACK)) {
		return;
	}

	/* Validate the kind, len, flags */
	if (mptcp_valid_mpcapable_common_opt(crsp) != 1) {
		tcpstat.tcps_invalid_mpcap++;
		return;
	}

	/* handle SYN/ACK retransmission by acknowledging with ACK */
	if (mp_tp->mpt_state >= MPTCPS_ESTABLISHED) {
		return;
	}

	/* A SYN/ACK contains peer's key and flags */
	if (optlen != sizeof(struct mptcp_mpcapable_opt_rsp)) {
		/* complain */
		os_log_error(mptcp_log_handle, "%s - %lx: SYN_ACK optlen = %u, sizeof mp opt = %lu \n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), optlen,
		    sizeof(struct mptcp_mpcapable_opt_rsp));
		tcpstat.tcps_invalid_mpcap++;
		return;
	}

	/*
	 * If checksum flag is set, enable MPTCP checksum, even if
	 * it was not negotiated on the first SYN.
	 */
	if (crsp->mmco_flags & MPCAP_CHECKSUM_CBIT) {
		mp_tp->mpt_flags |= MPTCPF_CHECKSUM;
	}

	if (crsp->mmco_flags & MPCAP_UNICAST_IPBIT) {
		mpte->mpte_flags |= MPTE_UNICAST_IP;

		/* We need an explicit signal for the addresses - zero the existing ones */
		memset(&mpte->mpte_sub_dst_v4, 0, sizeof(mpte->mpte_sub_dst_v4));
		memset(&mpte->mpte_sub_dst_v6, 0, sizeof(mpte->mpte_sub_dst_v6));
	}

	/* Re-parse as the full response layout now that optlen checked out. */
	rsp = MPTCP_OPT_GET(rsp_s, cp, optend, optlen);
	/* The peer's "local" key is our remote key. */
	mp_tp->mpt_remotekey = rsp->mmc_localkey;
	/* For now just downgrade to the peer's version */
	if (rsp->mmc_common.mmco_version < mp_tp->mpt_version) {
		os_log_error(mptcp_log_handle, "local version: %d > peer version %d", mp_tp->mpt_version, rsp->mmc_common.mmco_version);
		mp_tp->mpt_version = rsp->mmc_common.mmco_version;
		tcpstat.tcps_mp_verdowngrade++;
		return;
	}
	if (mptcp_init_remote_parms(mp_tp) != 0) {
		tcpstat.tcps_invalid_mpcap++;
		return;
	}
	tcp_heuristic_mptcp_success(tp);
	tcp_cache_update_mptcp_version(tp, TRUE);
	tp->t_mpflags |= (TMPF_SND_KEYS | TMPF_MPTCP_TRUE);
}
1097 
1098 
/*
 * Process an inbound MP_JOIN option on a SYN/ACK.  Records the peer's
 * address id and random number, then verifies the truncated HMAC the
 * peer computed over the exchanged randoms.  Any validation failure
 * tears the subflow down with a RST; on success the subflow is marked
 * as having sent its JOIN and owing the final handshake ACK.
 */
static void
mptcp_do_mpjoin_opt(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend, struct tcphdr *th, uint8_t optlen)
{
/* Count the invalid join and force a RST on the subflow's socket. */
#define MPTCP_JOPT_ERROR_PATH(tp) {                                     \
	tcpstat.tcps_invalid_joins++;                                   \
	if (tp->t_inpcb->inp_socket != NULL) {                          \
	        soevent(tp->t_inpcb->inp_socket,                        \
	            SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);        \
	}                                                               \
}
	int error = 0;
	struct mptcp_mpjoin_opt_rsp join_rsp_s, *join_rsp;
	join_rsp = MPTCP_OPT_GET(join_rsp_s, cp, optend, optlen);

	/* Only valid on SYN/ACK */
	if ((th->th_flags & (TH_SYN | TH_ACK)) != (TH_SYN | TH_ACK)) {
		return;
	}

	if (optlen != sizeof(struct mptcp_mpjoin_opt_rsp)) {
		os_log_error(mptcp_log_handle, "%s - %lx: SYN_ACK: unexpected optlen = %u mp option = %lu\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(tptomptp(tp)->mpt_mpte),
		    optlen, sizeof(struct mptcp_mpjoin_opt_rsp));
		tp->t_mpflags &= ~TMPF_PREESTABLISHED;
		/* send RST and close */
		MPTCP_JOPT_ERROR_PATH(tp);
		return;
	}

	/* Stash the peer's addr id and random for later HMAC computations. */
	mptcp_set_raddr_rand(tp->t_local_aid, tptomptp(tp),
	    join_rsp->mmjo_addr_id, join_rsp->mmjo_rand);
	error = mptcp_validate_join_hmac(tp,
	    (u_char*)&join_rsp->mmjo_mac, HMAC_TRUNCATED_SYNACK);
	if (error) {
		os_log_error(mptcp_log_handle, "%s - %lx: SYN_ACK error = %d \n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(tptomptp(tp)->mpt_mpte),
		    error);
		tp->t_mpflags &= ~TMPF_PREESTABLISHED;
		/* send RST and close */
		MPTCP_JOPT_ERROR_PATH(tp);
		return;
	}
	tp->t_mpflags |= (TMPF_SENT_JOIN | TMPF_SND_JACK);
}
1143 
/*
 * Verify the peer's truncated HMAC from an MP_JOIN SYN/ACK.
 *
 * The digest is computed over the remote and local randoms, keyed with
 * the connection's remote/local keys - SHA-1 for MPTCP v0, SHA-256 for
 * later versions.  Only the first mac_len bytes are compared.
 * Returns 0 on match, -1 on failure (missing randoms or mismatch).
 */
static int
mptcp_validate_join_hmac(struct tcpcb *tp, u_char* hmac __sized_by(mac_len), int mac_len)
{
	u_char digest[MAX(SHA1_RESULTLEN, SHA256_DIGEST_LENGTH)] = {0};
	struct mptcb *mp_tp = tptomptp(tp);
	u_int32_t rem_rand, loc_rand;

	rem_rand = loc_rand = 0;

	mptcp_get_rands(tp->t_local_aid, mp_tp, &loc_rand, &rem_rand);
	/* A zero random means it was never recorded - cannot validate. */
	if ((rem_rand == 0) || (loc_rand == 0)) {
		return -1;
	}

	if (mp_tp->mpt_version == MPTCP_VERSION_0) {
		mptcp_hmac_sha1(mp_tp->mpt_remotekey, mp_tp->mpt_localkey, rem_rand, loc_rand,
		    digest);
	} else {
		/* v1: HMAC-SHA256 over the two 32-bit randoms (8 bytes). */
		uint32_t data[2];
		data[0] = rem_rand;
		data[1] = loc_rand;
		mptcp_hmac_sha256(mp_tp->mpt_remotekey, mp_tp->mpt_localkey, (u_char *)data, 8, digest);
	}

	if (bcmp(digest, hmac, mac_len) == 0) {
		return 0; /* matches */
	} else {
		/*
		 * NOTE(review): this prints the raw MPTCP keys and randoms to
		 * the kernel log on mismatch.  These values are
		 * security-sensitive - confirm this debug aid is intended.
		 */
		printf("%s: remote key %llx local key %llx remote rand %x "
		    "local rand %x \n", __func__, mp_tp->mpt_remotekey, mp_tp->mpt_localkey,
		    rem_rand, loc_rand);
		return -1;
	}
}
1177 
/*
 * Update the mptcb send state variables, but the actual sbdrop occurs
 * in MPTCP layer.
 *
 * full_dack is the 64-bit data-level ACK received on this subflow; the
 * caller guarantees mpt_snduna <= full_dack <= mpt_sndmax.  Drops the
 * newly-acked bytes from the MPTCP send buffer, advances mpt_snduna,
 * and drives DATA_FIN state transitions when everything has been acked.
 */
void
mptcp_data_ack_rcvd(struct mptcb *mp_tp, struct tcpcb *tp, u_int64_t full_dack)
{
	uint64_t acked = full_dack - mp_tp->mpt_snduna;

	VERIFY(acked <= INT_MAX);

	if (acked) {
		struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);

		if (acked > mp_so->so_snd.sb_cc) {
			/*
			 * Acked more than is buffered: only legitimate by one
			 * byte (the DATA_FIN) while closing - otherwise log.
			 */
			if (acked > mp_so->so_snd.sb_cc + 1 ||
			    mp_tp->mpt_state < MPTCPS_FIN_WAIT_1) {
				os_log_error(mptcp_log_handle, "%s - %lx: acked %u, sb_cc %u full %u suna %u state %u\n",
				    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte),
				    (uint32_t)acked, mp_so->so_snd.sb_cc,
				    (uint32_t)full_dack, (uint32_t)mp_tp->mpt_snduna,
				    mp_tp->mpt_state);
			}

			sbdrop(&mp_so->so_snd, (int)mp_so->so_snd.sb_cc);
		} else {
			sbdrop(&mp_so->so_snd, (int)acked);
		}

		mp_tp->mpt_snduna += acked;
		/* In degraded mode, we may get some Data ACKs */
		if ((tp->t_mpflags & TMPF_TCP_FALLBACK) &&
		    !(mp_tp->mpt_flags & MPTCPF_POST_FALLBACK_SYNC) &&
		    MPTCP_SEQ_GT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna)) {
			/* bring back sndnxt to retransmit MPTCP data */
			mp_tp->mpt_sndnxt = mp_tp->mpt_dsn_at_csum_fail;
			mp_tp->mpt_flags |= MPTCPF_POST_FALLBACK_SYNC;
			tp->t_inpcb->inp_socket->so_flags1 |=
			    SOF1_POST_FALLBACK_SYNC;
		}

		/* Acked data may make queued reinjections obsolete. */
		mptcp_clean_reinjectq(mp_tp->mpt_mpte);

		sowwakeup(mp_so);
	}
	/* Everything sent (incl. DATA_FIN) has been acked - advance the FSM. */
	if (full_dack == mp_tp->mpt_sndmax &&
	    mp_tp->mpt_state >= MPTCPS_FIN_WAIT_1) {
		mptcp_close_fsm(mp_tp, MPCE_RECV_DATA_ACK);
		tp->t_mpflags &= ~TMPF_SEND_DFIN;
	}

	/* Stop echoing keys once data past the IDSN has been acked. */
	if ((tp->t_mpflags & TMPF_SND_KEYS) &&
	    MPTCP_SEQ_GT(mp_tp->mpt_snduna, mp_tp->mpt_local_idsn + 1)) {
		tp->t_mpflags &= ~TMPF_SND_KEYS;
	}
}
1234 
1235 void
mptcp_update_window_wakeup(struct tcpcb * tp)1236 mptcp_update_window_wakeup(struct tcpcb *tp)
1237 {
1238 	struct mptcb *mp_tp = tptomptp(tp);
1239 
1240 	socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte));
1241 
1242 	if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) {
1243 		mp_tp->mpt_sndwnd = tp->snd_wnd;
1244 		mp_tp->mpt_sndwl1 = mp_tp->mpt_rcvnxt;
1245 		mp_tp->mpt_sndwl2 = mp_tp->mpt_snduna;
1246 	}
1247 
1248 	sowwakeup(tp->t_inpcb->inp_socket);
1249 }
1250 
1251 static void
mptcp_update_window(struct mptcb * mp_tp,u_int64_t ack,u_int64_t seq,u_int32_t tiwin)1252 mptcp_update_window(struct mptcb *mp_tp, u_int64_t ack, u_int64_t seq, u_int32_t tiwin)
1253 {
1254 	if (MPTCP_SEQ_LT(mp_tp->mpt_sndwl1, seq) ||
1255 	    (mp_tp->mpt_sndwl1 == seq &&
1256 	    (MPTCP_SEQ_LT(mp_tp->mpt_sndwl2, ack) ||
1257 	    (mp_tp->mpt_sndwl2 == ack && tiwin > mp_tp->mpt_sndwnd)))) {
1258 		mp_tp->mpt_sndwnd = tiwin;
1259 		mp_tp->mpt_sndwl1 = seq;
1260 		mp_tp->mpt_sndwl2 = ack;
1261 	}
1262 }
1263 
/*
 * Apply a received Data ACK (full_dack, already extended to 64 bit) to
 * the MPTCP connection: drop acked data, drive close-state transitions,
 * notify waiters, and update the MPTCP-level send window.  full_dsn is
 * used only as the window-update sequence number.
 */
static void
mptcp_do_dss_opt_ack_meat(u_int64_t full_dack, u_int64_t full_dsn,
    struct tcpcb *tp, u_int32_t tiwin)
{
	struct mptcb *mp_tp = tptomptp(tp);
	int close_notify = 0;

	tp->t_mpflags |= TMPF_RCVD_DACK;

	/* Only process acks within [snduna, sndmax]; ignore stale/absurd ones. */
	if (MPTCP_SEQ_LEQ(full_dack, mp_tp->mpt_sndmax) &&
	    MPTCP_SEQ_GEQ(full_dack, mp_tp->mpt_snduna)) {
		mptcp_data_ack_rcvd(mp_tp, tp, full_dack);
		if (mp_tp->mpt_state > MPTCPS_FIN_WAIT_2) {
			close_notify = 1;
		}
		if (mp_tp->mpt_flags & MPTCPF_RCVD_64BITACK) {
			/* A 64-bit ack round-trip completed - drop back to 32-bit DSNs. */
			mp_tp->mpt_flags &= ~MPTCPF_RCVD_64BITACK;
			mp_tp->mpt_flags &= ~MPTCPF_SND_64BITDSN;
		}
		mptcp_notify_mpready(tp->t_inpcb->inp_socket);
		if (close_notify) {
			mptcp_notify_close(tp->t_inpcb->inp_socket);
		}
	}

	mptcp_update_window(mp_tp, full_dack, full_dsn, tiwin);
}
1291 
1292 static void
mptcp_do_dss_opt_meat(u_char * cp __ended_by (optend),u_char * optend __unused,struct tcpcb * tp,struct tcphdr * th,uint8_t optlen)1293 mptcp_do_dss_opt_meat(u_char *cp __ended_by(optend), u_char *optend __unused, struct tcpcb *tp, struct tcphdr *th, uint8_t optlen)
1294 {
1295 	struct mptcp_dss_copt dss_rsp_s, *dss_rsp;
1296 	dss_rsp = MPTCP_OPT_GET(dss_rsp_s, cp, optend, optlen);
1297 	u_int64_t full_dack = 0;
1298 	u_int32_t tiwin = th->th_win << tp->snd_scale;
1299 	struct mptcb *mp_tp = tptomptp(tp);
1300 	int csum_len = 0;
1301 
1302 #define MPTCP_DSS_OPT_SZ_CHK(len, expected_len) {                                 \
1303 	if (len != expected_len) {                                                \
1304 	        os_log_error(mptcp_log_handle, "%s - %lx: bad len = %d dss: %x\n",\
1305 	            __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte), \
1306 	            len, dss_rsp->mdss_flags);                                    \
1307 	        return;                                                           \
1308 	}                                                                         \
1309 }
1310 
1311 	if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
1312 		csum_len = 2;
1313 	}
1314 
1315 	dss_rsp->mdss_flags &= (MDSS_A | MDSS_a | MDSS_M | MDSS_m);
1316 	switch (dss_rsp->mdss_flags) {
1317 	case (MDSS_M):
1318 	{
1319 		/* 32-bit DSS, No Data ACK */
1320 		struct mptcp_dsn_opt dss_rsp1_s, *dss_rsp1;
1321 		dss_rsp1 = MPTCP_OPT_GET(dss_rsp1_s, cp, optend, optlen);
1322 
1323 		MPTCP_DSS_OPT_SZ_CHK(dss_rsp1->mdss_copt.mdss_len,
1324 		    sizeof(struct mptcp_dsn_opt) + csum_len);
1325 		if (csum_len == 0) {
1326 			mptcp_update_dss_rcv_state(dss_rsp1, tp, 0);
1327 		} else {
1328 			mptcp_update_dss_rcv_state(dss_rsp1, tp,
1329 			    *(uint16_t *)(void *)(cp +
1330 			    (dss_rsp1->mdss_copt.mdss_len - csum_len)));
1331 		}
1332 		break;
1333 	}
1334 	case (MDSS_A):
1335 	{
1336 		/* 32-bit Data ACK, no DSS */
1337 		struct mptcp_data_ack_opt dack_opt_s, *dack_opt;
1338 		dack_opt = MPTCP_OPT_GET(dack_opt_s, cp, optend, optlen);
1339 
1340 		MPTCP_DSS_OPT_SZ_CHK(dack_opt->mdss_copt.mdss_len,
1341 		    sizeof(struct mptcp_data_ack_opt));
1342 
1343 		u_int32_t dack = dack_opt->mdss_ack;
1344 		NTOHL(dack);
1345 		MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack);
1346 		mptcp_do_dss_opt_ack_meat(full_dack, mp_tp->mpt_sndwl1, tp, tiwin);
1347 		break;
1348 	}
1349 	case (MDSS_M | MDSS_A):
1350 	{
1351 		/* 32-bit Data ACK + 32-bit DSS */
1352 		struct mptcp_dss_ack_opt dss_ack_rsp_s, *dss_ack_rsp;
1353 		dss_ack_rsp = MPTCP_OPT_GET(dss_ack_rsp_s, cp, optend, optlen);
1354 		u_int64_t full_dsn;
1355 		uint16_t csum = 0;
1356 
1357 		MPTCP_DSS_OPT_SZ_CHK(dss_ack_rsp->mdss_copt.mdss_len,
1358 		    sizeof(struct mptcp_dss_ack_opt) + csum_len);
1359 
1360 		u_int32_t dack = dss_ack_rsp->mdss_ack;
1361 		NTOHL(dack);
1362 		MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack);
1363 
1364 		NTOHL(dss_ack_rsp->mdss_dsn);
1365 		NTOHL(dss_ack_rsp->mdss_subflow_seqn);
1366 		NTOHS(dss_ack_rsp->mdss_data_len);
1367 		MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt, dss_ack_rsp->mdss_dsn, full_dsn);
1368 
1369 		mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin);
1370 
1371 		if (csum_len != 0) {
1372 			csum = *(uint16_t *)(void *)(cp + (dss_ack_rsp->mdss_copt.mdss_len - csum_len));
1373 		}
1374 
1375 		mptcp_update_rcv_state_meat(mp_tp, tp,
1376 		    full_dsn,
1377 		    dss_ack_rsp->mdss_subflow_seqn,
1378 		    dss_ack_rsp->mdss_data_len,
1379 		    csum);
1380 		break;
1381 	}
1382 	case (MDSS_M | MDSS_m):
1383 	{
1384 		/* 64-bit DSS , No Data ACK */
1385 		struct mptcp_dsn64_opt dsn64_s, *dsn64;
1386 		dsn64 = MPTCP_OPT_GET(dsn64_s, cp, optend, optlen);
1387 		u_int64_t full_dsn;
1388 		uint16_t csum = 0;
1389 
1390 		MPTCP_DSS_OPT_SZ_CHK(dsn64->mdss_copt.mdss_len,
1391 		    sizeof(struct mptcp_dsn64_opt) + csum_len);
1392 
1393 		mp_tp->mpt_flags |= MPTCPF_SND_64BITACK;
1394 
1395 		full_dsn = mptcp_ntoh64(dsn64->mdss_dsn);
1396 		NTOHL(dsn64->mdss_subflow_seqn);
1397 		NTOHS(dsn64->mdss_data_len);
1398 
1399 		if (csum_len != 0) {
1400 			csum = *(uint16_t *)(void *)(cp + dsn64->mdss_copt.mdss_len - csum_len);
1401 		}
1402 
1403 		mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1404 		    dsn64->mdss_subflow_seqn,
1405 		    dsn64->mdss_data_len,
1406 		    csum);
1407 		break;
1408 	}
1409 	case (MDSS_A | MDSS_a):
1410 	{
1411 		/* 64-bit Data ACK, no DSS */
1412 		struct mptcp_data_ack64_opt dack64_s, *dack64;
1413 		dack64 = MPTCP_OPT_GET(dack64_s, cp, optend, optlen);
1414 
1415 		MPTCP_DSS_OPT_SZ_CHK(dack64->mdss_copt.mdss_len,
1416 		    sizeof(struct mptcp_data_ack64_opt));
1417 
1418 		mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK;
1419 
1420 		full_dack = mptcp_ntoh64(dack64->mdss_ack);
1421 		mptcp_do_dss_opt_ack_meat(full_dack, mp_tp->mpt_sndwl1, tp, tiwin);
1422 		break;
1423 	}
1424 	case (MDSS_M | MDSS_m | MDSS_A):
1425 	{
1426 		/* 64-bit DSS + 32-bit Data ACK */
1427 		struct mptcp_dss64_ack32_opt dss_ack_rsp_s, *dss_ack_rsp;
1428 		dss_ack_rsp = MPTCP_OPT_GET(dss_ack_rsp_s, cp, optend, optlen);
1429 		u_int64_t full_dsn;
1430 		uint16_t csum = 0;
1431 
1432 		MPTCP_DSS_OPT_SZ_CHK(dss_ack_rsp->mdss_copt.mdss_len,
1433 		    sizeof(struct mptcp_dss64_ack32_opt) + csum_len);
1434 
1435 		u_int32_t dack = dss_ack_rsp->mdss_ack;
1436 		NTOHL(dack);
1437 		mp_tp->mpt_flags |= MPTCPF_SND_64BITACK;
1438 		MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack);
1439 
1440 		full_dsn = mptcp_ntoh64(dss_ack_rsp->mdss_dsn);
1441 		NTOHL(dss_ack_rsp->mdss_subflow_seqn);
1442 		NTOHS(dss_ack_rsp->mdss_data_len);
1443 
1444 		mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin);
1445 
1446 		if (csum_len != 0) {
1447 			csum = *(uint16_t *)(void *)(cp + dss_ack_rsp->mdss_copt.mdss_len - csum_len);
1448 		}
1449 
1450 		mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1451 		    dss_ack_rsp->mdss_subflow_seqn,
1452 		    dss_ack_rsp->mdss_data_len,
1453 		    csum);
1454 
1455 		break;
1456 	}
1457 	case (MDSS_M | MDSS_A | MDSS_a):
1458 	{
1459 		/* 32-bit DSS + 64-bit Data ACK */
1460 		struct mptcp_dss32_ack64_opt dss32_ack_64_opt_s, *dss32_ack64_opt;
1461 		dss32_ack64_opt = MPTCP_OPT_GET(dss32_ack_64_opt_s, cp, optend, optlen);
1462 		u_int64_t full_dsn;
1463 
1464 		MPTCP_DSS_OPT_SZ_CHK(
1465 			dss32_ack64_opt->mdss_copt.mdss_len,
1466 			sizeof(struct mptcp_dss32_ack64_opt) + csum_len);
1467 
1468 		full_dack = mptcp_ntoh64(dss32_ack64_opt->mdss_ack);
1469 		NTOHL(dss32_ack64_opt->mdss_dsn);
1470 		mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK;
1471 		MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt,
1472 		    dss32_ack64_opt->mdss_dsn, full_dsn);
1473 		NTOHL(dss32_ack64_opt->mdss_subflow_seqn);
1474 		NTOHS(dss32_ack64_opt->mdss_data_len);
1475 
1476 		mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin);
1477 		if (csum_len == 0) {
1478 			mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1479 			    dss32_ack64_opt->mdss_subflow_seqn,
1480 			    dss32_ack64_opt->mdss_data_len, 0);
1481 		} else {
1482 			mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1483 			    dss32_ack64_opt->mdss_subflow_seqn,
1484 			    dss32_ack64_opt->mdss_data_len,
1485 			    *(uint16_t *)(void *)(cp +
1486 			    dss32_ack64_opt->mdss_copt.mdss_len -
1487 			    csum_len));
1488 		}
1489 		break;
1490 	}
1491 	case (MDSS_M | MDSS_m | MDSS_A | MDSS_a):
1492 	{
1493 		/* 64-bit DSS + 64-bit Data ACK */
1494 		struct mptcp_dss64_ack64_opt dss64_ack_64_s, *dss64_ack64;
1495 		dss64_ack64 = MPTCP_OPT_GET(dss64_ack_64_s, cp, optend, optlen);
1496 		u_int64_t full_dsn;
1497 
1498 		MPTCP_DSS_OPT_SZ_CHK(dss64_ack64->mdss_copt.mdss_len,
1499 		    sizeof(struct mptcp_dss64_ack64_opt) + csum_len);
1500 
1501 		mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK;
1502 		mp_tp->mpt_flags |= MPTCPF_SND_64BITACK;
1503 		full_dsn = mptcp_ntoh64(dss64_ack64->mdss_dsn);
1504 		full_dack = mptcp_ntoh64(dss64_ack64->mdss_dsn);
1505 		mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin);
1506 		NTOHL(dss64_ack64->mdss_subflow_seqn);
1507 		NTOHS(dss64_ack64->mdss_data_len);
1508 		if (csum_len == 0) {
1509 			mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1510 			    dss64_ack64->mdss_subflow_seqn,
1511 			    dss64_ack64->mdss_data_len, 0);
1512 		} else {
1513 			mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn,
1514 			    dss64_ack64->mdss_subflow_seqn,
1515 			    dss64_ack64->mdss_data_len,
1516 			    *(uint16_t *)(void *)(cp +
1517 			    dss64_ack64->mdss_copt.mdss_len -
1518 			    csum_len));
1519 		}
1520 		break;
1521 	}
1522 	default:
1523 		break;
1524 	}
1525 }
1526 
1527 static void
mptcp_do_dss_opt(struct tcpcb * tp,u_char * cp __ended_by (optend),u_char * optend,struct tcphdr * th,uint8_t optlen)1528 mptcp_do_dss_opt(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend, struct tcphdr *th, uint8_t optlen)
1529 {
1530 	struct mptcp_dss_copt dss_rsp_s, *dss_rsp;
1531 	dss_rsp = MPTCP_OPT_GET(dss_rsp_s, cp, optend, optlen);
1532 	struct mptcb *mp_tp = tptomptp(tp);
1533 
1534 	if (!mp_tp) {
1535 		return;
1536 	}
1537 
1538 	if (dss_rsp->mdss_subtype == MPO_DSS) {
1539 		if (dss_rsp->mdss_flags & MDSS_F) {
1540 			tp->t_rcv_map.mpt_dfin = 1;
1541 		} else {
1542 			tp->t_rcv_map.mpt_dfin = 0;
1543 		}
1544 
1545 		mptcp_do_dss_opt_meat(cp, optend, tp, th, optlen);
1546 	}
1547 }
1548 
1549 static void
mptcp_do_fastclose_opt(struct tcpcb * tp,u_char * cp __ended_by (optend),u_char * optend __unused,struct tcphdr * th,uint8_t optlen)1550 mptcp_do_fastclose_opt(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend __unused, struct tcphdr *th, uint8_t optlen)
1551 {
1552 	struct mptcb *mp_tp = NULL;
1553 	struct mptcp_fastclose_opt fc_opt_s, *fc_opt;
1554 	fc_opt = MPTCP_OPT_GET(fc_opt_s, cp, optend, optlen);
1555 
1556 	if (th->th_flags != TH_ACK) {
1557 		return;
1558 	}
1559 
1560 	if (fc_opt->mfast_len != sizeof(struct mptcp_fastclose_opt)) {
1561 		tcpstat.tcps_invalid_opt++;
1562 		return;
1563 	}
1564 
1565 	mp_tp = tptomptp(tp);
1566 	if (!mp_tp) {
1567 		return;
1568 	}
1569 
1570 	if (fc_opt->mfast_key != mp_tp->mpt_localkey) {
1571 		tcpstat.tcps_invalid_opt++;
1572 		return;
1573 	}
1574 
1575 	/*
1576 	 * fastclose could make us more vulnerable to attacks, hence
1577 	 * accept only those that are at the next expected sequence number.
1578 	 */
1579 	if (th->th_seq != tp->rcv_nxt) {
1580 		tcpstat.tcps_invalid_opt++;
1581 		return;
1582 	}
1583 
1584 	/* Reset this flow */
1585 	tp->t_mpflags |= TMPF_FASTCLOSERCV;
1586 
1587 	if (tp->t_inpcb->inp_socket != NULL) {
1588 		soevent(tp->t_inpcb->inp_socket,
1589 		    SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
1590 	}
1591 }
1592 
1593 
/*
 * Process an inbound MP_FAIL option (peer detected a DSS checksum
 * failure).  Records the DSN at which the failure occurred, maps it
 * back to a subflow sequence number when possible, and notifies the
 * MPTCP layer so the connection can fall back.
 */
static void
mptcp_do_mpfail_opt(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend __unused, struct tcphdr *th, uint8_t optlen)
{
	struct mptcp_mpfail_opt fail_opt_s, *fail_opt;
	fail_opt = MPTCP_OPT_GET(fail_opt_s, cp, optend, optlen);
	u_int32_t mdss_subflow_seqn = 0;
	struct mptcb *mp_tp;
	int error = 0;

	/*
	 * mpfail could make us more vulnerable to attacks. Hence accept
	 * only those that are the next expected sequence number.
	 */
	if (th->th_seq != tp->rcv_nxt) {
		tcpstat.tcps_invalid_opt++;
		return;
	}

	/* A packet without RST, must atleast have the ACK bit set */
	if ((th->th_flags != TH_ACK) && (th->th_flags != TH_RST)) {
		return;
	}

	if (fail_opt->mfail_len != sizeof(struct mptcp_mpfail_opt)) {
		return;
	}

	mp_tp = tptomptp(tp);

	mp_tp->mpt_flags |= MPTCPF_RECVD_MPFAIL;
	/*
	 * NOTE(review): hton64 is used on an inbound field; the byte swap
	 * is symmetric so the result equals ntoh64, but confirm the naming
	 * is intentional.
	 */
	mp_tp->mpt_dsn_at_csum_fail = mptcp_hton64(fail_opt->mfail_dsn);
	error = mptcp_get_map_for_dsn(tp->t_inpcb->inp_socket,
	    mp_tp->mpt_dsn_at_csum_fail, &mdss_subflow_seqn);
	if (error == 0) {
		mp_tp->mpt_ssn_at_csum_fail = mdss_subflow_seqn;
	}

	mptcp_notify_mpfail(tp->t_inpcb->inp_socket);
}
1633 
1634 static boolean_t
mptcp_validate_add_addr_hmac(struct tcpcb * tp,u_char * hmac __sized_by (mac_len),u_char * msg __sized_by (msg_len),uint16_t msg_len,uint16_t mac_len)1635 mptcp_validate_add_addr_hmac(struct tcpcb *tp, u_char *hmac __sized_by(mac_len),
1636     u_char *msg __sized_by(msg_len), uint16_t msg_len, uint16_t mac_len)
1637 {
1638 	u_char digest[SHA256_DIGEST_LENGTH] = {0};
1639 	struct mptcb *mp_tp = tptomptp(tp);
1640 
1641 	VERIFY(mac_len <= SHA256_DIGEST_LENGTH);
1642 	mptcp_hmac_sha256(mp_tp->mpt_remotekey, mp_tp->mpt_localkey, msg, msg_len, digest);
1643 
1644 	if (bcmp(digest + SHA256_DIGEST_LENGTH - mac_len, hmac, mac_len) == 0) {
1645 		return true; /* matches */
1646 	} else {
1647 		return false;
1648 	}
1649 }
1650 
1651 static void
mptcp_do_add_addr_opt_v1(struct tcpcb * tp,u_char * cp __ended_by (optend),u_char * optend,uint8_t optlen)1652 mptcp_do_add_addr_opt_v1(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend, uint8_t optlen)
1653 {
1654 	struct mptcb *mp_tp = tptomptp(tp);
1655 	struct mptses *mpte = mp_tp->mpt_mpte;
1656 
1657 	struct mptcp_add_addr_opt addr_opt_s, *addr_opt;
1658 	addr_opt = MPTCP_OPT_GET(addr_opt_s, cp, optend, optlen);
1659 
1660 	if (addr_opt->maddr_len != MPTCP_V1_ADD_ADDR_OPT_LEN_V4 &&
1661 	    addr_opt->maddr_len != MPTCP_V1_ADD_ADDR_OPT_LEN_V4 + 2 &&
1662 	    addr_opt->maddr_len != MPTCP_V1_ADD_ADDR_OPT_LEN_V6 &&
1663 	    addr_opt->maddr_len != MPTCP_V1_ADD_ADDR_OPT_LEN_V6 + 2) {
1664 		os_log_error(mptcp_log_handle, "%s - %lx: Wrong ADD_ADDR length %u\n",
1665 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1666 		    addr_opt->maddr_len);
1667 
1668 		return;
1669 	}
1670 
1671 	if ((addr_opt->maddr_flags & MPTCP_V1_ADD_ADDR_ECHO) != 0) {
1672 		os_log(mptcp_log_handle, "%s - %lx: Received ADD_ADDR with echo bit\n",
1673 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte));
1674 
1675 		return;
1676 	}
1677 
1678 	if (addr_opt->maddr_len < MPTCP_V1_ADD_ADDR_OPT_LEN_V6) {
1679 		struct sockaddr_in *dst = &mpte->mpte_sub_dst_v4;
1680 		struct in_addr *addr = &addr_opt->maddr_u.maddr_addrv4;
1681 		in_addr_t haddr = ntohl(addr->s_addr);
1682 
1683 		if (IN_ZERONET(haddr) ||
1684 		    IN_LOOPBACK(haddr) ||
1685 		    IN_LINKLOCAL(haddr) ||
1686 		    IN_DS_LITE(haddr) ||
1687 		    IN_6TO4_RELAY_ANYCAST(haddr) ||
1688 		    IN_MULTICAST(haddr) ||
1689 		    INADDR_BROADCAST == haddr ||
1690 		    IN_PRIVATE(haddr) ||
1691 		    IN_SHARED_ADDRESS_SPACE(haddr)) {
1692 			os_log_error(mptcp_log_handle, "%s - %lx: ADD_ADDR invalid addr: %x\n",
1693 			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1694 			    addr->s_addr);
1695 
1696 			return;
1697 		}
1698 
1699 		u_char *hmac = (void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR);
1700 		uint16_t msg_len = sizeof(struct mptcp_add_addr_hmac_msg_v4);
1701 		struct mptcp_add_addr_hmac_msg_v4 msg  = {0};
1702 		msg.maddr_addrid = addr_opt->maddr_addrid;
1703 		msg.maddr_addr = addr_opt->maddr_u.maddr_addrv4;
1704 		if (addr_opt->maddr_len > MPTCP_V1_ADD_ADDR_OPT_LEN_V4) {
1705 			msg.maddr_port = *(uint16_t *)(void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR - 2);
1706 		}
1707 		if (!mptcp_validate_add_addr_hmac(tp, hmac, (u_char *)&msg, msg_len, HMAC_TRUNCATED_ADD_ADDR)) {
1708 			os_log_error(mptcp_log_handle, "%s - %lx: ADD_ADDR addr: %x invalid HMAC\n",
1709 			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1710 			    addr->s_addr);
1711 			return;
1712 		}
1713 
1714 		dst->sin_len = sizeof(*dst);
1715 		dst->sin_family = AF_INET;
1716 		if (addr_opt->maddr_len > MPTCP_V1_ADD_ADDR_OPT_LEN_V4) {
1717 			dst->sin_port = *(uint16_t *)(void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR - 2);
1718 		} else {
1719 			dst->sin_port = mpte->__mpte_dst_v4.sin_port;
1720 		}
1721 		dst->sin_addr.s_addr = addr->s_addr;
1722 		mpte->sub_dst_addr_id_v4 = addr_opt->maddr_addrid;
1723 		mpte->mpte_last_added_addr_is_v4 = TRUE;
1724 	} else {
1725 		struct sockaddr_in6 *dst = &mpte->mpte_sub_dst_v6;
1726 		struct in6_addr *addr = &addr_opt->maddr_u.maddr_addrv6;
1727 
1728 		if (IN6_IS_ADDR_LINKLOCAL(addr) ||
1729 		    IN6_IS_ADDR_MULTICAST(addr) ||
1730 		    IN6_IS_ADDR_UNSPECIFIED(addr) ||
1731 		    IN6_IS_ADDR_LOOPBACK(addr) ||
1732 		    IN6_IS_ADDR_V4COMPAT(addr) ||
1733 		    IN6_IS_ADDR_V4MAPPED(addr)) {
1734 			char dbuf[MAX_IPv6_STR_LEN];
1735 
1736 			inet_ntop(AF_INET6, addr, dbuf, sizeof(dbuf));
1737 			os_log_error(mptcp_log_handle, "%s - %lx: ADD_ADDRv6 invalid addr: %s\n",
1738 			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1739 			    dbuf);
1740 
1741 			return;
1742 		}
1743 
1744 		u_char *hmac = (void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR);
1745 		uint16_t msg_len = sizeof(struct mptcp_add_addr_hmac_msg_v6);
1746 		struct mptcp_add_addr_hmac_msg_v6 msg  = {0};
1747 		msg.maddr_addrid = addr_opt->maddr_addrid;
1748 		msg.maddr_addr = addr_opt->maddr_u.maddr_addrv6;
1749 		if (addr_opt->maddr_len > MPTCP_V1_ADD_ADDR_OPT_LEN_V6) {
1750 			msg.maddr_port = *(uint16_t *)(void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR - 2);
1751 		}
1752 		if (!mptcp_validate_add_addr_hmac(tp, hmac, (u_char *)&msg, msg_len, HMAC_TRUNCATED_ADD_ADDR)) {
1753 			char dbuf[MAX_IPv6_STR_LEN];
1754 
1755 			inet_ntop(AF_INET6, addr, dbuf, sizeof(dbuf));
1756 			os_log_error(mptcp_log_handle, "%s - %lx: ADD_ADDR addr: %s invalid HMAC\n",
1757 			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1758 			    dbuf);
1759 			return;
1760 		}
1761 
1762 		dst->sin6_len = sizeof(*dst);
1763 		dst->sin6_family = AF_INET6;
1764 		if (addr_opt->maddr_len > MPTCP_V1_ADD_ADDR_OPT_LEN_V6) {
1765 			dst->sin6_port = *(uint16_t *)(void *)(cp + addr_opt->maddr_len - HMAC_TRUNCATED_ADD_ADDR - 2);
1766 		} else {
1767 			dst->sin6_port = mpte->__mpte_dst_v6.sin6_port;
1768 		}
1769 		memcpy(&dst->sin6_addr, addr, sizeof(*addr));
1770 		mpte->sub_dst_addr_id_v6 = addr_opt->maddr_addrid;
1771 		mpte->mpte_last_added_addr_is_v4 = FALSE;
1772 	}
1773 
1774 	os_log(mptcp_log_handle, "%s - %lx: Received ADD_ADDRv1\n",
1775 	    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte));
1776 
	/* Once an incoming ADD_ADDR for v1 is valid, it means that the peer
	 * received our keys.
	 */
1780 	tp->t_mpflags &= ~TMPF_SND_KEYS;
1781 	tp->t_mpflags |= TMPF_MPTCP_ECHO_ADDR;
1782 	tp->t_flags |= TF_ACKNOW;
1783 	mptcp_sched_create_subflows(mpte);
1784 }
1785 
1786 static void
mptcp_do_add_addr_opt_v0(struct mptses * mpte,u_char * cp __ended_by (optend),u_char * optend __unused,uint8_t optlen)1787 mptcp_do_add_addr_opt_v0(struct mptses *mpte, u_char *cp __ended_by(optend), u_char *optend __unused, uint8_t optlen)
1788 {
1789 	struct mptcp_add_addr_opt addr_opt_s, *addr_opt;
1790 	addr_opt = MPTCP_OPT_GET(addr_opt_s, cp, optend, optlen);
1791 
1792 	if (addr_opt->maddr_len != MPTCP_V0_ADD_ADDR_OPT_LEN_V4 &&
1793 	    addr_opt->maddr_len != MPTCP_V0_ADD_ADDR_OPT_LEN_V6) {
1794 		os_log_error(mptcp_log_handle, "%s - %lx: Wrong ADD_ADDR length %u\n",
1795 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1796 		    addr_opt->maddr_len);
1797 
1798 		return;
1799 	}
1800 
1801 	if (addr_opt->maddr_len == MPTCP_V0_ADD_ADDR_OPT_LEN_V4 &&
1802 	    addr_opt->maddr_flags != MPTCP_V0_ADD_ADDR_IPV4) {
1803 		os_log_error(mptcp_log_handle, "%s - %lx: ADD_ADDR length for v4 but version is %u\n",
1804 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1805 		    addr_opt->maddr_flags);
1806 
1807 		return;
1808 	}
1809 
1810 	if (addr_opt->maddr_len == MPTCP_V0_ADD_ADDR_OPT_LEN_V6 &&
1811 	    addr_opt->maddr_flags != MPTCP_V0_ADD_ADDR_IPV6) {
1812 		os_log_error(mptcp_log_handle, "%s - %lx: ADD_ADDR length for v6 but version is %u\n",
1813 		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1814 		    addr_opt->maddr_flags);
1815 
1816 		return;
1817 	}
1818 
1819 	if (addr_opt->maddr_len == MPTCP_V0_ADD_ADDR_OPT_LEN_V4) {
1820 		struct sockaddr_in *dst = &mpte->mpte_sub_dst_v4;
1821 		struct in_addr *addr = &addr_opt->maddr_u.maddr_addrv4;
1822 		in_addr_t haddr = ntohl(addr->s_addr);
1823 
1824 		if (IN_ZERONET(haddr) ||
1825 		    IN_LOOPBACK(haddr) ||
1826 		    IN_LINKLOCAL(haddr) ||
1827 		    IN_DS_LITE(haddr) ||
1828 		    IN_6TO4_RELAY_ANYCAST(haddr) ||
1829 		    IN_MULTICAST(haddr) ||
1830 		    INADDR_BROADCAST == haddr ||
1831 		    IN_PRIVATE(haddr) ||
1832 		    IN_SHARED_ADDRESS_SPACE(haddr)) {
1833 			os_log_error(mptcp_log_handle, "%s - %lx: ADD_ADDR invalid addr: %x\n",
1834 			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1835 			    addr->s_addr);
1836 
1837 			return;
1838 		}
1839 
1840 		dst->sin_len = sizeof(*dst);
1841 		dst->sin_family = AF_INET;
1842 		dst->sin_port = mpte->__mpte_dst_v4.sin_port;
1843 		dst->sin_addr.s_addr = addr->s_addr;
1844 		mpte->mpte_last_added_addr_is_v4 = TRUE;
1845 	} else {
1846 		struct sockaddr_in6 *dst = &mpte->mpte_sub_dst_v6;
1847 		struct in6_addr *addr = &addr_opt->maddr_u.maddr_addrv6;
1848 
1849 		if (IN6_IS_ADDR_LINKLOCAL(addr) ||
1850 		    IN6_IS_ADDR_MULTICAST(addr) ||
1851 		    IN6_IS_ADDR_UNSPECIFIED(addr) ||
1852 		    IN6_IS_ADDR_LOOPBACK(addr) ||
1853 		    IN6_IS_ADDR_V4COMPAT(addr) ||
1854 		    IN6_IS_ADDR_V4MAPPED(addr)) {
1855 			char dbuf[MAX_IPv6_STR_LEN];
1856 
1857 			inet_ntop(AF_INET6, addr, dbuf, sizeof(dbuf));
1858 			os_log_error(mptcp_log_handle, "%s - %lx: ADD_ADDRv6 invalid addr: %s\n",
1859 			    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
1860 			    dbuf);
1861 
1862 			return;
1863 		}
1864 
1865 		dst->sin6_len = sizeof(*dst);
1866 		dst->sin6_family = AF_INET6;
1867 		dst->sin6_port = mpte->__mpte_dst_v6.sin6_port;
1868 		dst->sin6_addr = *addr;
1869 		mpte->mpte_last_added_addr_is_v4 = FALSE;
1870 	}
1871 
1872 	os_log(mptcp_log_handle, "%s - %lx: Received ADD_ADDRv0\n",
1873 	    __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte));
1874 
1875 	mptcp_sched_create_subflows(mpte);
1876 }
1877 
1878 void
tcp_do_mptcp_options(struct tcpcb * tp,u_char * cp __ended_by (optend),u_char * optend,struct tcphdr * th,struct tcpopt * to,uint8_t optlen)1879 tcp_do_mptcp_options(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend, struct tcphdr *th,
1880     struct tcpopt *to, uint8_t optlen)
1881 {
1882 	int mptcp_subtype = 0;
1883 	struct mptcb *mp_tp = tptomptp(tp);
1884 
1885 	/* We expect the TCP stack to ensure this */
1886 	ASSERT(cp + optlen <= optend);
1887 
1888 	if (mp_tp == NULL) {
1889 		return;
1890 	}
1891 
1892 	socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte));
1893 
1894 	/* All MPTCP options have atleast 4 bytes */
1895 	if (optlen < 4) {
1896 		return;
1897 	}
1898 
1899 	mptcp_subtype = (cp[2] >> 4);
1900 
1901 	if (mptcp_sanitize_option(tp, mptcp_subtype) == 0) {
1902 		return;
1903 	}
1904 
1905 	switch (mptcp_subtype) {
1906 	case MPO_CAPABLE:
1907 		mptcp_do_mpcapable_opt(tp, cp, optend, th, optlen);
1908 		break;
1909 	case MPO_JOIN:
1910 		mptcp_do_mpjoin_opt(tp, cp, optend, th, optlen);
1911 		break;
1912 	case MPO_DSS:
1913 		mptcp_do_dss_opt(tp, cp, optend, th, optlen);
1914 		break;
1915 	case MPO_FASTCLOSE:
1916 		mptcp_do_fastclose_opt(tp, cp, optend, th, optlen);
1917 		break;
1918 	case MPO_FAIL:
1919 		mptcp_do_mpfail_opt(tp, cp, optend, th, optlen);
1920 		break;
1921 	case MPO_ADD_ADDR:
1922 		if (mp_tp->mpt_version == MPTCP_VERSION_0) {
1923 			mptcp_do_add_addr_opt_v0(mp_tp->mpt_mpte, cp, optend, optlen);
1924 		} else {
1925 			mptcp_do_add_addr_opt_v1(tp, cp, optend, optlen);
1926 		}
1927 		break;
1928 	case MPO_REMOVE_ADDR:           /* fall through */
1929 	case MPO_PRIO:
1930 		to->to_flags |= TOF_MPTCP;
1931 		break;
1932 	default:
1933 		break;
1934 	}
1935 	return;
1936 }
1937 
1938 /* REMOVE_ADDR option is sent when a source address goes away */
1939 static void
mptcp_send_remaddr_opt(struct tcpcb * tp,struct mptcp_remaddr_opt * opt)1940 mptcp_send_remaddr_opt(struct tcpcb *tp, struct mptcp_remaddr_opt *opt)
1941 {
1942 	bzero(opt, sizeof(*opt));
1943 	opt->mr_kind = TCPOPT_MULTIPATH;
1944 	opt->mr_len = sizeof(*opt);
1945 	opt->mr_subtype = MPO_REMOVE_ADDR;
1946 	opt->mr_addr_id = tp->t_rem_aid;
1947 	tp->t_mpflags &= ~TMPF_SND_REM_ADDR;
1948 }
1949 
1950 static int
mptcp_echo_add_addr(struct tcpcb * tp,u_char * cp __ended_by (optend),u_char * optend __unused,unsigned int optlen)1951 mptcp_echo_add_addr(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend __unused, unsigned int optlen)
1952 {
1953 	struct mptcp_add_addr_opt mpaddr;
1954 	struct mptcb *mp_tp = tptomptp(tp);
1955 	struct mptses *mpte = mp_tp->mpt_mpte;
1956 
1957 	// MPTCP v0 doesn't require echoing add_addr
1958 	if (mp_tp->mpt_version == MPTCP_VERSION_0) {
1959 		return optlen;
1960 	}
1961 
1962 	size_t mpaddr_size = mpte->mpte_last_added_addr_is_v4 ? MPTCP_V1_ADD_ADDR_ECHO_OPT_LEN_V4 : MPTCP_V1_ADD_ADDR_ECHO_OPT_LEN_V6;
1963 	if ((MAX_TCPOPTLEN - optlen) < mpaddr_size) {
1964 		return optlen;
1965 	}
1966 
1967 	bzero(&mpaddr, sizeof(mpaddr));
1968 	mpaddr.maddr_kind = TCPOPT_MULTIPATH;
1969 	mpaddr.maddr_len = (uint8_t)mpaddr_size;
1970 	mpaddr.maddr_subtype = MPO_ADD_ADDR;
1971 	mpaddr.maddr_flags = MPTCP_V1_ADD_ADDR_ECHO;
1972 	if (mpte->mpte_last_added_addr_is_v4) {
1973 		mpaddr.maddr_u.maddr_addrv4.s_addr = mpte->mpte_sub_dst_v4.sin_addr.s_addr;
1974 		mpaddr.maddr_addrid = mpte->sub_dst_addr_id_v4;
1975 	} else {
1976 		mpaddr.maddr_u.maddr_addrv6 = mpte->mpte_sub_dst_v6.sin6_addr;
1977 		mpaddr.maddr_addrid = mpte->sub_dst_addr_id_v6;
1978 	}
1979 
1980 	memcpy(cp + optlen, &mpaddr, mpaddr_size);
1981 	optlen += mpaddr_size;
1982 	tp->t_mpflags &= ~TMPF_MPTCP_ECHO_ADDR;
1983 	return optlen;
1984 }
1985 
1986 /* We send MP_PRIO option based on the values set by the SIOCSCONNORDER ioctl */
1987 static int
mptcp_snd_mpprio(struct tcpcb * tp,u_char * cp __ended_by (optend),u_char * optend __unused,int optlen)1988 mptcp_snd_mpprio(struct tcpcb *tp, u_char *cp __ended_by(optend), u_char *optend __unused, int optlen)
1989 {
1990 	struct mptcp_mpprio_addr_opt mpprio;
1991 	struct mptcb *mp_tp = tptomptp(tp);
1992 	size_t mpprio_size = sizeof(mpprio);
1993 	// MP_PRIO of MPTCPv1 doesn't include AddrID
1994 	if (mp_tp->mpt_version == MPTCP_VERSION_1) {
1995 		mpprio_size -= sizeof(uint8_t);
1996 	}
1997 
1998 	if (tp->t_state != TCPS_ESTABLISHED) {
1999 		tp->t_mpflags &= ~TMPF_SND_MPPRIO;
2000 		return optlen;
2001 	}
2002 
2003 	if ((MAX_TCPOPTLEN - optlen) < (int)mpprio_size) {
2004 		return optlen;
2005 	}
2006 
2007 	bzero(&mpprio, sizeof(mpprio));
2008 	mpprio.mpprio_kind = TCPOPT_MULTIPATH;
2009 	mpprio.mpprio_len = (uint8_t)mpprio_size;
2010 	mpprio.mpprio_subtype = MPO_PRIO;
2011 	if (tp->t_mpflags & TMPF_BACKUP_PATH) {
2012 		mpprio.mpprio_flags |= MPTCP_MPPRIO_BKP;
2013 	}
2014 	mpprio.mpprio_addrid = tp->t_local_aid;
2015 	memcpy(cp + optlen, &mpprio, mpprio_size);
2016 	optlen += mpprio_size;
2017 	tp->t_mpflags &= ~TMPF_SND_MPPRIO;
2018 	return optlen;
2019 }
2020