xref: /xnu-11417.140.69/bsd/netinet/ip_encap.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*	$FreeBSD: src/sys/netinet/ip_encap.c,v 1.1.2.2 2001/07/03 11:01:46 ume Exp $	*/
29 /*	$KAME: ip_encap.c,v 1.41 2001/03/15 08:35:08 itojun Exp $	*/
30 
31 /*
32  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
33  * All rights reserved.
34  *
35  * Redistribution and use in source and binary forms, with or without
36  * modification, are permitted provided that the following conditions
37  * are met:
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  * 3. Neither the name of the project nor the names of its contributors
44  *    may be used to endorse or promote products derived from this software
45  *    without specific prior written permission.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
48  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
51  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57  * SUCH DAMAGE.
58  */
59 /*
60  * My grandfather said that there's a devil inside tunnelling technology...
61  *
62  * We have surprisingly many protocols that want packets with IP protocol
63  * #4 or #41.  Here's a list of protocols that want protocol #41:
64  *	RFC1933 configured tunnel
65  *	RFC1933 automatic tunnel
66  *	RFC2401 IPsec tunnel
67  *	RFC2473 IPv6 generic packet tunnelling
68  *	RFC2529 6over4 tunnel
69  *	mobile-ip6 (uses RFC2473)
70  *	6to4 tunnel
71  * Here's a list of protocol that want protocol #4:
72  *	RFC1853 IPv4-in-IPv4 tunnelling
73  *	RFC2003 IPv4 encapsulation within IPv4
74  *	RFC2344 reverse tunnelling for mobile-ip4
75  *	RFC2401 IPsec tunnel
76  * Well, what can I say.  They impose different en/decapsulation mechanism
77  * from each other, so they need separate protocol handler.  The only one
78  * we can easily determine by protocol # is IPsec, which always has
79  * AH/ESP header right after outer IP header.
80  *
81  * So, clearly good old protosw does not work for protocol #4 and #41.
82  * The code will let you match protocol via src/dst address pair.
83  */
84 /* XXX is M_NETADDR correct? */
85 
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/socket.h>
89 #include <sys/sockio.h>
90 #include <sys/mbuf.h>
91 #include <sys/mcache.h>
92 #include <sys/errno.h>
93 #include <sys/domain.h>
94 #include <sys/protosw.h>
95 #include <sys/queue.h>
96 
97 #include <net/if.h>
98 #include <net/route.h>
99 
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/ip.h>
103 #include <netinet/ip_var.h>
104 #include <netinet/ip_encap.h>
105 
106 #include <netinet/ip6.h>
107 #include <netinet6/ip6_var.h>
108 #include <netinet6/ip6protosw.h>
109 
110 #include <net/net_osdep.h>
111 #include <net/sockaddr_utils.h>
112 
113 #ifndef __APPLE__
114 #include <sys/kernel.h>
115 #include <sys/malloc.h>
116 MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
117 #endif
118 
/* forward declarations for file-local helpers */
static void encap_add_locked(struct encaptab *);
static int mask_match(const struct encaptab *, const struct sockaddr *,
    const struct sockaddr *);
static void encap_fillarg(struct mbuf *, void *arg);

/* global list of attached encapsulation handlers, guarded by encaptab_lock */
LIST_HEAD(, encaptab) encaptab = LIST_HEAD_INITIALIZER(&encaptab);

/* rw lock: taken shared by the input paths, exclusive by attach/detach */
static LCK_GRP_DECLARE(encaptab_lock_grp, "encaptab lock");
static LCK_RW_DECLARE(encaptab_lock, &encaptab_lock_grp);
128 
#if INET
/*
 * Demultiplex an inbound IPv4-encapsulated packet (e.g. IP protocol #4
 * or #41).  Scans the global encap table for the attachment that best
 * matches this packet — via the entry's match function when one was
 * registered, otherwise by masked comparison of the outer src/dst
 * addresses — and hands the mbuf to the winner's pr_input.  If nothing
 * matches, the packet is injected into raw IP sockets as a last resort.
 *
 * Consumes the mbuf on every path (handler input, m_freem, or rip_input).
 */
void
encap4_input(struct mbuf *m, int off)
{
	int proto;
	struct ip *__single ip;
	struct sockaddr_in s, d;
	const struct protosw *psw;
	struct encaptab *__single ep, *__single match;
	int prio, matchprio;
	void *__single match_arg = NULL;

#ifndef __APPLE__
	/* non-Apple (KAME-style) entry points pass off/proto as varargs */
	va_start(ap, m);
	off = va_arg(ap, int);
	proto = va_arg(ap, int);
	va_end(ap);
#endif

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
#ifdef __APPLE__
	proto = ip->ip_p;
#endif

	/* build sockaddr_in views of the outer header's addresses */
	SOCKADDR_ZERO(&s, sizeof(s));
	s.sin_family = AF_INET;
	s.sin_len = sizeof(struct sockaddr_in);
	s.sin_addr = ip->ip_src;
	SOCKADDR_ZERO(&d, sizeof(d));
	d.sin_family = AF_INET;
	d.sin_len = sizeof(struct sockaddr_in);
	d.sin_addr = ip->ip_dst;

	match = NULL;
	matchprio = 0;

	lck_rw_lock_shared(&encaptab_lock);
	for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) {
		if (ep->af != AF_INET) {
			continue;
		}
		/* ep->proto < 0 means "wildcard protocol" */
		if (ep->proto >= 0 && ep->proto != proto) {
			continue;
		}
		if (ep->func) {
			prio = (*ep->func)(m, off, proto, ep->arg);
		} else {
			/*
			 * it's inbound traffic, we need to match in reverse
			 * order
			 */
			prio = mask_match(ep, SA(&d), SA(&s));
		}

		/*
		 * We prioritize the matches by using bit length of the
		 * matches.  mask_match() and user-supplied matching function
		 * should return the bit length of the matches (for example,
		 * if both src/dst are matched for IPv4, 64 should be returned).
		 * 0 or negative return value means "it did not match".
		 *
		 * The question is, since we have two "mask" portion, we
		 * cannot really define total order between entries.
		 * For example, which of these should be preferred?
		 * mask_match() returns 48 (32 + 16) for both of them.
		 *	src=3ffe::/16, dst=3ffe:501::/32
		 *	src=3ffe:501::/32, dst=3ffe::/16
		 *
		 * We need to loop through all the possible candidates
		 * to get the best match - the search takes O(n) for
		 * n attachments (i.e. interfaces).
		 */
		if (prio <= 0) {
			continue;
		}
		if (prio > matchprio) {
			/* psw/match_arg are only read below when match != NULL */
			matchprio = prio;
			match = ep;
			psw = (const struct protosw *)match->psw;
			match_arg = ep->arg;
		}
	}
	lck_rw_unlock_shared(&encaptab_lock);

	if (match) {
		/* found a match, "match" has the best one */
		if (psw && psw->pr_input) {
			/* stash the entry's arg on the mbuf for the handler */
			encap_fillarg(m, match_arg);
			(*psw->pr_input)(m, off);
		} else {
			m_freem(m);
		}
		return;
	}

	/* last resort: inject to raw socket */
	rip_input(m, off);
}
#endif
231 
/*
 * IPv6 analogue of encap4_input(): find the best-matching encap
 * attachment for this packet and dispatch the mbuf to its pr_input,
 * falling back to raw IPv6 sockets when no entry matches.
 *
 * Returns the next-header value from the chosen handler (or
 * IPPROTO_DONE when the packet was freed here).  Consumes the mbuf on
 * every path.
 */
int
encap6_input(struct mbuf **mp, int *offp, int proto)
{
	mbuf_ref_t m = *mp;
	struct ip6_hdr *__single ip6;
	struct sockaddr_in6 s, d;
	const struct ip6protosw *__single psw;
	struct encaptab *__single ep, *__single match;
	int prio, matchprio;
	void *__single match_arg = NULL;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	/* build sockaddr_in6 views of the outer header's addresses */
	ip6 = mtod(m, struct ip6_hdr *);
	SOCKADDR_ZERO(&s, sizeof(s));
	s.sin6_family = AF_INET6;
	s.sin6_len = sizeof(struct sockaddr_in6);
	s.sin6_addr = ip6->ip6_src;
	SOCKADDR_ZERO(&d, sizeof(d));
	d.sin6_family = AF_INET6;
	d.sin6_len = sizeof(struct sockaddr_in6);
	d.sin6_addr = ip6->ip6_dst;

	match = NULL;
	matchprio = 0;

	lck_rw_lock_shared(&encaptab_lock);
	for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) {
		if (ep->af != AF_INET6) {
			continue;
		}
		/* ep->proto < 0 means "wildcard protocol" */
		if (ep->proto >= 0 && ep->proto != proto) {
			continue;
		}
		if (ep->func) {
			prio = (*ep->func)(m, *offp, proto, ep->arg);
		} else {
			/*
			 * it's inbound traffic, we need to match in reverse
			 * order
			 */
			prio = mask_match(ep, SA(&d), SA(&s));
		}

		/* see encap4_input() for issues here */
		if (prio <= 0) {
			continue;
		}
		if (prio > matchprio) {
			/* psw/match_arg are only read below when match != NULL */
			matchprio = prio;
			match = ep;
			psw = (const struct ip6protosw *)match->psw;
			match_arg = ep->arg;
		}
	}
	lck_rw_unlock_shared(&encaptab_lock);

	if (match) {
		/* found a match */
		if (psw && psw->pr_input) {
			/* stash the entry's arg on the mbuf for the handler */
			encap_fillarg(m, match_arg);
			return (*psw->pr_input)(mp, offp, proto);
		} else {
			m_freem(m);
			return IPPROTO_DONE;
		}
	}

	/* last resort: inject to raw socket */
	return rip6_input(mp, offp, proto);
}
304 
/*
 * Insert a new attachment at the head of the global encap table.
 * Caller must hold encaptab_lock exclusively (asserted).
 */
static void
encap_add_locked(struct encaptab *ep)
{
	LCK_RW_ASSERT(&encaptab_lock, LCK_RW_ASSERT_EXCLUSIVE);
	LIST_INSERT_HEAD(&encaptab, ep, chain);
}
311 
312 /*
313  * sp (src ptr) is always my side, and dp (dst ptr) is always remote side.
314  * length of mask (sm and dm) is assumed to be same as sp/dp.
315  * Return value will be necessary as input (cookie) for encap_detach().
316  */
317 const struct encaptab *
encap_attach(int af,int proto,const struct sockaddr * sp,const struct sockaddr * sm,const struct sockaddr * dp,const struct sockaddr * dm,const struct protosw * psw,void * arg)318 encap_attach(int af, int proto, const struct sockaddr *sp,
319     const struct sockaddr *sm, const struct sockaddr *dp,
320     const struct sockaddr *dm, const struct protosw *psw, void *arg)
321 {
322 	struct encaptab *ep = NULL;
323 	struct encaptab *new_ep = NULL;
324 	int error;
325 
326 	/* sanity check on args */
327 	if (sp->sa_len > sizeof(new_ep->src) || dp->sa_len > sizeof(new_ep->dst)) {
328 		error = EINVAL;
329 		goto fail;
330 	}
331 	if (sp->sa_len != dp->sa_len) {
332 		error = EINVAL;
333 		goto fail;
334 	}
335 	if (af != sp->sa_family || af != dp->sa_family) {
336 		error = EINVAL;
337 		goto fail;
338 	}
339 
340 	new_ep = kalloc_type(struct encaptab, Z_WAITOK | Z_ZERO | Z_NOFAIL);
341 
342 	/* check if anyone have already attached with exactly same config */
343 	lck_rw_lock_exclusive(&encaptab_lock);
344 	for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) {
345 		if (ep->af != af) {
346 			continue;
347 		}
348 		if (ep->proto != proto) {
349 			continue;
350 		}
351 		if (ep->src.ss_len != sp->sa_len ||
352 		    SOCKADDR_CMP(&ep->src, sp, sp->sa_len) != 0 ||
353 		    SOCKADDR_CMP(&ep->srcmask, sm, sp->sa_len) != 0) {
354 			continue;
355 		}
356 		if (ep->dst.ss_len != dp->sa_len ||
357 		    SOCKADDR_CMP(&ep->dst, dp, dp->sa_len) != 0 ||
358 		    SOCKADDR_CMP(&ep->dstmask, dm, dp->sa_len) != 0) {
359 			continue;
360 		}
361 
362 		error = EEXIST;
363 		goto fail_locked;
364 	}
365 
366 	new_ep->af = af;
367 	new_ep->proto = proto;
368 	SOCKADDR_COPY(sp, &new_ep->src, sp->sa_len);
369 	SOCKADDR_COPY(sm, &new_ep->srcmask, sp->sa_len);
370 	SOCKADDR_COPY(dp, &new_ep->dst, dp->sa_len);
371 	SOCKADDR_COPY(dm, &new_ep->dstmask, dp->sa_len);
372 	new_ep->psw = psw;
373 	new_ep->arg = arg;
374 
375 	encap_add_locked(new_ep);
376 	lck_rw_unlock_exclusive(&encaptab_lock);
377 
378 	error = 0;
379 	return new_ep;
380 
381 fail_locked:
382 	lck_rw_unlock_exclusive(&encaptab_lock);
383 	if (new_ep != NULL) {
384 		kfree_type(struct encaptab, new_ep);
385 	}
386 fail:
387 	return NULL;
388 }
389 
390 const struct encaptab *
encap_attach_func(int af,int proto,int (* func)(const struct mbuf *,int,int,void *),const struct protosw * psw,void * arg)391 encap_attach_func( int af, int proto,
392     int (*func)(const struct mbuf *, int, int, void *),
393     const struct protosw *psw, void *arg)
394 {
395 	struct encaptab *ep;
396 	int error;
397 
398 	/* sanity check on args */
399 	if (!func) {
400 		error = EINVAL;
401 		goto fail;
402 	}
403 
404 	ep = kalloc_type(struct encaptab, Z_WAITOK | Z_ZERO | Z_NOFAIL); /* XXX */
405 
406 	ep->af = af;
407 	ep->proto = proto;
408 	ep->func = func;
409 	ep->psw = psw;
410 	ep->arg = arg;
411 
412 	lck_rw_lock_exclusive(&encaptab_lock);
413 	encap_add_locked(ep);
414 	lck_rw_unlock_exclusive(&encaptab_lock);
415 
416 	error = 0;
417 	return ep;
418 
419 fail:
420 	return NULL;
421 }
422 
423 int
encap_detach(const struct encaptab * cookie)424 encap_detach(const struct encaptab *cookie)
425 {
426 	const struct encaptab *ep = cookie;
427 	struct encaptab *p;
428 
429 	lck_rw_lock_exclusive(&encaptab_lock);
430 	for (p = LIST_FIRST(&encaptab); p; p = LIST_NEXT(p, chain)) {
431 		if (p == ep) {
432 			LIST_REMOVE(p, chain);
433 			lck_rw_unlock_exclusive(&encaptab_lock);
434 			kfree_type(struct encaptab, p);    /*XXX*/
435 			return 0;
436 		}
437 	}
438 	lck_rw_unlock_exclusive(&encaptab_lock);
439 
440 	return EINVAL;
441 }
442 
443 static int
mask_match(const struct encaptab * ep,const struct sockaddr * sp,const struct sockaddr * dp)444 mask_match(const struct encaptab *ep, const struct sockaddr *sp,
445     const struct sockaddr *dp)
446 {
447 	struct sockaddr_storage s;
448 	struct sockaddr_storage d;
449 	int i;
450 	const u_int8_t *p, *q;
451 	u_int8_t *r;
452 	int matchlen;
453 
454 	if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d)) {
455 		return 0;
456 	}
457 	if (sp->sa_family != ep->af || dp->sa_family != ep->af) {
458 		return 0;
459 	}
460 	if (sp->sa_len != ep->src.ss_len || dp->sa_len != ep->dst.ss_len) {
461 		return 0;
462 	}
463 
464 	matchlen = 0;
465 
466 	p = SA_BYTES(sp);
467 	q = SA_BYTES(&ep->srcmask);
468 	r = SA_BYTES(&s);
469 	for (i = 0; i < sp->sa_len; i++) {
470 		r[i] = p[i] & q[i];
471 		/* XXX estimate */
472 		matchlen += (q[i] ? 8 : 0);
473 	}
474 
475 	p = SA_BYTES(dp);
476 	q = SA_BYTES(&ep->dstmask);
477 	r = SA_BYTES(&s);
478 	for (i = 0; i < dp->sa_len; i++) {
479 		r[i] = p[i] & q[i];
480 		/* XXX rough estimate */
481 		matchlen += (q[i] ? 8 : 0);
482 	}
483 
484 	/* need to overwrite len/family portion as we don't compare them */
485 	s.ss_len = sp->sa_len;
486 	s.ss_family = sp->sa_family;
487 	d.ss_len = dp->sa_len;
488 	d.ss_family = dp->sa_family;
489 
490 	if (bcmp(&s, &ep->src, ep->src.ss_len) == 0 &&
491 	    bcmp(&d, &ep->dst, ep->dst.ss_len) == 0) {
492 		return matchlen;
493 	} else {
494 		return 0;
495 	}
496 }
497 
/*
 * Payload of the KERNEL_TAG_TYPE_ENCAP m_tag: carries the matched
 * entry's registered argument from encap_fillarg() to encap_getarg().
 * NOTE(review): declared as "void* *arg" but only a plain void * is
 * ever stored/loaded (see encap_fillarg/encap_getarg) — presumably a
 * historical typo for "void *arg"; kept as-is.
 */
struct encaptabtag {
	void*                   *arg;
};
501 
502 static void
encap_fillarg(struct mbuf * m,void * arg)503 encap_fillarg(
504 	struct mbuf *m,
505 	void *arg)
506 {
507 	struct m_tag    *tag;
508 	struct encaptabtag *et;
509 
510 	tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_ENCAP,
511 	    sizeof(struct encaptabtag), M_WAITOK, m);
512 
513 	if (tag != NULL) {
514 		et = (struct encaptabtag*)(tag->m_tag_data);
515 		et->arg = arg;
516 		m_tag_prepend(m, tag);
517 	}
518 }
519 
520 void *
encap_getarg(struct mbuf * m)521 encap_getarg(struct mbuf *m)
522 {
523 	struct m_tag *__single tag;
524 	struct encaptabtag *__single et;
525 	void *__single p = NULL;
526 
527 	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_ENCAP);
528 	if (tag) {
529 		et = (struct encaptabtag*)(tag->m_tag_data);
530 		p = et->arg;
531 		m_tag_delete(m, tag);
532 	}
533 
534 	return p;
535 }
536 
/*
 * Single allocation holding an m_tag header and its encaptabtag
 * payload.  m_tag_kalloc_encap() returns the embedded m_tag;
 * m_tag_kfree_encap() frees the whole container.
 */
struct encaptab_tag_container {
	struct m_tag            encaptab_m_tag;
	struct encaptabtag      encaptab_tag;
};
541 
542 static struct m_tag *
m_tag_kalloc_encap(u_int32_t id,u_int16_t type,uint16_t len,int wait)543 m_tag_kalloc_encap(u_int32_t id, u_int16_t type, uint16_t len, int wait)
544 {
545 	struct encaptab_tag_container *tag_container;
546 	struct m_tag *tag = NULL;
547 
548 	assert3u(id, ==, KERNEL_MODULE_TAG_ID);
549 	assert3u(type, ==, KERNEL_TAG_TYPE_ENCAP);
550 	assert3u(len, ==, sizeof(struct encaptabtag));
551 
552 	if (len != sizeof(struct encaptabtag)) {
553 		return NULL;
554 	}
555 
556 	tag_container = kalloc_type(struct encaptab_tag_container, wait | M_ZERO);
557 	if (tag_container != NULL) {
558 		tag = &tag_container->encaptab_m_tag;
559 
560 		assert3p(tag, ==, tag_container);
561 
562 		M_TAG_INIT(tag, id, type, len, &tag_container->encaptab_tag, NULL);
563 	}
564 
565 	return tag;
566 }
567 
/*
 * m_tag free routine registered for KERNEL_TAG_TYPE_ENCAP: the tag is
 * the first member of its container, so free the whole container.
 */
static void
m_tag_kfree_encap(struct m_tag *tag)
{
	struct encaptab_tag_container *__single tag_container = (struct encaptab_tag_container *)tag;

	assert3u(tag->m_tag_len, ==, sizeof(struct encaptabtag));

	kfree_type(struct encaptab_tag_container, tag_container);
}
577 
/*
 * Register the KERNEL_TAG_TYPE_ENCAP allocator/free pair with the
 * mbuf tag subsystem.  Called once during networking initialization.
 */
void
encap_register_m_tag(void)
{
	int error;

	error = m_register_internal_tag_type(KERNEL_TAG_TYPE_ENCAP, sizeof(struct encaptabtag),
	    m_tag_kalloc_encap, m_tag_kfree_encap);

	assert3u(error, ==, 0);
}
588