xref: /xnu-10002.1.13/bsd/kern/uipc_mbuf2.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*	$NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $	*/
29 
30 /*
31  * Copyright (C) 1999 WIDE Project.
32  * All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  * 3. Neither the name of the project nor the names of its contributors
43  *    may be used to endorse or promote products derived from this software
44  *    without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
50  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56  * SUCH DAMAGE.
57  */
58 
59 /*
60  * Copyright (c) 1982, 1986, 1988, 1991, 1993
61  *	The Regents of the University of California.  All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  * 1. Redistributions of source code must retain the above copyright
67  *    notice, this list of conditions and the following disclaimer.
68  * 2. Redistributions in binary form must reproduce the above copyright
69  *    notice, this list of conditions and the following disclaimer in the
70  *    documentation and/or other materials provided with the distribution.
71  * 3. All advertising materials mentioning features or use of this software
72  *    must display the following acknowledgement:
73  *	This product includes software developed by the University of
74  *	California, Berkeley and its contributors.
75  * 4. Neither the name of the University nor the names of its contributors
76  *    may be used to endorse or promote products derived from this software
77  *    without specific prior written permission.
78  *
79  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
80  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
81  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
82  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
83  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
84  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
85  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
86  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
87  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
88  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
89  * SUCH DAMAGE.
90  *
91  *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
92  */
93 /*
94  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
95  * support for mandatory and extensible security protections.  This notice
96  * is included in support of clause 2.2 (b) of the Apple Public License,
97  * Version 2.0.
98  */
99 
100 /*#define PULLDOWN_DEBUG*/
101 
102 #include <sys/param.h>
103 #include <sys/systm.h>
104 #include <sys/proc_internal.h>
105 #include <sys/malloc.h>
106 #include <sys/mbuf.h>
107 #include <sys/mcache.h>
108 #include <sys/sysctl.h>
109 
110 #include <netinet/in.h>
111 #include <netinet/ip_var.h>
112 #include <netinet/ip6.h>
113 #include <netinet6/ip6_var.h>
114 
115 #include <kern/assert.h>
116 
117 #include <os/log.h>
118 
119 #include <libkern/OSDebug.h>
120 
121 #include <ptrauth.h>
122 
123 #if defined(__i386__) || defined(__x86_64__)
124 #define MB_TAG_MBUF_DEFAULT 1
125 #else
126 #define MB_TAG_MBUF_DEFAULT 0
127 #endif /* defined(__i386__) || defined(__x86_64__) */
128 
129 SYSCTL_DECL(_kern_ipc);
130 
131 unsigned int mb_tag_mbuf = MB_TAG_MBUF_DEFAULT;
132 SYSCTL_UINT(_kern_ipc, OID_AUTO, mb_tag_mbuf,
133     CTLFLAG_RD | CTLFLAG_LOCKED, &mb_tag_mbuf, 0, "");
134 
135 struct m_tag_type_entry {
136 	m_tag_kalloc_func_t mt_alloc_func;
137 	m_tag_kfree_func_t mt_free_func;
138 	uint16_t mt_type;
139 	uint16_t mt_len;
140 };
141 
142 struct m_tag_type_stats {
143 	uint64_t mt_alloc_count;
144 	uint64_t mt_alloc_failed;
145 	uint64_t mt_free_count;
146 };
147 
148 SECURITY_READ_ONLY_LATE(static struct m_tag_type_entry) m_tag_type_table[KERNEL_TAG_TYPE_COUNT] = {};
149 
150 static struct m_tag_type_stats m_tag_type_stats[KERNEL_TAG_TYPE_COUNT] = {};
151 
152 static struct m_tag *m_tag_create_mbuf(uint32_t, uint16_t, uint16_t, int, struct mbuf *);
153 
154 /*
155  * ensure that [off, off + len) is contiguous on the mbuf chain "m".
156  * packet chain before "off" is kept untouched.
157  * if offp == NULL, the target will start at <retval, 0> on resulting chain.
158  * if offp != NULL, the target will start at <retval, *offp> on resulting chain.
159  *
160  * on error return (NULL return value), original "m" will be freed.
161  *
162  * XXX M_TRAILINGSPACE/M_LEADINGSPACE on shared cluster (sharedcluster)
163  */
164 struct mbuf *
m_pulldown(struct mbuf * m,int off,int len,int * offp)165 m_pulldown(struct mbuf *m, int off, int len, int *offp)
166 {
167 	struct mbuf *n = NULL, *o = NULL;
168 	int hlen = 0, tlen = 0, olen = 0;
169 	int sharedcluster = 0;
170 
171 	/* check invalid arguments. */
172 	VERIFY(len >= 0 && off >= 0);
173 
174 	if (m == NULL) {
175 		panic("m == NULL in m_pulldown()");
176 	}
177 	if (len > MCLBYTES) {
178 		m_freem(m);
179 		return NULL;    /* impossible */
180 	}
181 	int tmp_len = 0;
182 	if (os_add_overflow(off, len, &tmp_len)) {
183 		m_free(m);
184 		return NULL;
185 	}
186 
187 #ifdef PULLDOWN_DEBUG
188 	{
189 		struct mbuf *t;
190 		printf("before:");
191 		for (t = m; t; t = t->m_next) {
192 			printf(" %d", t->m_len);
193 		}
194 		printf("\n");
195 	}
196 #endif
197 	n = m;
198 
199 	/*
200 	 * Iterate and make n point to the mbuf
201 	 * within which the first byte at length
202 	 * offset is contained from the start of
203 	 * mbuf chain.
204 	 */
205 	while (n != NULL && off > 0) {
206 		if (n->m_len > off) {
207 			break;
208 		}
209 		off -= n->m_len;
210 		n = n->m_next;
211 	}
212 
213 	/* be sure to point non-empty mbuf */
214 	while (n != NULL && n->m_len == 0) {
215 		n = n->m_next;
216 	}
217 
218 	if (!n) {
219 		m_freem(m);
220 		return NULL;    /* mbuf chain too short */
221 	}
222 
223 	/*
224 	 * the target data is on <n, off>.
225 	 * if we got enough data on the mbuf "n", we're done.
226 	 *
227 	 * It should be noted, that we should only do this either
228 	 * when offset is 0, i.e. data is pointing to the start
229 	 * or when the caller specifies an out argument to get
230 	 * the offset value in the mbuf to work with data pointer
231 	 * correctly.
232 	 *
233 	 * If offset is not 0 and caller did not provide out-argument
234 	 * to get offset, we should split the mbuf even when the length
235 	 * is contained in current mbuf.
236 	 */
237 	if ((off == 0 || offp) && len <= n->m_len - off) {
238 		goto ok;
239 	}
240 
241 	/*
242 	 * when len <= n->m_len - off and off != 0, it is a special case.
243 	 * len bytes from <n, off> sits in single mbuf, but the caller does
244 	 * not like the starting position (off).
245 	 * chop the current mbuf into two pieces, set off to 0.
246 	 */
247 	if (len <= n->m_len - off) {
248 		o = m_copym(n, off, n->m_len - off, M_DONTWAIT);
249 		if (o == NULL) {
250 			m_freem(m);
251 			return NULL;    /* ENOBUFS */
252 		}
253 		n->m_len = off;
254 		o->m_next = n->m_next;
255 		n->m_next = o;
256 		n = n->m_next;
257 		off = 0;
258 		goto ok;
259 	}
260 
261 	/*
262 	 * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
263 	 * and construct contiguous mbuf with m_len == len.
264 	 * note that hlen + tlen == len, and tlen > 0.
265 	 *
266 	 * Read these variables as head length and tail length
267 	 */
268 	hlen = n->m_len - off;
269 	tlen = len - hlen;
270 
271 	/*
272 	 * ensure that we have enough trailing data on mbuf chain.
273 	 * if not, we can do nothing about the chain.
274 	 */
275 	olen = 0;
276 	for (o = n->m_next; o != NULL; o = o->m_next) {
277 		olen += o->m_len;
278 	}
279 	if (hlen + olen < len) {
280 		m_freem(m);
281 		return NULL;    /* mbuf chain too short */
282 	}
283 
284 	/*
285 	 * easy cases first.
286 	 * we need to use m_copydata() to get data from <n->m_next, 0>.
287 	 */
288 	if ((n->m_flags & M_EXT) == 0) {
289 		sharedcluster = 0;
290 	} else {
291 		if (m_get_ext_free(n) != NULL) {
292 			sharedcluster = 1;
293 		} else if (m_mclhasreference(n)) {
294 			sharedcluster = 1;
295 		} else {
296 			sharedcluster = 0;
297 		}
298 	}
299 
300 	/*
301 	 * If we have enough space left in current mbuf to accomodate
302 	 * tail length, copy tail length worth of data starting with next mbuf
303 	 * and adjust the length of next one accordingly.
304 	 */
305 	if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
306 	    && !sharedcluster) {
307 		m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
308 		n->m_len += tlen;
309 		m_adj(n->m_next, tlen);
310 		goto ok;
311 	}
312 
313 	/*
314 	 * If have enough leading space in next mbuf to accomodate head length
315 	 * of current mbuf, and total resulting length of next mbuf is greater
316 	 * than or equal to requested len bytes, then just copy hlen from
317 	 * current to the next one and adjust sizes accordingly.
318 	 */
319 	if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen &&
320 	    (n->m_next->m_len + hlen) >= len && !sharedcluster) {
321 		n->m_next->m_data -= hlen;
322 		n->m_next->m_len += hlen;
323 		bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
324 		n->m_len -= hlen;
325 		n = n->m_next;
326 		off = 0;
327 		goto ok;
328 	}
329 
330 	/*
331 	 * now, we need to do the hard way.  don't m_copy as there's no room
332 	 * on both end.
333 	 */
334 	MGET(o, M_DONTWAIT, m->m_type);
335 	if (o == NULL) {
336 		m_freem(m);
337 		return NULL;    /* ENOBUFS */
338 	}
339 	if (len > MHLEN) {      /* use MHLEN just for safety */
340 		MCLGET(o, M_DONTWAIT);
341 		if ((o->m_flags & M_EXT) == 0) {
342 			m_freem(m);
343 			m_free(o);
344 			return NULL;    /* ENOBUFS */
345 		}
346 	}
347 	/* get hlen from <n, off> into <o, 0> */
348 	o->m_len = hlen;
349 	bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
350 	n->m_len -= hlen;
351 	/* get tlen from <n->m_next, 0> into <o, hlen> */
352 	m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
353 	o->m_len += tlen;
354 	m_adj(n->m_next, tlen);
355 	o->m_next = n->m_next;
356 	n->m_next = o;
357 	n = o;
358 	off = 0;
359 
360 ok:
361 #ifdef PULLDOWN_DEBUG
362 	{
363 		struct mbuf *t;
364 		printf("after:");
365 		for (t = m; t; t = t->m_next) {
366 			printf("%c%d", t == n ? '*' : ' ', t->m_len);
367 		}
368 		printf(" (off=%d)\n", off);
369 	}
370 #endif
371 	if (offp) {
372 		*offp = off;
373 	}
374 	return n;
375 }
376 
/* Placeholder allocator for unregistered tag types: always fails. */
static struct m_tag *
m_tag_kalloc_notsupp(__unused uint32_t id, __unused uint16_t type, __unused uint16_t len, __unused int wait)
{
	return NULL;
}
382 
/* Placeholder free routine for unregistered tag types: no-op. */
static void
m_tag_kfree_notsupp(__unused struct m_tag *tag)
{
	return;
}
388 
389 #if defined(HAS_APPLE_PAC)
390 /*
391  * combine into a uintptr_t the m_tag_type that is 16 bits with the m_tag_id is 32 bits
392  */
/*
 * Pack the tag's 16-bit m_tag_type and 32-bit m_tag_id into a single
 * uintptr_t used as the plaintext value for the signed tag cookie.
 */
static uintptr_t
m_tag_cookie_from_id_and_type(struct m_tag *tag)
{
	uintptr_t cookie;

#ifdef __LP64__
	/*
	 * upper 4 bytes: 2 bytes of type
	 * lower 4 bytes: 4 bytes of id
	 */
	cookie = (((uintptr_t)tag->m_tag_type) << 32) | (uintptr_t)tag->m_tag_id;
#else
	/*
	 * upper 2 bytes: 2 bytes of type or-ed with upper 2 bytes of id
	 * lower 2 bytes: lower 2 bytes of id
	 */
	cookie = (((uintptr_t)tag->m_tag_type) << 16) | (uintptr_t)tag->m_tag_id;
#endif
	return cookie;
}
413 
/*
 * Stamp the tag with a PAC-signed cookie derived from its (type, id) pair,
 * so a corrupted or forged tag can be detected by m_tag_verify_cookie().
 */
void
m_tag_create_cookie(struct m_tag *tag)
{
	uintptr_t cookie = m_tag_cookie_from_id_and_type(tag);

	/* Sign with the process-independent data key, blending the tag's
	 * type/id into the discriminator so cookies are tag-specific. */
	tag->m_tag_cookie = (uintptr_t) ptrauth_sign_unauthenticated((void *)cookie,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(uintptr_t)(tag->m_tag_type | tag->m_tag_id),
	    ptrauth_string_discriminator("m_tag.m_tag_cookie")));
}
424 
/*
 * Authenticate the tag's signed cookie; panics if the stored cookie does
 * not match a fresh signature over the tag's (type, id) pair.
 */
static void
m_tag_verify_cookie(struct m_tag *tag)
{
	uintptr_t cookie = m_tag_cookie_from_id_and_type(tag);
	uintptr_t auth_cookie;

	/* Must use the same key and blended discriminator as the signer. */
	auth_cookie = (uintptr_t) ptrauth_auth_data((void *)(uintptr_t)tag->m_tag_cookie,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(uintptr_t)(tag->m_tag_type | tag->m_tag_id),
	    ptrauth_string_discriminator("m_tag.m_tag_cookie")));
	if (cookie != auth_cookie) {
		panic("verify_m_tag_cookie bad m_tag cookie");
	}
}
439 
440 #else /* defined(HAS_APPLE_PAC) */
441 
/* Non-PAC fallback: stamp the tag with a fixed validity pattern. */
void
m_tag_create_cookie(struct m_tag *tag)
{
	tag->m_tag_cookie = M_TAG_VALID_PATTERN;
}
447 
/* Non-PAC fallback: assert the fixed validity pattern is intact. */
static void
m_tag_verify_cookie(struct m_tag *tag)
{
	VERIFY(tag->m_tag_cookie == M_TAG_VALID_PATTERN);
}
453 
454 #endif /* defined(HAS_APPLE_PAC) */
455 
456 
457 struct m_tag *
m_tag_create(uint32_t id,uint16_t type,int len,int wait,struct mbuf * buf)458 m_tag_create(uint32_t id, uint16_t type, int len, int wait, struct mbuf *buf)
459 {
460 	if (mb_tag_mbuf != 0) {
461 		/*
462 		 * Create and return an m_tag, either by re-using space in a previous tag
463 		 * or by allocating a new mbuf/cluster
464 		 */
465 		return m_tag_create_mbuf(id, type, (uint16_t)len, wait, buf);
466 	} else {
467 		/*
468 		 * Each packet tag has its own allocation
469 		 */
470 		return m_tag_alloc(id, type, (uint16_t)len, wait);
471 	}
472 }
473 
474 /* Get a packet tag structure along with specified data following. */
static struct m_tag *
m_tag_alloc_mbuf(u_int32_t id, u_int16_t type, uint16_t len, int wait)
{
	struct m_tag *t;
	void *mb_cl = NULL;

	/*
	 * Small tags are carved out of a regular mbuf: the data area starts
	 * with an m_taghdr (refcount of tags sharing this mbuf) followed by
	 * 8-byte-aligned m_tag records.
	 */
	if (M_TAG_ALIGN(len) + sizeof(struct m_taghdr) <= MLEN) {
		struct mbuf *m = m_get(wait, MT_TAG);
		struct m_taghdr *hdr;

		if (m == NULL) {
			return NULL;
		}
		mb_cl = m;

		/* Mark the mbuf as holding a tag header so free can find it. */
		m->m_flags |= M_TAGHDR;

		hdr = (struct m_taghdr *)(void *)m->m_data;
		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));
		hdr->mth_refcnt = 1;
		m->m_len += sizeof(struct m_taghdr);
		/* The tag record starts right after the header. */
		t = (struct m_tag *)(void *)(m->m_data + m->m_len);
		VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
		m->m_len += M_TAG_ALIGN(len);
		VERIFY(m->m_len <= MLEN);
	} else if (len + sizeof(struct m_tag) <= MCLBYTES) {
		/* Larger tags get a dedicated 2K cluster (never shared). */
		mb_cl = m_mclalloc(wait);
		t = (struct m_tag *)(void *)mb_cl;
	} else {
		/* Request exceeds a 2K cluster: cannot satisfy. */
		t = NULL;
	}

	if (__improbable(t == NULL)) {
		return NULL;
	}

	VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
	/* Tag payload lives immediately after the m_tag structure. */
	M_TAG_INIT(t, id, type, len, (void *)(t + 1), mb_cl);
	if (len > 0) {
		bzero(t->m_tag_data, len);
	}
	return t;
}
518 
/*
 * Create a tag for packet "buf", preferring to append it to an existing
 * shared tag mbuf already on the packet's tag chain (bumping that mbuf's
 * m_taghdr refcount) before falling back to a fresh allocation.
 */
static struct m_tag *
m_tag_create_mbuf(uint32_t id, uint16_t type, uint16_t len, int wait, struct mbuf *buf)
{
	struct m_tag *t = NULL;
	struct m_tag *p;
	void *mb_cl = NULL;

	/* Too big to ever share an mbuf: take the generic allocation path. */
	if (len + sizeof(struct m_tag) + sizeof(struct m_taghdr) > MLEN) {
		return m_tag_alloc(id, type, len, wait);
	}

	/*
	 * We've exhausted all external cases. Now, go through the m_tag
	 * chain and see if we can fit it in any of them.
	 * If not (t == NULL), call m_tag_alloc to store it in a new mbuf.
	 */
	p = SLIST_FIRST(&buf->m_pkthdr.tags);
	while (p != NULL) {
		/* 2KCL m_tag */
		if (M_TAG_ALIGN(p->m_tag_len) +
		    sizeof(struct m_taghdr) > MLEN) {
			/* cluster-backed tag: no shared mbuf to append to */
			p = SLIST_NEXT(p, m_tag_link);
			continue;
		}

		m_tag_verify_cookie(p);

		struct mbuf *m = p->m_tag_mb_cl;
		struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data;

		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));
		VERIFY(m->m_flags & M_TAGHDR && !(m->m_flags & M_EXT));

		/* The mbuf can store this m_tag */
		if (M_TAG_ALIGN(len) <= MLEN - m->m_len) {
			mb_cl = m;
			/* Carve the new tag out of the mbuf's free tail. */
			t = (struct m_tag *)(void *)(m->m_data + m->m_len);
			VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
			hdr->mth_refcnt++;
			m->m_len += M_TAG_ALIGN(len);
			VERIFY(m->m_len <= MLEN);
			break;
		}

		p = SLIST_NEXT(p, m_tag_link);
	}

	if (t == NULL) {
		return m_tag_alloc(id, type, len, wait);
	}

	M_TAG_INIT(t, id, type, len, (void *)(t + 1), mb_cl);
	if (len > 0) {
		bzero(t->m_tag_data, len);
	}
	return t;
}
576 
/*
 * Free a tag allocated by m_tag_alloc_mbuf()/m_tag_create_mbuf().
 * Small tags share an mbuf whose m_taghdr refcount is dropped; the mbuf
 * itself is freed only when the last tag in it goes away.
 */
static void
m_tag_free_mbuf(struct m_tag *t)
{
	if (__improbable(t == NULL)) {
		return;
	}

	if (M_TAG_ALIGN(t->m_tag_len) + sizeof(struct m_taghdr) <= MLEN) {
		/* Tag lives inside a shared tag mbuf. */
		struct mbuf * m = t->m_tag_mb_cl;

		VERIFY(m->m_flags & M_TAGHDR);
		struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data;

		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));

		/* No other tags in this mbuf */
		if (--hdr->mth_refcnt == 0) {
			m_free(m);
			return;
		}

		/* Pattern-fill the header */
		u_int64_t *fill_ptr = (u_int64_t *)t;
		u_int64_t *end_ptr = (u_int64_t *)(t + 1);
		while (fill_ptr < end_ptr) {
			*fill_ptr = M_TAG_FREE_PATTERN;
			fill_ptr++;
		}
	} else {
		/* Large tag: backed by its own 2K cluster. */
		m_mclfree((caddr_t)t);
	}
}
609 
610 /*
611  * Allocations for external data are known to not have pointers for
612  * most platforms -- for macOS this is not guaranteed
613  */
614 #if XNU_TARGET_OS_OSX
615 
616 __typed_allocators_ignore_push
617 
/* Allocate zeroed out-of-line tag data from the default kheap (macOS). */
static inline void *
m_tag_data_kalloc(uint16_t len, int wait)
{
	return kheap_alloc(KHEAP_DEFAULT, len, wait | M_ZERO);
}
623 
/* Release a tag's out-of-line data buffer back to the default kheap. */
static inline void
m_tag_data_free(struct m_tag *tag)
{
	kheap_free(KHEAP_DEFAULT, tag->m_tag_data, tag->m_tag_len);
}
629 __typed_allocators_ignore_pop
630 
631 #else /* XNU_TARGET_OS_OSX */
632 
/* Allocate zeroed out-of-line tag data (non-macOS: typed data heap). */
static inline void *
m_tag_data_kalloc(uint16_t len, int wait)
{
	return kalloc_data(len, wait | M_ZERO);
}
638 
/* Release a tag's out-of-line data buffer (non-macOS: typed data heap). */
static inline void
m_tag_data_free(struct m_tag *tag)
{
	kfree_data(tag->m_tag_data, tag->m_tag_len);
}
644 
645 #endif /* XNU_TARGET_OS_OSX */
646 
647 static struct m_tag *
m_tag_kalloc_external(uint32_t id,uint16_t type,uint16_t len,int wait)648 m_tag_kalloc_external(uint32_t id, uint16_t type, uint16_t len, int wait)
649 {
650 	struct m_tag *tag;
651 	void *data = NULL;
652 
653 	tag = kalloc_type(struct m_tag, wait | M_ZERO);
654 	if (__improbable(tag == NULL)) {
655 		return NULL;
656 	}
657 
658 	if (len > 0) {
659 		data = m_tag_data_kalloc(len, wait);
660 		if (__improbable(data == NULL)) {
661 			kfree_type(struct m_tag, tag);
662 			return NULL;
663 		}
664 	}
665 
666 	M_TAG_INIT(tag, id, type, len, data, NULL);
667 
668 	return tag;
669 }
670 
/* Free a tag from m_tag_kalloc_external: data buffer first, then the tag. */
static void
m_tag_kfree_external(struct m_tag *tag)
{
	if (tag->m_tag_data != NULL) {
		m_tag_data_free(tag);
	}
	kfree_type(struct m_tag, tag);
}
679 
/*
 * Look up the allocator entry (and optionally the stats slot via pmtts)
 * for a tag's (id, type).  Unknown ids/types fall back to the generic
 * KERNEL_TAG_TYPE_NONE entry, which uses the external allocator.
 */
static struct m_tag_type_entry *
get_m_tag_type_entry(uint32_t id, uint16_t type, struct m_tag_type_stats **pmtts)
{
	/* Default to the generic entry unless a known kernel type matches. */
	struct m_tag_type_entry *mtte = &m_tag_type_table[KERNEL_TAG_TYPE_NONE];

	if (pmtts != NULL) {
		*pmtts = &m_tag_type_stats[KERNEL_TAG_TYPE_NONE];
	}

	if (id == KERNEL_MODULE_TAG_ID) {
		switch (type) {
		case KERNEL_TAG_TYPE_DUMMYNET:
		case KERNEL_TAG_TYPE_IPFILT:
		case KERNEL_TAG_TYPE_ENCAP:
		case KERNEL_TAG_TYPE_INET6:
		case KERNEL_TAG_TYPE_IPSEC:
		case KERNEL_TAG_TYPE_CFIL_UDP:
		case KERNEL_TAG_TYPE_PF_REASS:
		case KERNEL_TAG_TYPE_AQM:
		case KERNEL_TAG_TYPE_DRVAUX:
			mtte = &m_tag_type_table[type];
			if (pmtts != NULL) {
				*pmtts = &m_tag_type_stats[type];
			}
			break;
		default:
#if DEBUG || DEVELOPMENT
			/* A kernel id with an in-range but unhandled type is a bug. */
			if (type > 0 && type < KERNEL_TAG_TYPE_COUNT) {
				panic("get_m_tag_type_entry unexpected m_tag type %u",
				    type);
			}
#endif /* DEBUG || DEVELOPMENT */
			break;
		}
	}

	return mtte;
}
718 
719 static struct m_tag *
m_tag_kalloc(uint32_t id,uint16_t type,uint16_t len,int wait,struct m_tag_type_entry * mtte)720 m_tag_kalloc(uint32_t id, uint16_t type, uint16_t len, int wait, struct m_tag_type_entry *mtte)
721 {
722 	struct m_tag *tag = NULL;
723 
724 	tag = mtte->mt_alloc_func(id, type, len, wait);
725 
726 	if (__probable(tag != NULL)) {
727 		VERIFY(IS_P2ALIGNED(tag, sizeof(uint64_t)));
728 
729 		if (__improbable(tag->m_tag_data == NULL)) {
730 			VERIFY(len == 0);
731 		} else {
732 			VERIFY(len != 0);
733 			VERIFY(IS_P2ALIGNED(tag->m_tag_data, sizeof(uint64_t)));
734 		}
735 	}
736 	return tag;
737 }
738 
/* Dispatch to the type-specific free routine for this tag. */
static void
m_tag_kfree(struct m_tag *tag, struct m_tag_type_entry *mtte)
{
	mtte->mt_free_func(tag);
}
744 
745 struct m_tag *
m_tag_alloc(uint32_t id,uint16_t type,int len,int wait)746 m_tag_alloc(uint32_t id, uint16_t type, int len, int wait)
747 {
748 	struct m_tag *tag = NULL;
749 	struct m_tag_type_entry *mtte = NULL;
750 	struct m_tag_type_stats *mtts = NULL;
751 
752 	mtte = get_m_tag_type_entry(id, type, &mtts);
753 
754 	if (__improbable(len < 0 || len >= MCLBYTES - sizeof(struct m_tag))) {
755 		goto done;
756 	}
757 
758 	if (mb_tag_mbuf != 0) {
759 		tag = m_tag_alloc_mbuf(id, type, (uint16_t)len, wait);
760 	} else {
761 		tag = m_tag_kalloc(id, type, (uint16_t)len, wait, mtte);
762 	}
763 done:
764 	if (__probable(tag != NULL)) {
765 		m_tag_verify_cookie(tag);
766 		assert3u(tag->m_tag_id, ==, id);
767 		assert3u(tag->m_tag_type, ==, type);
768 		assert3u(tag->m_tag_len, ==, len);
769 
770 		os_atomic_inc(&mtts->mt_alloc_count, relaxed);
771 	} else {
772 		os_atomic_inc(&mtts->mt_alloc_failed, relaxed);
773 	}
774 
775 	return tag;
776 }
777 
778 /* Free a packet tag. */
779 void
m_tag_free(struct m_tag * tag)780 m_tag_free(struct m_tag *tag)
781 {
782 	struct m_tag_type_entry *mtte = NULL;
783 	struct m_tag_type_stats *mtts = NULL;
784 
785 	if (__improbable(tag == NULL)) {
786 		return;
787 	}
788 
789 	m_tag_verify_cookie(tag);
790 
791 	mtte = get_m_tag_type_entry(tag->m_tag_id, tag->m_tag_type, &mtts);
792 
793 	if (mb_tag_mbuf != 0) {
794 		m_tag_free_mbuf(tag);
795 	} else {
796 		m_tag_kfree(tag, mtte);
797 	}
798 
799 	os_atomic_inc(&mtts->mt_free_count, relaxed);
800 }
801 
/*
 * One-time init of the m_tag type table: every slot starts with the
 * "not supported" stubs, the generic and driver-aux slots get the
 * external allocator, and each compiled-in subsystem registers its own
 * type-specific allocator via m_register_internal_tag_type().
 */
void
mbuf_tag_init(void)
{
	for (uint16_t type = 0; type < KERNEL_TAG_TYPE_COUNT; type++) {
		m_tag_type_table[type].mt_type = type;
		m_tag_type_table[type].mt_len = 0;
		m_tag_type_table[type].mt_alloc_func = m_tag_kalloc_notsupp;
		m_tag_type_table[type].mt_free_func = m_tag_kfree_notsupp;
	}
	m_tag_type_table[KERNEL_TAG_TYPE_NONE].mt_alloc_func = m_tag_kalloc_external;
	m_tag_type_table[KERNEL_TAG_TYPE_NONE].mt_free_func = m_tag_kfree_external;
	m_tag_type_table[KERNEL_TAG_TYPE_DRVAUX].mt_alloc_func = m_tag_kalloc_external;
	m_tag_type_table[KERNEL_TAG_TYPE_DRVAUX].mt_free_func = m_tag_kfree_external;

#if NETWORKING
	extern void pktsched_register_m_tag(void);
	pktsched_register_m_tag();
#endif /* NETWORKING */

#if INET
	extern void ip6_register_m_tag(void);
	ip6_register_m_tag();

	extern void ipfilter_register_m_tag(void);
	ipfilter_register_m_tag();

	extern void encap_register_m_tag(void);
	encap_register_m_tag();
#endif /* INET */

#if IPSEC
	extern void ipsec_register_m_tag(void);
	ipsec_register_m_tag();
#endif /* IPSEC */

#if DUMMYNET
	extern void dummynet_register_m_tag(void);
	dummynet_register_m_tag();
#endif /* DUMMYNET */

#if PF
	extern void pf_register_m_tag(void);
	pf_register_m_tag();
#endif /* PF */

#if CONTENT_FILTER
	extern void cfil_register_m_tag(void);
	cfil_register_m_tag();
#endif /* CONTENT_FILTER */
}
852 
853 int
m_register_internal_tag_type(uint16_t type,uint16_t len,m_tag_kalloc_func_t alloc_func,m_tag_kfree_func_t free_func)854 m_register_internal_tag_type(uint16_t type, uint16_t len,
855     m_tag_kalloc_func_t alloc_func, m_tag_kfree_func_t free_func)
856 {
857 	int error = 0;
858 
859 	if (type <= 0 || type >= KERNEL_TAG_TYPE_DRVAUX) {
860 		error = EINVAL;
861 		goto done;
862 	}
863 	m_tag_type_table[type].mt_type = type;
864 	m_tag_type_table[type].mt_len = len;
865 	m_tag_type_table[type].mt_alloc_func = alloc_func;
866 	m_tag_type_table[type].mt_free_func = free_func;
867 
868 done:
869 	return error;
870 }
871 
/* Prepend a packet tag: insert at the head of the packet's tag list. */
void
m_tag_prepend(struct mbuf *m, struct m_tag *t)
{
	VERIFY(m != NULL && t != NULL);

	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
}
880 
/* Unlink a packet tag from the mbuf's tag list without freeing it. */
void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{
	VERIFY(m->m_flags & M_PKTHDR);
	VERIFY(t != NULL);

	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}
890 
/* Unlink and free a packet tag. */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
	m_tag_unlink(m, t);
	m_tag_free(t);
}
898 
899 /* Unlink and free a packet tag chain, starting from given tag. */
900 void
m_tag_delete_chain(struct mbuf * m)901 m_tag_delete_chain(struct mbuf *m)
902 {
903 	struct m_tag *p, *q;
904 
905 	VERIFY(m->m_flags & M_PKTHDR);
906 
907 	p = SLIST_FIRST(&m->m_pkthdr.tags);
908 	if (p == NULL) {
909 		return;
910 	}
911 
912 	while ((q = SLIST_NEXT(p, m_tag_link)) != NULL) {
913 		m_tag_delete(m, q);
914 	}
915 	m_tag_delete(m, p);
916 }
917 
918 /* Find a tag, starting from a given position. */
919 struct m_tag *
m_tag_locate(struct mbuf * m,uint32_t id,uint16_t type)920 m_tag_locate(struct mbuf *m, uint32_t id, uint16_t type)
921 {
922 	struct m_tag *p;
923 
924 	VERIFY(m->m_flags & M_PKTHDR);
925 
926 	p = SLIST_FIRST(&m->m_pkthdr.tags);
927 
928 	while (p != NULL) {
929 		if (p->m_tag_id == id && p->m_tag_type == type) {
930 			m_tag_verify_cookie(p);
931 			return p;
932 		}
933 		p = SLIST_NEXT(p, m_tag_link);
934 	}
935 	return NULL;
936 }
937 
938 /* Copy a single tag. */
939 struct m_tag *
m_tag_copy(struct m_tag * t,int how)940 m_tag_copy(struct m_tag *t, int how)
941 {
942 	struct m_tag *p;
943 
944 	VERIFY(t != NULL);
945 
946 	p = m_tag_alloc(t->m_tag_id, t->m_tag_type, t->m_tag_len, how);
947 	if (p == NULL) {
948 		return NULL;
949 	}
950 	bcopy(t->m_tag_data, p->m_tag_data, t->m_tag_len); /* Copy the data */
951 	return p;
952 }
953 
954 /*
955  * Copy two tag chains. The destination mbuf (to) loses any attached
956  * tags even if the operation fails. This should not be a problem, as
957  * m_tag_copy_chain() is typically called with a newly-allocated
958  * destination mbuf.
959  */
960 int
m_tag_copy_chain(struct mbuf * to,struct mbuf * from,int how)961 m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
962 {
963 	struct m_tag *p, *t, *tprev = NULL;
964 
965 	VERIFY((to->m_flags & M_PKTHDR) && (from->m_flags & M_PKTHDR));
966 
967 	m_tag_delete_chain(to);
968 	SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
969 		m_tag_verify_cookie(p);
970 		t = m_tag_copy(p, how);
971 		if (t == NULL) {
972 			m_tag_delete_chain(to);
973 			return 0;
974 		}
975 		if (tprev == NULL) {
976 			SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
977 		} else {
978 			SLIST_INSERT_AFTER(tprev, t, m_tag_link);
979 			tprev = t;
980 		}
981 	}
982 	return 1;
983 }
984 
/* Initialize dynamic and static tags on an mbuf. */
void
m_tag_init(struct mbuf *m, int all)
{
	VERIFY(m->m_flags & M_PKTHDR);

	SLIST_INIT(&m->m_pkthdr.tags);
	/*
	 * Zero the built-in (static) tag storage only when "all" is set.
	 * Callers that want to preserve static mbuf tags
	 * (e.g. m_dup_pkthdr) pass all == 0 so they aren't zeroed out.
	 */
	if (all) {
		bzero(&m->m_pkthdr.builtin_mtag._net_mtag,
		    sizeof(m->m_pkthdr.builtin_mtag._net_mtag));
	}
}
1001 
/* Get first tag in chain (NULL when the packet carries no tags). */
struct m_tag *
m_tag_first(struct mbuf *m)
{
	VERIFY(m->m_flags & M_PKTHDR);

	return SLIST_FIRST(&m->m_pkthdr.tags);
}
1010 
/* Get next tag in chain; the mbuf argument is unused, kept for API shape. */
struct m_tag *
m_tag_next(struct mbuf *m, struct m_tag *t)
{
#pragma unused(m)
	VERIFY(t != NULL);

	return SLIST_NEXT(t, m_tag_link);
}
1020 
1021 int
m_set_traffic_class(struct mbuf * m,mbuf_traffic_class_t tc)1022 m_set_traffic_class(struct mbuf *m, mbuf_traffic_class_t tc)
1023 {
1024 	uint32_t val = MBUF_TC2SCVAL(tc);      /* just the val portion */
1025 
1026 	return m_set_service_class(m, m_service_class_from_val(val));
1027 }
1028 
1029 mbuf_traffic_class_t
m_get_traffic_class(struct mbuf * m)1030 m_get_traffic_class(struct mbuf *m)
1031 {
1032 	return MBUF_SC2TC(m_get_service_class(m));
1033 }
1034 
1035 int
m_set_service_class(struct mbuf * m,mbuf_svc_class_t sc)1036 m_set_service_class(struct mbuf *m, mbuf_svc_class_t sc)
1037 {
1038 	int error = 0;
1039 
1040 	VERIFY(m->m_flags & M_PKTHDR);
1041 
1042 	if (MBUF_VALID_SC(sc)) {
1043 		m->m_pkthdr.pkt_svc = sc;
1044 	} else {
1045 		error = EINVAL;
1046 	}
1047 
1048 	return error;
1049 }
1050 
1051 mbuf_svc_class_t
m_get_service_class(struct mbuf * m)1052 m_get_service_class(struct mbuf *m)
1053 {
1054 	mbuf_svc_class_t sc;
1055 
1056 	VERIFY(m->m_flags & M_PKTHDR);
1057 
1058 	if (MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1059 		sc = m->m_pkthdr.pkt_svc;
1060 	} else {
1061 		sc = MBUF_SC_BE;
1062 	}
1063 
1064 	return sc;
1065 }
1066 
1067 mbuf_svc_class_t
m_service_class_from_idx(uint32_t i)1068 m_service_class_from_idx(uint32_t i)
1069 {
1070 	mbuf_svc_class_t sc = MBUF_SC_BE;
1071 
1072 	switch (i) {
1073 	case SCIDX_BK_SYS:
1074 		return MBUF_SC_BK_SYS;
1075 
1076 	case SCIDX_BK:
1077 		return MBUF_SC_BK;
1078 
1079 	case SCIDX_BE:
1080 		return MBUF_SC_BE;
1081 
1082 	case SCIDX_RD:
1083 		return MBUF_SC_RD;
1084 
1085 	case SCIDX_OAM:
1086 		return MBUF_SC_OAM;
1087 
1088 	case SCIDX_AV:
1089 		return MBUF_SC_AV;
1090 
1091 	case SCIDX_RV:
1092 		return MBUF_SC_RV;
1093 
1094 	case SCIDX_VI:
1095 		return MBUF_SC_VI;
1096 
1097 	case SCIDX_VO:
1098 		return MBUF_SC_VO;
1099 
1100 	case SCIDX_CTL:
1101 		return MBUF_SC_CTL;
1102 
1103 	default:
1104 		break;
1105 	}
1106 
1107 	VERIFY(0);
1108 	/* NOTREACHED */
1109 	return sc;
1110 }
1111 
1112 mbuf_svc_class_t
m_service_class_from_val(uint32_t v)1113 m_service_class_from_val(uint32_t v)
1114 {
1115 	mbuf_svc_class_t sc = MBUF_SC_BE;
1116 
1117 	switch (v) {
1118 	case SCVAL_BK_SYS:
1119 		return MBUF_SC_BK_SYS;
1120 
1121 	case SCVAL_BK:
1122 		return MBUF_SC_BK;
1123 
1124 	case SCVAL_BE:
1125 		return MBUF_SC_BE;
1126 
1127 	case SCVAL_RD:
1128 		return MBUF_SC_RD;
1129 
1130 	case SCVAL_OAM:
1131 		return MBUF_SC_OAM;
1132 
1133 	case SCVAL_AV:
1134 		return MBUF_SC_AV;
1135 
1136 	case SCVAL_RV:
1137 		return MBUF_SC_RV;
1138 
1139 	case SCVAL_VI:
1140 		return MBUF_SC_VI;
1141 
1142 	case SCVAL_VO:
1143 		return MBUF_SC_VO;
1144 
1145 	case SCVAL_CTL:
1146 		return MBUF_SC_CTL;
1147 
1148 	default:
1149 		break;
1150 	}
1151 
1152 	VERIFY(0);
1153 	/* NOTREACHED */
1154 	return sc;
1155 }
1156 
/*
 * Adjust a 16-bit partial checksum "sum", whose coverage began at packet
 * offset "start", so that the result covers exactly the "datalen" octets
 * beginning at "dataoff".  Extraneous leading octets are 1's-complement
 * subtracted out (or, when the sum started past "dataoff", the missing
 * octets are added in), and trailing octets past the covered span are
 * subtracted as well.  Returns the folded 16-bit result.
 */
uint16_t
m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff,
    uint32_t datalen, uint32_t sum)
{
	uint32_t total_sub = 0;                 /* total to subtract */
	uint32_t mlen = m_pktlen(m);            /* frame length */
	uint32_t bytes = (dataoff + datalen);   /* bytes covered by sum */
	int len;

	/* the span being checksummed must lie within the frame */
	ASSERT(bytes <= mlen);

	/*
	 * Take care of excluding (len > 0) or including (len < 0)
	 * extraneous octets at the beginning of the packet, taking
	 * into account the start offset.
	 */
	len = (dataoff - start);
	if (len > 0) {
		/* sum covered [start, dataoff); queue those octets for removal */
		total_sub = m_sum16(m, start, len);
	} else if (len < 0) {
		/* sum started after dataoff; add the missing [dataoff, start) */
		sum += m_sum16(m, dataoff, -len);
	}

	/*
	 * Take care of excluding any postpended extraneous octets.
	 */
	len = (mlen - bytes);
	if (len > 0) {
		struct mbuf *m0 = m;
		uint32_t extra = m_sum16(m, bytes, len);
		uint32_t off = bytes, off0 = off;

		/*
		 * Walk the chain to locate the mbuf (and intra-mbuf offset)
		 * where the trailing octets begin; we need the buffer address
		 * there to learn the byte parity of "extra" within the sum.
		 */
		while (off > 0) {
			if (__improbable(m == NULL)) {
				panic("%s: invalid mbuf chain %p [off %u, "
				    "len %u]", __func__, m0, off0, len);
				/* NOTREACHED */
			}
			if (off < m->m_len) {
				break;
			}
			off -= m->m_len;
			m = m->m_next;
		}

		/* if we started on odd-alignment, swap the value */
		if ((uintptr_t)(mtod(m, uint8_t *) + off) & 1) {
			total_sub += ((extra << 8) & 0xffff) | (extra >> 8);
		} else {
			total_sub += extra;
		}

		/* partial fold so total_sub stays within 17 bits */
		total_sub = (total_sub >> 16) + (total_sub & 0xffff);
	}

	/*
	 * 1's complement subtract any extraneous octets.
	 */
	if (total_sub != 0) {
		if (total_sub >= sum) {
			/* borrow case: complement keeps the 1's-complement form */
			sum = ~(total_sub - sum) & 0xffff;
		} else {
			sum -= total_sub;
		}
	}

	/* fold 32-bit to 16-bit */
	sum = (sum >> 16) + (sum & 0xffff);     /* 17-bit */
	sum = (sum >> 16) + (sum & 0xffff);     /* 16-bit + carry */
	sum = (sum >> 16) + (sum & 0xffff);     /* final carry */

	return sum & 0xffff;
}
1230 
1231 uint16_t
m_sum16(struct mbuf * m,uint32_t off,uint32_t len)1232 m_sum16(struct mbuf *m, uint32_t off, uint32_t len)
1233 {
1234 	int mlen;
1235 
1236 	/*
1237 	 * Sanity check
1238 	 *
1239 	 * Use m_length2() instead of m_length(), as we cannot rely on
1240 	 * the caller setting m_pkthdr.len correctly, if the mbuf is
1241 	 * a M_PKTHDR one.
1242 	 */
1243 	if ((mlen = m_length2(m, NULL)) < (off + len)) {
1244 		panic("%s: mbuf %p len (%d) < off+len (%d+%d)", __func__,
1245 		    m, mlen, off, len);
1246 		/* NOTREACHED */
1247 	}
1248 
1249 	return (uint16_t)os_cpu_in_cksum_mbuf(m, len, off, 0);
1250 }
1251 
1252 static int
sysctl_mb_tag_stats(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)1253 sysctl_mb_tag_stats(__unused struct sysctl_oid *oidp,
1254     __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1255 {
1256 	int error = 0;
1257 
1258 	if (req->oldptr == USER_ADDR_NULL) {
1259 		req->oldidx = KERNEL_TAG_TYPE_COUNT * sizeof(struct m_tag_stats);
1260 		return 0;
1261 	}
1262 	if (req->newptr != USER_ADDR_NULL) {
1263 		return EPERM;
1264 	}
1265 
1266 	for (uint16_t i = 0; i < KERNEL_TAG_TYPE_COUNT; i++) {
1267 		struct m_tag_stats m_tag_stats = {};
1268 
1269 		m_tag_stats.mts_id = KERNEL_MODULE_TAG_ID;
1270 		m_tag_stats.mts_type = i;
1271 		m_tag_stats.mts_len = m_tag_type_table[i].mt_len;
1272 		m_tag_stats.mts_alloc_count = m_tag_type_stats[i].mt_alloc_count;
1273 		m_tag_stats.mts_alloc_failed = m_tag_type_stats[i].mt_alloc_failed;
1274 		m_tag_stats.mts_free_count = m_tag_type_stats[i].mt_free_count;
1275 
1276 		error = SYSCTL_OUT(req, &m_tag_stats, sizeof(struct m_tag_stats));
1277 	}
1278 
1279 	return error;
1280 }
1281 
/* kern.ipc.mb_tag_stats: read-only array of per-type mbuf tag statistics */
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_tag_stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, NULL, 0,
    sysctl_mb_tag_stats, "S,m_tag_stats", "");
1285 
1286 #if DEBUG || DEVELOPMENT
1287 
/* One test case for the m_tag allocation tests below. */
struct m_tag_test_entry {
	bool            mtte_test_id;           /* true: use the registered test tag id; false: KERNEL_MODULE_TAG_ID */
	bool            mtte_alloc_must_fail;   /* whether m_tag_create() is expected to fail */
	uint16_t        mtte_type;              /* tag type (KERNEL_TAG_TYPE_* when mtte_test_id is false) */
	int             mtte_len;               /* requested length; replaced by mt_len for built-in types */
};
1294 
struct m_tag_test_entry
    m_tag_test_table[] = {
	/*
	 * Built-in kernel tag types: the length is taken from
	 * m_tag_type_table[] by the test driver, and allocation
	 * is expected to succeed when the subsystem is present.
	 */
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_DUMMYNET,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_IPFILT,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_ENCAP,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_INET6,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_IPSEC,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_CFIL_UDP,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_PF_REASS,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_AQM,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_DRVAUX,
		.mtte_len = 0,
	},

	/*
	 * KERNEL_MODULE_TAG_ID entries with explicit lengths; an
	 * MCLBYTES-sized request is expected to fail.
	 */
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_COUNT,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = MCLBYTES,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = true,
		.mtte_type = KERNEL_TAG_TYPE_COUNT,
		.mtte_len = MCLBYTES,
	},

	/*
	 * Dynamically registered test tag id; negative and
	 * MCLBYTES-sized lengths are expected to fail.
	 */
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = -1,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = MCLBYTES,
	},
};
1402 
/* number of entries in m_tag_test_table */
#define M_TAG_TEST_TABLE_COUNT (sizeof(m_tag_test_table) / sizeof(struct m_tag_test_entry))

/* tag identifier registered on demand for the tests below */
#define M_TAG_TEST_ID "com.apple.test.m_tag"
1406 
1407 static int
do_m_tag_test(mbuf_tag_id_t test_tag_id)1408 do_m_tag_test(mbuf_tag_id_t test_tag_id)
1409 {
1410 	int error = 0;
1411 	struct mbuf *m = NULL;
1412 
1413 	m = m_getpacket();
1414 	if (m == NULL) {
1415 		os_log_error(OS_LOG_DEFAULT, "%s: m_getpacket failed", __func__);
1416 		error = ENOMEM;
1417 		goto done;
1418 	}
1419 
1420 	for (int i = 0; i < M_TAG_TEST_TABLE_COUNT; i++) {
1421 		struct m_tag_test_entry *entry = &m_tag_test_table[i];
1422 		struct m_tag *tag = NULL;
1423 		uint32_t id = test_tag_id;
1424 		int len = entry->mtte_len;
1425 		uint16_t type = entry->mtte_type;
1426 
1427 		if (entry->mtte_test_id == false) {
1428 			id = KERNEL_MODULE_TAG_ID;
1429 			switch (type) {
1430 			case KERNEL_TAG_TYPE_DUMMYNET:
1431 			case KERNEL_TAG_TYPE_IPFILT:
1432 			case KERNEL_TAG_TYPE_ENCAP:
1433 			case KERNEL_TAG_TYPE_INET6:
1434 			case KERNEL_TAG_TYPE_IPSEC:
1435 			case KERNEL_TAG_TYPE_CFIL_UDP:
1436 			case KERNEL_TAG_TYPE_PF_REASS:
1437 			case KERNEL_TAG_TYPE_AQM:
1438 				/* subsystems that use mbuf tags are optional */
1439 				if (m_tag_type_table[type].mt_alloc_func == m_tag_kalloc_notsupp) {
1440 					continue;
1441 				}
1442 				len = m_tag_type_table[type].mt_len;
1443 				if (entry->mtte_alloc_must_fail == true) {
1444 					os_log_error(OS_LOG_DEFAULT,
1445 					    "%s: FAIL m_tag_create(%u, %u, %u) must not fail",
1446 					    __func__, id, type, len);
1447 					error = EINVAL;
1448 					goto done;
1449 				}
1450 				break;
1451 			default:
1452 				break;
1453 			}
1454 		}
1455 		tag = m_tag_create(id, type, len, M_WAIT, m);
1456 		if (tag == NULL) {
1457 			if (entry->mtte_alloc_must_fail == false) {
1458 				os_log_error(OS_LOG_DEFAULT,
1459 				    "%s: FAIL m_tag_create(%u, %u, %u) unexpected failure",
1460 				    __func__, id, type, len);
1461 				error = ENOMEM;
1462 				goto done;
1463 			} else {
1464 				os_log(OS_LOG_DEFAULT,
1465 				    "%s: PASS m_tag_create(%u, %u, %u) expected failure",
1466 				    __func__, id, type, len);
1467 			}
1468 		} else {
1469 			if (entry->mtte_alloc_must_fail == true) {
1470 				os_log_error(OS_LOG_DEFAULT,
1471 				    "%s: FAIL m_tag_create(%u, %u, %u) unexpected success",
1472 				    __func__, id, type, len);
1473 				error = EINVAL;
1474 				goto done;
1475 			} else {
1476 				os_log(OS_LOG_DEFAULT,
1477 				    "%s: PASS m_tag_create(%u, %u, %u) expected success",
1478 				    __func__, id, type, len);
1479 			}
1480 			m_tag_prepend(m, tag);
1481 		}
1482 	}
1483 done:
1484 	if (m != NULL) {
1485 		m_freem(m);
1486 	}
1487 	os_log_error(OS_LOG_DEFAULT,
1488 	    "%s: %s error %d",
1489 	    __func__, error == 0 ? "PASS" : "FAIL", error);
1490 	return error;
1491 }
1492 
1493 static int
do_test_m_tag_unlink(mbuf_tag_id_t test_tag_id)1494 do_test_m_tag_unlink(mbuf_tag_id_t test_tag_id)
1495 {
1496 	struct mbuf *m = NULL;
1497 	int error = 0;
1498 
1499 	m = m_gethdr(M_WAITOK, MT_DATA);
1500 	if (m == NULL) {
1501 		error = ENOMEM;
1502 		goto done;
1503 	}
1504 	for (int i = 0; i < M_TAG_TEST_TABLE_COUNT; i++) {
1505 		struct m_tag_test_entry *entry = &m_tag_test_table[i];
1506 		struct m_tag *tag = NULL;
1507 		uint32_t id = test_tag_id;
1508 		int len = entry->mtte_len;
1509 		uint16_t type = entry->mtte_type;
1510 
1511 		if (entry->mtte_alloc_must_fail == true) {
1512 			continue;
1513 		}
1514 
1515 		if (entry->mtte_test_id == false) {
1516 			id = KERNEL_MODULE_TAG_ID;
1517 			switch (type) {
1518 			case KERNEL_TAG_TYPE_DUMMYNET:
1519 			case KERNEL_TAG_TYPE_IPFILT:
1520 			case KERNEL_TAG_TYPE_ENCAP:
1521 			case KERNEL_TAG_TYPE_INET6:
1522 			case KERNEL_TAG_TYPE_IPSEC:
1523 			case KERNEL_TAG_TYPE_CFIL_UDP:
1524 			case KERNEL_TAG_TYPE_PF_REASS:
1525 			case KERNEL_TAG_TYPE_AQM:
1526 				/* subsystems that use mbuf tags are optional */
1527 				if (m_tag_type_table[type].mt_alloc_func == m_tag_kalloc_notsupp) {
1528 					continue;
1529 				}
1530 				len = m_tag_type_table[type].mt_len;
1531 				break;
1532 			default:
1533 				continue;
1534 			}
1535 		}
1536 		tag = m_tag_create(id, type, len, M_WAIT, m);
1537 		if (tag == NULL) {
1538 			os_log_error(OS_LOG_DEFAULT,
1539 			    "%s: FAIL m_tag_create(%u, %u, %u) failure",
1540 			    __func__, id, type, len);
1541 			error = ENOMEM;
1542 			goto done;
1543 		} else {
1544 			os_log_error(OS_LOG_DEFAULT,
1545 			    "%s: PASS m_tag_create(%u, %u, %u) success",
1546 			    __func__, id, type, len);
1547 			m_tag_prepend(m, tag);
1548 		}
1549 	}
1550 
1551 	struct m_tag *cfil_tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_CFIL_UDP);
1552 	if (cfil_tag == NULL) {
1553 		os_log_error(OS_LOG_DEFAULT,
1554 		    "%s: FAIL m_tag_locate(KERNEL_TAG_TYPE_CFIL_UDP) failure",
1555 		    __func__);
1556 		error = EINVAL;
1557 		goto done;
1558 	} else {
1559 		os_log_error(OS_LOG_DEFAULT,
1560 		    "%s: PASS m_tag_locate(KERNEL_TAG_TYPE_CFIL_UDP) success",
1561 		    __func__);
1562 	}
1563 
1564 	/*
1565 	 * Unlink the mbuf tag, free the mbuf and finally free the mbuf tag
1566 	 */
1567 	m_tag_unlink(m, cfil_tag);
1568 
1569 	m_freem(m);
1570 	m = NULL;
1571 
1572 	m_tag_free(cfil_tag);
1573 
1574 done:
1575 	if (m != NULL) {
1576 		m_freem(m);
1577 	}
1578 	os_log_error(OS_LOG_DEFAULT,
1579 	    "%s: %s error %d",
1580 	    __func__, error == 0 ? "PASS" : "FAIL", error);
1581 	return error;
1582 }
1583 
1584 static int
sysctl_mb_tag_test(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)1585 sysctl_mb_tag_test(__unused struct sysctl_oid *oidp,
1586     __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1587 {
1588 	int error;
1589 	int newvalue;
1590 	int changed;
1591 	int value = 0;
1592 	mbuf_tag_id_t test_tag_id;
1593 
1594 	if ((error = sysctl_io_number(req, value, sizeof(int),
1595 	    &newvalue, &changed)) != 0) {
1596 		goto done;
1597 	}
1598 	if (!changed && newvalue == value) {
1599 		goto done;
1600 	}
1601 	error = mbuf_tag_id_find(M_TAG_TEST_ID, &test_tag_id);
1602 	if (error != 0) {
1603 		os_log_error(OS_LOG_DEFAULT, "%s: mbuf_tag_id_find failed error %d",
1604 		    __func__, error);
1605 		goto done;
1606 	}
1607 	error = do_m_tag_test(test_tag_id);
1608 	if (error != 0) {
1609 		goto done;
1610 	}
1611 	error = do_test_m_tag_unlink(test_tag_id);
1612 	if (error != 0) {
1613 		goto done;
1614 	}
1615 done:
1616 	return error;
1617 }
1618 
/* kern.ipc.mb_tag_test: write a value to run the mbuf tag self-tests */
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_tag_test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    sysctl_mb_tag_test, "I", "mbuf test");
1622 
1623 #endif /* DEBUG || DEVELOPMENT */
1624