1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */
29
30 /*
31 * Copyright (C) 1999 WIDE Project.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the project nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 */
58
59 /*
60 * Copyright (c) 1982, 1986, 1988, 1991, 1993
61 * The Regents of the University of California. All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 * 1. Redistributions of source code must retain the above copyright
67 * notice, this list of conditions and the following disclaimer.
68 * 2. Redistributions in binary form must reproduce the above copyright
69 * notice, this list of conditions and the following disclaimer in the
70 * documentation and/or other materials provided with the distribution.
71 * 3. All advertising materials mentioning features or use of this software
72 * must display the following acknowledgement:
73 * This product includes software developed by the University of
74 * California, Berkeley and its contributors.
75 * 4. Neither the name of the University nor the names of its contributors
76 * may be used to endorse or promote products derived from this software
77 * without specific prior written permission.
78 *
79 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
80 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
81 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
82 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
83 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
84 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
85 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
86 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
87 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
88 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
89 * SUCH DAMAGE.
90 *
91 * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95
92 */
93 /*
94 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
95 * support for mandatory and extensible security protections. This notice
96 * is included in support of clause 2.2 (b) of the Apple Public License,
97 * Version 2.0.
98 */
99
100 /*#define PULLDOWN_DEBUG*/
101
102 #include <sys/param.h>
103 #include <sys/systm.h>
104 #include <sys/proc_internal.h>
105 #include <sys/malloc.h>
106 #include <sys/mbuf.h>
107 #include <sys/mcache.h>
108 #include <sys/sysctl.h>
109
110 #include <netinet/in.h>
111 #include <netinet/ip_var.h>
112 #include <netinet/ip6.h>
113 #include <netinet6/ip6_var.h>
114
115 #include <kern/assert.h>
116
117 #include <os/log.h>
118
119 #include <libkern/OSDebug.h>
120
121 #include <ptrauth.h>
122
123 #if defined(__i386__) || defined(__x86_64__)
124 #define MB_TAG_MBUF 1
125 #endif /* defined(__i386__) || defined(__x86_64__) */
126
127 SYSCTL_DECL(_kern_ipc);
128
/*
 * Per-tag-type dispatch entry: how to allocate and free tags of a given
 * KERNEL_TAG_TYPE_*.  Populated once by mbuf_tag_init() /
 * m_register_internal_tag_type() and read-only thereafter.
 */
struct m_tag_type_entry {
	m_tag_kalloc_func_t mt_alloc_func;  /* allocator for this tag type */
	m_tag_kfree_func_t mt_free_func;    /* matching free routine */
	uint16_t mt_type;                   /* KERNEL_TAG_TYPE_* value */
	uint16_t mt_len;                    /* registered payload length (0 if variable) */
};

typedef struct m_tag_type_entry * __single m_tag_type_entry_ref_t;

/* Per-tag-type counters, updated atomically in m_tag_alloc()/m_tag_free(). */
struct m_tag_type_stats {
	uint64_t mt_alloc_count;            /* successful allocations */
	uint64_t mt_alloc_failed;           /* failed allocation attempts */
	uint64_t mt_free_count;             /* frees */
};

typedef struct m_tag_type_stats * __single m_tag_type_stats_ref_t;
145
/*
 * Dispatch table indexed by KERNEL_TAG_TYPE_*; filled in during
 * mbuf_tag_init() and the registration hooks, then sealed read-only-late.
 */
SECURITY_READ_ONLY_LATE(static struct m_tag_type_entry) m_tag_type_table[KERNEL_TAG_TYPE_COUNT] = {};

/* Mutable statistics, parallel to m_tag_type_table. */
static struct m_tag_type_stats m_tag_type_stats[KERNEL_TAG_TYPE_COUNT] = {};

#ifdef MB_TAG_MBUF
static struct m_tag *m_tag_create_mbuf(uint32_t, uint16_t, uint16_t, int, struct mbuf *);
#endif /* MB_TAG_MBUF */
153
154 /*
155 * ensure that [off, off + len) is contiguous on the mbuf chain "m".
156 * packet chain before "off" is kept untouched.
157 * if offp == NULL, the target will start at <retval, 0> on resulting chain.
158 * if offp != NULL, the target will start at <retval, *offp> on resulting chain.
159 *
160 * on error return (NULL return value), original "m" will be freed.
161 *
162 * XXX M_TRAILINGSPACE/M_LEADINGSPACE on shared cluster (sharedcluster)
163 */
164 struct mbuf *
m_pulldown(struct mbuf * m,int off,int len,int * offp)165 m_pulldown(struct mbuf *m, int off, int len, int *offp)
166 {
167 struct mbuf *n = NULL, *o = NULL;
168 int hlen = 0, tlen = 0, olen = 0;
169 int sharedcluster = 0;
170
171 /* check invalid arguments. */
172 VERIFY(len >= 0 && off >= 0);
173
174 if (m == NULL) {
175 panic("m == NULL in m_pulldown()");
176 }
177 if (len > MCLBYTES) {
178 m_freem(m);
179 return NULL; /* impossible */
180 }
181 int tmp_len = 0;
182 if (os_add_overflow(off, len, &tmp_len)) {
183 m_free(m);
184 return NULL;
185 }
186
187 #ifdef PULLDOWN_DEBUG
188 {
189 struct mbuf *t;
190 printf("before:");
191 for (t = m; t; t = t->m_next) {
192 printf(" %d", t->m_len);
193 }
194 printf("\n");
195 }
196 #endif
197 n = m;
198
199 /*
200 * Iterate and make n point to the mbuf
201 * within which the first byte at length
202 * offset is contained from the start of
203 * mbuf chain.
204 */
205 while (n != NULL && off > 0) {
206 if (n->m_len > off) {
207 break;
208 }
209 off -= n->m_len;
210 n = n->m_next;
211 }
212
213 /* be sure to point non-empty mbuf */
214 while (n != NULL && n->m_len == 0) {
215 n = n->m_next;
216 }
217
218 if (!n) {
219 m_freem(m);
220 return NULL; /* mbuf chain too short */
221 }
222
223 /*
224 * the target data is on <n, off>.
225 * if we got enough data on the mbuf "n", we're done.
226 *
227 * It should be noted, that we should only do this either
228 * when offset is 0, i.e. data is pointing to the start
229 * or when the caller specifies an out argument to get
230 * the offset value in the mbuf to work with data pointer
231 * correctly.
232 *
233 * If offset is not 0 and caller did not provide out-argument
234 * to get offset, we should split the mbuf even when the length
235 * is contained in current mbuf.
236 */
237 if ((off == 0 || offp) && len <= n->m_len - off) {
238 goto ok;
239 }
240
241 /*
242 * when len <= n->m_len - off and off != 0, it is a special case.
243 * len bytes from <n, off> sits in single mbuf, but the caller does
244 * not like the starting position (off).
245 * chop the current mbuf into two pieces, set off to 0.
246 */
247 if (len <= n->m_len - off) {
248 o = m_copym(n, off, n->m_len - off, M_DONTWAIT);
249 if (o == NULL) {
250 m_freem(m);
251 return NULL; /* ENOBUFS */
252 }
253 n->m_len = off;
254 o->m_next = n->m_next;
255 n->m_next = o;
256 n = n->m_next;
257 off = 0;
258 goto ok;
259 }
260
261 /*
262 * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
263 * and construct contiguous mbuf with m_len == len.
264 * note that hlen + tlen == len, and tlen > 0.
265 *
266 * Read these variables as head length and tail length
267 */
268 hlen = n->m_len - off;
269 tlen = len - hlen;
270
271 /*
272 * ensure that we have enough trailing data on mbuf chain.
273 * if not, we can do nothing about the chain.
274 */
275 olen = 0;
276 for (o = n->m_next; o != NULL; o = o->m_next) {
277 olen += o->m_len;
278 }
279 if (hlen + olen < len) {
280 m_freem(m);
281 return NULL; /* mbuf chain too short */
282 }
283
284 /*
285 * easy cases first.
286 * we need to use m_copydata() to get data from <n->m_next, 0>.
287 */
288 if ((n->m_flags & M_EXT) == 0) {
289 sharedcluster = 0;
290 } else {
291 if (m_get_ext_free(n) != NULL) {
292 sharedcluster = 1;
293 } else if (m_mclhasreference(n)) {
294 sharedcluster = 1;
295 } else {
296 sharedcluster = 0;
297 }
298 }
299
300 /*
301 * If we have enough space left in current mbuf to accomodate
302 * tail length, copy tail length worth of data starting with next mbuf
303 * and adjust the length of next one accordingly.
304 */
305 if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
306 && !sharedcluster) {
307 m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
308 n->m_len += tlen;
309 m_adj(n->m_next, tlen);
310 goto ok;
311 }
312
313 /*
314 * If have enough leading space in next mbuf to accomodate head length
315 * of current mbuf, and total resulting length of next mbuf is greater
316 * than or equal to requested len bytes, then just copy hlen from
317 * current to the next one and adjust sizes accordingly.
318 */
319 if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen &&
320 (n->m_next->m_len + hlen) >= len && !sharedcluster) {
321 n->m_next->m_data -= hlen;
322 n->m_next->m_len += hlen;
323 bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
324 n->m_len -= hlen;
325 n = n->m_next;
326 off = 0;
327 goto ok;
328 }
329
330 /*
331 * now, we need to do the hard way. don't m_copy as there's no room
332 * on both end.
333 */
334 MGET(o, M_DONTWAIT, m->m_type);
335 if (o == NULL) {
336 m_freem(m);
337 return NULL; /* ENOBUFS */
338 }
339 if (len > MHLEN) { /* use MHLEN just for safety */
340 MCLGET(o, M_DONTWAIT);
341 if ((o->m_flags & M_EXT) == 0) {
342 m_freem(m);
343 m_free(o);
344 return NULL; /* ENOBUFS */
345 }
346 }
347 /* get hlen from <n, off> into <o, 0> */
348 o->m_len = hlen;
349 bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
350 n->m_len -= hlen;
351 /* get tlen from <n->m_next, 0> into <o, hlen> */
352 m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
353 o->m_len += tlen;
354 m_adj(n->m_next, tlen);
355 o->m_next = n->m_next;
356 n->m_next = o;
357 n = o;
358 off = 0;
359
360 ok:
361 #ifdef PULLDOWN_DEBUG
362 {
363 struct mbuf *t;
364 printf("after:");
365 for (t = m; t; t = t->m_next) {
366 printf("%c%d", t == n ? '*' : ' ', t->m_len);
367 }
368 printf(" (off=%d)\n", off);
369 }
370 #endif
371 if (offp) {
372 *offp = off;
373 }
374 return n;
375 }
376
377 static struct m_tag *
m_tag_kalloc_notsupp(__unused uint32_t id,__unused uint16_t type,__unused uint16_t len,__unused int wait)378 m_tag_kalloc_notsupp(__unused uint32_t id, __unused uint16_t type, __unused uint16_t len, __unused int wait)
379 {
380 return NULL;
381 }
382
383 static void
m_tag_kfree_notsupp(__unused struct m_tag * tag)384 m_tag_kfree_notsupp(__unused struct m_tag *tag)
385 {
386 return;
387 }
388
#if defined(HAS_APPLE_PAC)
/*
 * combine into a uintptr_t the m_tag_type that is 16 bits with the m_tag_id is 32 bits
 */
static uintptr_t
m_tag_cookie_from_id_and_type(struct m_tag *tag)
{
	uintptr_t cookie;

#ifdef __LP64__
	/*
	 * upper 4 bytes: 2 bytes of type
	 * lower 4 bytes: 4 bytes of id
	 */
	cookie = (((uintptr_t)tag->m_tag_type) << 32) | (uintptr_t)tag->m_tag_id;
#else
	/*
	 * upper 2 bytes: 2 bytes of type or-ed with upper 2 bytes of id
	 * lower 2 bytes: lower 2 bytes of id
	 */
	cookie = (((uintptr_t)tag->m_tag_type) << 16) | (uintptr_t)tag->m_tag_id;
#endif
	return cookie;
}

/*
 * Stamp the tag with a PAC-signed cookie derived from its id and type so a
 * later m_tag_verify_cookie() can detect corruption or forged tags.
 * The discriminator blends (type | id) with a string discriminator, so the
 * signature is bound to this particular id/type pair.
 */
void
m_tag_create_cookie(struct m_tag *tag)
{
	uintptr_t cookie = m_tag_cookie_from_id_and_type(tag);

	tag->m_tag_cookie = (uintptr_t) ptrauth_sign_unauthenticated((void *)cookie,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(uintptr_t)(tag->m_tag_type | tag->m_tag_id),
	    ptrauth_string_discriminator("m_tag.m_tag_cookie"));
}

/*
 * Re-authenticate the stored cookie against the tag's current id/type and
 * panic on mismatch (a failed auth or a tampered id/type changes the value).
 */
static void
m_tag_verify_cookie(struct m_tag *tag)
{
	uintptr_t cookie = m_tag_cookie_from_id_and_type(tag);
	uintptr_t auth_cookie;

	auth_cookie = (uintptr_t) ptrauth_auth_data((void *)(uintptr_t)tag->m_tag_cookie,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(uintptr_t)(tag->m_tag_type | tag->m_tag_id),
	    ptrauth_string_discriminator("m_tag.m_tag_cookie")));
	if (cookie != auth_cookie) {
		panic("verify_m_tag_cookie bad m_tag cookie");
	}
}

#else /* defined(HAS_APPLE_PAC) */

/* Without PAC, fall back to a fixed validity pattern. */
void
m_tag_create_cookie(struct m_tag *tag)
{
	tag->m_tag_cookie = M_TAG_VALID_PATTERN;
}

static void
m_tag_verify_cookie(struct m_tag *tag)
{
	VERIFY(tag->m_tag_cookie == M_TAG_VALID_PATTERN);
}

#endif /* defined(HAS_APPLE_PAC) */
455
456
/*
 * Allocate a packet tag for "buf".  On configurations with MB_TAG_MBUF
 * (Intel) the tag may share an mbuf with tags already attached to "buf";
 * otherwise "buf" is ignored and each tag gets its own allocation.
 */
struct m_tag *
m_tag_create(uint32_t id, uint16_t type, int len, int wait, struct mbuf *buf)
{
#ifdef MB_TAG_MBUF
	/*
	 * Create and return an m_tag, either by re-using space in a previous tag
	 * or by allocating a new mbuf/cluster
	 */
	return m_tag_create_mbuf(id, type, (uint16_t)len, wait, buf);
#else /* MB_TAG_MBUF */
#pragma unused(buf)
	/*
	 * Each packet tag has its own allocation
	 */
	return m_tag_alloc(id, type, (uint16_t)len, wait);
#endif /* MB_TAG_MBUF */
}
474
#ifdef MB_TAG_MBUF
/* Get a packet tag structure along with specified data following. */
static struct m_tag *
m_tag_alloc_mbuf(u_int32_t id, u_int16_t type, uint16_t len, int wait)
{
	struct m_tag *t;
	void *mb_cl = NULL;

	if (M_TAG_ALIGN(len) + sizeof(struct m_taghdr) <= MLEN) {
		/* Small tag: embed it in a plain mbuf behind an m_taghdr. */
		struct mbuf *m = m_get(wait, MT_TAG);
		struct m_taghdr *hdr;

		if (m == NULL) {
			return NULL;
		}
		mb_cl = m;

		m->m_flags |= M_TAGHDR;

		hdr = (struct m_taghdr *)(void *)m->m_data;
		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));
		hdr->mth_refcnt = 1;    /* this mbuf currently backs one tag */
		m->m_len += sizeof(struct m_taghdr);
		/* the tag is carved out right after the header */
		t = (struct m_tag *)(void *)(m->m_data + m->m_len);
		VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
		m->m_len += M_TAG_ALIGN(len);
		VERIFY(m->m_len <= MLEN);
	} else if (len + sizeof(struct m_tag) <= MCLBYTES) {
		/* Large tag: dedicate a whole 2K cluster to it. */
		mb_cl = m_mclalloc(wait);
		t = (struct m_tag *)(void *)mb_cl;
	} else {
		/* Too big to fit any backing store. */
		t = NULL;
	}

	if (__improbable(t == NULL)) {
		return NULL;
	}

	VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
	/* data area immediately follows the tag; mb_cl records the backing store */
	M_TAG_INIT(t, id, type, len, (void *)(t + 1), mb_cl);
	if (len > 0) {
		bzero(t->m_tag_data, len);
	}
	return t;
}
520
/*
 * Allocate a tag for "buf", preferring to co-locate it in an mbuf that
 * already backs one of buf's existing tags (to save allocations).
 * Falls back to m_tag_alloc() when no existing tag mbuf has room.
 */
static struct m_tag *
m_tag_create_mbuf(uint32_t id, uint16_t type, uint16_t len, int wait, struct mbuf *buf)
{
	struct m_tag *t = NULL;
	struct m_tag *p;
	void *mb_cl = NULL;

	/* Tags too large for the embedded scheme always get their own storage. */
	if (len + sizeof(struct m_tag) + sizeof(struct m_taghdr) > MLEN) {
		return m_tag_alloc(id, type, len, wait);
	}

	/*
	 * We've exhausted all external cases. Now, go through the m_tag
	 * chain and see if we can fit it in any of them.
	 * If not (t == NULL), call m_tag_alloc to store it in a new mbuf.
	 */
	p = SLIST_FIRST(&buf->m_pkthdr.tags);
	while (p != NULL) {
		/* 2KCL m_tag: cluster-backed tags cannot host additional tags */
		if (M_TAG_ALIGN(p->m_tag_len) +
		    sizeof(struct m_taghdr) > MLEN) {
			p = SLIST_NEXT(p, m_tag_link);
			continue;
		}

		m_tag_verify_cookie(p);

		struct mbuf *m = p->m_tag_mb_cl;
		struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data;

		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));
		VERIFY(m->m_flags & M_TAGHDR && !(m->m_flags & M_EXT));

		/* The mbuf can store this m_tag */
		if (M_TAG_ALIGN(len) <= MLEN - m->m_len) {
			mb_cl = m;
			/* carve the new tag out of the mbuf's remaining space */
			t = (struct m_tag *)(void *)(m->m_data + m->m_len);
			VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
			hdr->mth_refcnt++;      /* one more tag shares this mbuf */
			m->m_len += M_TAG_ALIGN(len);
			VERIFY(m->m_len <= MLEN);
			break;
		}

		p = SLIST_NEXT(p, m_tag_link);
	}

	if (t == NULL) {
		/* no room anywhere: allocate fresh backing storage */
		return m_tag_alloc(id, type, len, wait);
	}

	M_TAG_INIT(t, id, type, len, (void *)(t + 1), mb_cl);
	if (len > 0) {
		bzero(t->m_tag_data, len);
	}
	return t;
}
578
/*
 * Release a tag allocated by the MB_TAG_MBUF scheme: either drop a
 * reference on the shared tag mbuf (freeing it when the last tag goes),
 * or free the dedicated cluster for large tags.
 */
static void
m_tag_free_mbuf(struct m_tag *t)
{
	if (__improbable(t == NULL)) {
		return;
	}

	if (M_TAG_ALIGN(t->m_tag_len) + sizeof(struct m_taghdr) <= MLEN) {
		/* Embedded tag: it lives inside an M_TAGHDR mbuf. */
		struct mbuf * m = t->m_tag_mb_cl;

		VERIFY(m->m_flags & M_TAGHDR);
		struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data;

		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));

		/* No other tags in this mbuf */
		if (--hdr->mth_refcnt == 0) {
			m_free(m);
			return;
		}

		/* Pattern-fill the header so stale reuse is detectable */
		u_int64_t *fill_ptr = (u_int64_t *)t;
		u_int64_t *end_ptr = (u_int64_t *)(t + 1);
		while (fill_ptr < end_ptr) {
			*fill_ptr = M_TAG_FREE_PATTERN;
			fill_ptr++;
		}
	} else {
		/* Large tag: it owned a whole cluster. */
		m_mclfree((caddr_t)t);
	}
}
#endif /* MB_TAG_MBUF */
612
/*
 * Allocations for external data are known to not have pointers for
 * most platforms -- for macOS this is not guaranteed
 */
#if XNU_TARGET_OS_OSX

__typed_allocators_ignore_push

/* Allocate a zeroed tag payload from the default kheap (may hold pointers). */
static inline void * __bidi_indexable
m_tag_data_kalloc(uint16_t len, int wait)
{
	return kheap_alloc(KHEAP_DEFAULT, len, wait | M_ZERO);
}

/* Free the payload and clear the tag's data pointer/length. */
static inline void
m_tag_data_free(struct m_tag *tag)
{
	void *tag_data = tag->m_tag_data;
	size_t tag_len = tag->m_tag_len;
	kheap_free(KHEAP_DEFAULT, tag_data, tag_len);
	tag->m_tag_data = NULL;
	tag->m_tag_len = 0;
}
__typed_allocators_ignore_pop

#else /* XNU_TARGET_OS_OSX */

/* Allocate a zeroed tag payload from the pointer-free data heap. */
static inline void * __bidi_indexable
m_tag_data_kalloc(uint16_t len, int wait)
{
	return kalloc_data(len, wait | M_ZERO);
}

/* Free the payload; kfree_data_sized_by also clears data/len. */
static inline void
m_tag_data_free(struct m_tag *tag)
{
	kfree_data_sized_by(tag->m_tag_data, tag->m_tag_len);
}

#endif /* XNU_TARGET_OS_OSX */
653
654 static struct m_tag *
m_tag_kalloc_external(uint32_t id,uint16_t type,uint16_t len,int wait)655 m_tag_kalloc_external(uint32_t id, uint16_t type, uint16_t len, int wait)
656 {
657 struct m_tag *tag;
658 void *data = NULL;
659
660 tag = kalloc_type(struct m_tag, wait | M_ZERO);
661 if (__improbable(tag == NULL)) {
662 return NULL;
663 }
664
665 if (len > 0) {
666 data = m_tag_data_kalloc(len, wait);
667 if (__improbable(data == NULL)) {
668 kfree_type(struct m_tag, tag);
669 return NULL;
670 }
671 }
672
673 M_TAG_INIT(tag, id, type, len, data, NULL);
674
675 return tag;
676 }
677
678 static void
m_tag_kfree_external(struct m_tag * tag)679 m_tag_kfree_external(struct m_tag *tag)
680 {
681 if (tag->m_tag_data != NULL) {
682 m_tag_data_free(tag);
683 }
684 kfree_type(struct m_tag, tag);
685 }
686
/*
 * Map a (id, type) pair to its dispatch-table entry and, optionally, its
 * stats slot via *pmtts.  Unknown ids/types fall back to the
 * KERNEL_TAG_TYPE_NONE entry (the external allocator).
 */
static struct m_tag_type_entry *
get_m_tag_type_entry(uint32_t id, uint16_t type, struct m_tag_type_stats **pmtts)
{
	/* default: the catch-all "NONE" entry */
	m_tag_type_entry_ref_t mtte = &m_tag_type_table[KERNEL_TAG_TYPE_NONE];

	if (pmtts != NULL) {
		*pmtts = &m_tag_type_stats[KERNEL_TAG_TYPE_NONE];
	}

	if (id == KERNEL_MODULE_TAG_ID) {
		switch (type) {
		/* only these kernel tag types have dedicated table slots */
		case KERNEL_TAG_TYPE_DUMMYNET:
		case KERNEL_TAG_TYPE_IPFILT:
		case KERNEL_TAG_TYPE_ENCAP:
		case KERNEL_TAG_TYPE_INET6:
		case KERNEL_TAG_TYPE_IPSEC:
		case KERNEL_TAG_TYPE_CFIL_UDP:
		case KERNEL_TAG_TYPE_PF_REASS:
		case KERNEL_TAG_TYPE_AQM:
		case KERNEL_TAG_TYPE_DRVAUX:
			mtte = &m_tag_type_table[type];
			if (pmtts != NULL) {
				*pmtts = &m_tag_type_stats[type];
			}
			break;
		default:
#if DEBUG || DEVELOPMENT
			/* catch kernel tag types added without a case above */
			if (type > 0 && type < KERNEL_TAG_TYPE_COUNT) {
				panic("get_m_tag_type_entry unexpected m_tag type %u",
				    type);
			}
#endif /* DEBUG || DEVELOPMENT */
			break;
		}
	}

	return mtte;
}
725
#ifndef MB_TAG_MBUF
/*
 * Allocate a tag through the type's registered allocator and sanity-check
 * the result: tag and payload must be 8-byte aligned, and the payload
 * pointer must be non-NULL exactly when len != 0.
 */
static struct m_tag *
m_tag_kalloc(uint32_t id, uint16_t type, uint16_t len, int wait, struct m_tag_type_entry *mtte)
{
	struct m_tag *tag = NULL;

	tag = mtte->mt_alloc_func(id, type, len, wait);

	if (__probable(tag != NULL)) {
		VERIFY(IS_P2ALIGNED(tag, sizeof(uint64_t)));

		if (__improbable(tag->m_tag_data == NULL)) {
			VERIFY(len == 0);
		} else {
			VERIFY(len != 0);
			VERIFY(IS_P2ALIGNED(tag->m_tag_data, sizeof(uint64_t)));
		}
	}
	return tag;
}

/* Free a tag through the type's registered free routine. */
static void
m_tag_kfree(struct m_tag *tag, struct m_tag_type_entry *mtte)
{
	mtte->mt_free_func(tag);
}
#endif /* MB_TAG_MBUF */
753
/*
 * Allocate a packet tag of "len" payload bytes.  Returns NULL when len is
 * out of range or the underlying allocation fails; per-type allocation
 * statistics are updated either way.
 */
struct m_tag *
m_tag_alloc(uint32_t id, uint16_t type, int len, int wait)
{
	struct m_tag *tag = NULL;
	m_tag_type_entry_ref_t mtte = NULL;
	m_tag_type_stats_ref_t mtts = NULL;

	mtte = get_m_tag_type_entry(id, type, &mtts);

	/* reject negative or over-large payloads (tag + data must fit a cluster) */
	if (__improbable(len < 0 || len >= MCLBYTES - sizeof(struct m_tag))) {
		goto done;
	}

#ifdef MB_TAG_MBUF
	tag = m_tag_alloc_mbuf(id, type, (uint16_t)len, wait);
#else /* MB_TAG_MBUF */
	/*
	 * Using Z_NOWAIT could cause retransmission delays when there aren't
	 * many other colocated types in the zone that would prime it. Use
	 * Z_NOPAGEWAIT instead which will only fail to allocate when zalloc
	 * needs to block on the VM for pages.
	 */
	if (wait & Z_NOWAIT) {
		wait &= ~Z_NOWAIT;
		wait |= Z_NOPAGEWAIT;
	}
	tag = m_tag_kalloc(id, type, (uint16_t)len, wait, mtte);
#endif /* MB_TAG_MBUF */

done:
	if (__probable(tag != NULL)) {
		/* allocators stamp the cookie via M_TAG_INIT; verify it took */
		m_tag_verify_cookie(tag);
		assert3u(tag->m_tag_id, ==, id);
		assert3u(tag->m_tag_type, ==, type);
		assert3u(tag->m_tag_len, ==, len);

		os_atomic_inc(&mtts->mt_alloc_count, relaxed);
	} else {
		os_atomic_inc(&mtts->mt_alloc_failed, relaxed);
	}

	return tag;
}
797
/*
 * Free a packet tag.  Verifies the tag's integrity cookie before handing
 * it to the type-specific (or mbuf-backed) free path, and bumps the
 * per-type free counter.  NULL is tolerated as a no-op.
 */
void
m_tag_free(struct m_tag *tag)
{
	m_tag_type_entry_ref_t mtte = NULL;
	m_tag_type_stats_ref_t mtts = NULL;

	if (__improbable(tag == NULL)) {
		return;
	}

	/* reject corrupted or forged tags before freeing */
	m_tag_verify_cookie(tag);

	mtte = get_m_tag_type_entry(tag->m_tag_id, tag->m_tag_type, &mtts);

#ifdef MB_TAG_MBUF
	m_tag_free_mbuf(tag);
#else /* MB_TAG_MBUF */
	m_tag_kfree(tag, mtte);
#endif /* MB_TAG_MBUF */

	os_atomic_inc(&mtts->mt_free_count, relaxed);
}
821
/*
 * One-time initialization of the tag dispatch table: every slot starts as
 * "not supported", the NONE and DRVAUX slots get the generic external
 * allocator, and each subsystem then registers its own type-specific
 * allocator via the *_register_m_tag() hooks below.
 */
void
mbuf_tag_init(void)
{
	for (uint16_t type = 0; type < KERNEL_TAG_TYPE_COUNT; type++) {
		m_tag_type_table[type].mt_type = type;
		m_tag_type_table[type].mt_len = 0;
		m_tag_type_table[type].mt_alloc_func = m_tag_kalloc_notsupp;
		m_tag_type_table[type].mt_free_func = m_tag_kfree_notsupp;
	}
	/* NONE and DRVAUX have variable-length payloads: use the external allocator */
	m_tag_type_table[KERNEL_TAG_TYPE_NONE].mt_alloc_func = m_tag_kalloc_external;
	m_tag_type_table[KERNEL_TAG_TYPE_NONE].mt_free_func = m_tag_kfree_external;
	m_tag_type_table[KERNEL_TAG_TYPE_DRVAUX].mt_alloc_func = m_tag_kalloc_external;
	m_tag_type_table[KERNEL_TAG_TYPE_DRVAUX].mt_free_func = m_tag_kfree_external;

#if NETWORKING
	extern void pktsched_register_m_tag(void);
	pktsched_register_m_tag();
#endif /* NETWORKING */

#if INET
	extern void ip6_register_m_tag(void);
	ip6_register_m_tag();

	extern void ipfilter_register_m_tag(void);
	ipfilter_register_m_tag();

	extern void encap_register_m_tag(void);
	encap_register_m_tag();
#endif /* INET */

#if IPSEC
	extern void ipsec_register_m_tag(void);
	ipsec_register_m_tag();
#endif /* IPSEC */

#if DUMMYNET
	extern void dummynet_register_m_tag(void);
	dummynet_register_m_tag();
#endif /* DUMMYNET */

#if PF
	extern void pf_register_m_tag(void);
	pf_register_m_tag();
#endif /* PF */

#if CONTENT_FILTER
	extern void cfil_register_m_tag(void);
	cfil_register_m_tag();
#endif /* CONTENT_FILTER */
}
872
873 int
m_register_internal_tag_type(uint16_t type,uint16_t len,m_tag_kalloc_func_t alloc_func,m_tag_kfree_func_t free_func)874 m_register_internal_tag_type(uint16_t type, uint16_t len,
875 m_tag_kalloc_func_t alloc_func, m_tag_kfree_func_t free_func)
876 {
877 int error = 0;
878
879 if (type <= 0 || type >= KERNEL_TAG_TYPE_DRVAUX) {
880 error = EINVAL;
881 goto done;
882 }
883 m_tag_type_table[type].mt_type = type;
884 m_tag_type_table[type].mt_len = len;
885 m_tag_type_table[type].mt_alloc_func = alloc_func;
886 m_tag_type_table[type].mt_free_func = free_func;
887
888 done:
889 return error;
890 }
891
/* Prepend a packet tag: link "t" at the head of m's tag list. */
void
m_tag_prepend(struct mbuf *m, struct m_tag *t)
{
	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
}
898
/* Unlink a packet tag from m's tag list without freeing it. */
void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{
	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}
905
/* Unlink and free a packet tag. */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
	m_tag_unlink(m, t);
	m_tag_free(t);
}
913
914 /* Unlink and free a packet tag chain, starting from given tag. */
915 void
m_tag_delete_chain(struct mbuf * m)916 m_tag_delete_chain(struct mbuf *m)
917 {
918 struct m_tag *p, *q;
919
920 p = SLIST_FIRST(&m->m_pkthdr.tags);
921 if (p == NULL) {
922 return;
923 }
924
925 while ((q = SLIST_NEXT(p, m_tag_link)) != NULL) {
926 m_tag_delete(m, q);
927 }
928 m_tag_delete(m, p);
929 }
930
931 /* Find a tag, starting from a given position. */
932 struct m_tag *
m_tag_locate(struct mbuf * m,uint32_t id,uint16_t type)933 m_tag_locate(struct mbuf *m, uint32_t id, uint16_t type)
934 {
935 struct m_tag *p;
936
937 VERIFY(m->m_flags & M_PKTHDR);
938
939 p = SLIST_FIRST(&m->m_pkthdr.tags);
940
941 while (p != NULL) {
942 if (p->m_tag_id == id && p->m_tag_type == type) {
943 m_tag_verify_cookie(p);
944 return p;
945 }
946 p = SLIST_NEXT(p, m_tag_link);
947 }
948 return NULL;
949 }
950
/*
 * Copy a single tag: allocate a fresh tag with the same id/type/len and
 * duplicate the payload bytes.  Returns NULL on allocation failure.
 */
struct m_tag *
m_tag_copy(struct m_tag *t, int how)
{
	struct m_tag *p;

	VERIFY(t != NULL);

	p = m_tag_alloc(t->m_tag_id, t->m_tag_type, t->m_tag_len, how);
	if (p == NULL) {
		return NULL;
	}
	bcopy(t->m_tag_data, p->m_tag_data, t->m_tag_len); /* Copy the data */
	return p;
}
966
967 /*
968 * Copy two tag chains. The destination mbuf (to) loses any attached
969 * tags even if the operation fails. This should not be a problem, as
970 * m_tag_copy_chain() is typically called with a newly-allocated
971 * destination mbuf.
972 */
973 int
m_tag_copy_chain(struct mbuf * to,struct mbuf * from,int how)974 m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
975 {
976 struct m_tag *p, *t, *tprev = NULL;
977
978 VERIFY((to->m_flags & M_PKTHDR) && (from->m_flags & M_PKTHDR));
979
980 m_tag_delete_chain(to);
981 SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
982 m_tag_verify_cookie(p);
983 t = m_tag_copy(p, how);
984 if (t == NULL) {
985 m_tag_delete_chain(to);
986 return 0;
987 }
988 if (tprev == NULL) {
989 SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
990 } else {
991 SLIST_INSERT_AFTER(tprev, t, m_tag_link);
992 tprev = t;
993 }
994 }
995 return 1;
996 }
997
/*
 * Initialize dynamic and static tags on an mbuf: reset the dynamic tag
 * list, and zero the built-in (static) tag storage only when "all" is set.
 */
void
m_tag_init(struct mbuf *m, int all)
{
	VERIFY(m->m_flags & M_PKTHDR);

	SLIST_INIT(&m->m_pkthdr.tags);
	/*
	 * If the caller wants to preserve static mbuf tags
	 * (e.g. m_dup_pkthdr), don't zero them out.
	 */
	if (all) {
		bzero(&m->m_pkthdr.builtin_mtag._net_mtag,
		    sizeof(m->m_pkthdr.builtin_mtag._net_mtag));
	}
}
1014
/* Get first tag in chain (NULL if the mbuf carries no tags). */
struct m_tag *
m_tag_first(struct mbuf *m)
{
	VERIFY(m->m_flags & M_PKTHDR);

	return SLIST_FIRST(&m->m_pkthdr.tags);
}
1023
/* Get next tag in chain; "m" is unused but kept for API symmetry. */
struct m_tag *
m_tag_next(struct mbuf *m, struct m_tag *t)
{
#pragma unused(m)
	VERIFY(t != NULL);

	return SLIST_NEXT(t, m_tag_link);
}
1033
1034 int
m_set_traffic_class(struct mbuf * m,mbuf_traffic_class_t tc)1035 m_set_traffic_class(struct mbuf *m, mbuf_traffic_class_t tc)
1036 {
1037 uint32_t val = MBUF_TC2SCVAL(tc); /* just the val portion */
1038
1039 return m_set_service_class(m, m_service_class_from_val(val));
1040 }
1041
/* Return the traffic class corresponding to the packet's service class. */
mbuf_traffic_class_t
m_get_traffic_class(struct mbuf *m)
{
	return MBUF_SC2TC(m_get_service_class(m));
}
1047
1048 int
m_set_service_class(struct mbuf * m,mbuf_svc_class_t sc)1049 m_set_service_class(struct mbuf *m, mbuf_svc_class_t sc)
1050 {
1051 int error = 0;
1052
1053 VERIFY(m->m_flags & M_PKTHDR);
1054
1055 if (MBUF_VALID_SC(sc)) {
1056 m->m_pkthdr.pkt_svc = sc;
1057 } else {
1058 error = EINVAL;
1059 }
1060
1061 return error;
1062 }
1063
1064 mbuf_svc_class_t
m_get_service_class(struct mbuf * m)1065 m_get_service_class(struct mbuf *m)
1066 {
1067 mbuf_svc_class_t sc;
1068
1069 VERIFY(m->m_flags & M_PKTHDR);
1070
1071 if (MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1072 sc = m->m_pkthdr.pkt_svc;
1073 } else {
1074 sc = MBUF_SC_BE;
1075 }
1076
1077 return sc;
1078 }
1079
1080 mbuf_svc_class_t
m_service_class_from_idx(uint32_t i)1081 m_service_class_from_idx(uint32_t i)
1082 {
1083 mbuf_svc_class_t sc = MBUF_SC_BE;
1084
1085 switch (i) {
1086 case SCIDX_BK_SYS:
1087 return MBUF_SC_BK_SYS;
1088
1089 case SCIDX_BK:
1090 return MBUF_SC_BK;
1091
1092 case SCIDX_BE:
1093 return MBUF_SC_BE;
1094
1095 case SCIDX_RD:
1096 return MBUF_SC_RD;
1097
1098 case SCIDX_OAM:
1099 return MBUF_SC_OAM;
1100
1101 case SCIDX_AV:
1102 return MBUF_SC_AV;
1103
1104 case SCIDX_RV:
1105 return MBUF_SC_RV;
1106
1107 case SCIDX_VI:
1108 return MBUF_SC_VI;
1109
1110 case SCIDX_VO:
1111 return MBUF_SC_VO;
1112
1113 case SCIDX_CTL:
1114 return MBUF_SC_CTL;
1115
1116 default:
1117 break;
1118 }
1119
1120 VERIFY(0);
1121 /* NOTREACHED */
1122 return sc;
1123 }
1124
1125 mbuf_svc_class_t
m_service_class_from_val(uint32_t v)1126 m_service_class_from_val(uint32_t v)
1127 {
1128 mbuf_svc_class_t sc = MBUF_SC_BE;
1129
1130 switch (v) {
1131 case SCVAL_BK_SYS:
1132 return MBUF_SC_BK_SYS;
1133
1134 case SCVAL_BK:
1135 return MBUF_SC_BK;
1136
1137 case SCVAL_BE:
1138 return MBUF_SC_BE;
1139
1140 case SCVAL_RD:
1141 return MBUF_SC_RD;
1142
1143 case SCVAL_OAM:
1144 return MBUF_SC_OAM;
1145
1146 case SCVAL_AV:
1147 return MBUF_SC_AV;
1148
1149 case SCVAL_RV:
1150 return MBUF_SC_RV;
1151
1152 case SCVAL_VI:
1153 return MBUF_SC_VI;
1154
1155 case SCVAL_VO:
1156 return MBUF_SC_VO;
1157
1158 case SCVAL_CTL:
1159 return MBUF_SC_CTL;
1160
1161 default:
1162 break;
1163 }
1164
1165 VERIFY(0);
1166 /* NOTREACHED */
1167 return sc;
1168 }
1169
/*
 * Adjust a partial 16-bit 1's complement checksum "sum" — originally
 * accumulated over the frame beginning at offset "start" — so that it
 * covers exactly the "datalen" bytes beginning at "dataoff": the sums
 * of any leading and trailing extraneous octets are removed (or a
 * short prefix is added back in), and the result is folded to 16 bits.
 */
uint16_t
m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff,
    uint32_t datalen, uint32_t sum)
{
	uint32_t total_sub = 0;                 /* total to subtract */
	uint32_t mlen = m_pktlen(m);            /* frame length */
	uint32_t bytes = (dataoff + datalen);   /* bytes covered by sum */
	int len;

	/* the covered range must lie within the frame */
	ASSERT(bytes <= mlen);

	/*
	 * Take care of excluding (len > 0) or including (len < 0)
	 * extraneous octets at the beginning of the packet, taking
	 * into account the start offset.
	 */
	len = (dataoff - start);
	if (len > 0) {
		/* sum started before the data: queue the prefix for removal */
		total_sub = m_sum16(m, start, len);
	} else if (len < 0) {
		/* sum started inside the data: add the missing prefix back */
		sum += m_sum16(m, dataoff, -len);
	}

	/*
	 * Take care of excluding any postpended extraneous octets.
	 */
	len = (mlen - bytes);
	if (len > 0) {
		struct mbuf *m0 = m;
		uint32_t extra = m_sum16(m, bytes, len);
		uint32_t off = bytes, off0 = off;

		/* walk the chain to the mbuf containing offset "bytes" */
		while (off > 0) {
			if (__improbable(m == NULL)) {
				panic("%s: invalid mbuf chain %p [off %u, "
				    "len %u]", __func__, m0, off0, len);
				/* NOTREACHED */
			}
			if (off < m->m_len) {
				break;
			}
			off -= m->m_len;
			m = m->m_next;
		}

		/* if we started on odd-alignment, swap the value */
		if ((uintptr_t)(mtod(m, uint8_t *) + off) & 1) {
			total_sub += ((extra << 8) & 0xffff) | (extra >> 8);
		} else {
			total_sub += extra;
		}

		/* fold carries so total_sub stays a 1's complement quantity */
		total_sub = (total_sub >> 16) + (total_sub & 0xffff);
	}

	/*
	 * 1's complement subtract any extraneous octets.
	 */
	if (total_sub != 0) {
		if (total_sub >= sum) {
			sum = ~(total_sub - sum) & 0xffff;
		} else {
			sum -= total_sub;
		}
	}

	/* fold 32-bit to 16-bit */
	sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */
	sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */
	sum = (sum >> 16) + (sum & 0xffff); /* final carry */

	return sum & 0xffff;
}
1243
1244 uint16_t
m_sum16(struct mbuf * m,uint32_t off,uint32_t len)1245 m_sum16(struct mbuf *m, uint32_t off, uint32_t len)
1246 {
1247 int mlen;
1248
1249 /*
1250 * Sanity check
1251 *
1252 * Use m_length2() instead of m_length(), as we cannot rely on
1253 * the caller setting m_pkthdr.len correctly, if the mbuf is
1254 * a M_PKTHDR one.
1255 */
1256 if ((mlen = m_length2(m, NULL)) < (off + len)) {
1257 panic("%s: mbuf %p len (%d) < off+len (%d+%d)", __func__,
1258 m, mlen, off, len);
1259 /* NOTREACHED */
1260 }
1261
1262 return (uint16_t)os_cpu_in_cksum_mbuf(m, len, off, 0);
1263 }
1264
1265 static int
sysctl_mb_tag_stats(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)1266 sysctl_mb_tag_stats(__unused struct sysctl_oid *oidp,
1267 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1268 {
1269 int error = 0;
1270
1271 if (req->oldptr == USER_ADDR_NULL) {
1272 req->oldidx = KERNEL_TAG_TYPE_COUNT * sizeof(struct m_tag_stats);
1273 return 0;
1274 }
1275 if (req->newptr != USER_ADDR_NULL) {
1276 return EPERM;
1277 }
1278
1279 for (uint16_t i = 0; i < KERNEL_TAG_TYPE_COUNT; i++) {
1280 struct m_tag_stats m_tag_stats = {};
1281
1282 m_tag_stats.mts_id = KERNEL_MODULE_TAG_ID;
1283 m_tag_stats.mts_type = i;
1284 m_tag_stats.mts_len = m_tag_type_table[i].mt_len;
1285 m_tag_stats.mts_alloc_count = m_tag_type_stats[i].mt_alloc_count;
1286 m_tag_stats.mts_alloc_failed = m_tag_type_stats[i].mt_alloc_failed;
1287 m_tag_stats.mts_free_count = m_tag_type_stats[i].mt_free_count;
1288
1289 error = SYSCTL_OUT(req, &m_tag_stats, sizeof(struct m_tag_stats));
1290 }
1291
1292 return error;
1293 }
1294
/* kern.ipc.mb_tag_stats: per-type mbuf tag allocation statistics (read-only). */
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_tag_stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, NULL, 0,
    sysctl_mb_tag_stats, "S,m_tag_stats", "");
1298
1299 #if DEBUG || DEVELOPMENT
1300
/* One m_tag_create() test case (see m_tag_test_table below). */
struct m_tag_test_entry {
	bool mtte_test_id;              /* true: use the dynamically registered test tag id */
	bool mtte_alloc_must_fail;      /* expected m_tag_create() outcome */
	uint16_t mtte_type;             /* tag type (KERNEL_TAG_TYPE_* for built-ins) */
	int mtte_len;                   /* payload length to request */
};
1307
/*
 * Test cases driven by do_m_tag_test()/do_test_m_tag_unlink().
 * First group: built-in kernel tag types (length taken from
 * m_tag_type_table).  Second group: boundary type/length values with
 * KERNEL_MODULE_TAG_ID.  Third group: the dynamic test tag id.
 */
struct m_tag_test_entry
m_tag_test_table[] = {
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_DUMMYNET,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_IPFILT,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_ENCAP,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_INET6,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_IPSEC,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_CFIL_UDP,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_PF_REASS,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_AQM,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_DRVAUX,
		.mtte_len = 0,
	},

	/* boundary type/length values under KERNEL_MODULE_TAG_ID */
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_COUNT,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = MCLBYTES,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = true,
		.mtte_type = KERNEL_TAG_TYPE_COUNT,
		.mtte_len = MCLBYTES,
	},

	/* cases using the dynamically registered test tag id */
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = -1,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = MCLBYTES,
	},
};
1415
/* number of entries in m_tag_test_table */
#define M_TAG_TEST_TABLE_COUNT (sizeof(m_tag_test_table) / sizeof(struct m_tag_test_entry))

/* bundle id used to register the dynamic test tag */
#define M_TAG_TEST_ID "com.apple.test.m_tag"
1419
1420 static int
do_m_tag_test(mbuf_tag_id_t test_tag_id)1421 do_m_tag_test(mbuf_tag_id_t test_tag_id)
1422 {
1423 int error = 0;
1424 struct mbuf *m = NULL;
1425
1426 m = m_getpacket();
1427 if (m == NULL) {
1428 os_log_error(OS_LOG_DEFAULT, "%s: m_getpacket failed", __func__);
1429 error = ENOMEM;
1430 goto done;
1431 }
1432
1433 for (int i = 0; i < M_TAG_TEST_TABLE_COUNT; i++) {
1434 struct m_tag_test_entry *entry = &m_tag_test_table[i];
1435 struct m_tag *tag = NULL;
1436 uint32_t id = test_tag_id;
1437 int len = entry->mtte_len;
1438 uint16_t type = entry->mtte_type;
1439
1440 if (entry->mtte_test_id == false) {
1441 id = KERNEL_MODULE_TAG_ID;
1442 switch (type) {
1443 case KERNEL_TAG_TYPE_DUMMYNET:
1444 case KERNEL_TAG_TYPE_IPFILT:
1445 case KERNEL_TAG_TYPE_ENCAP:
1446 case KERNEL_TAG_TYPE_INET6:
1447 case KERNEL_TAG_TYPE_IPSEC:
1448 case KERNEL_TAG_TYPE_CFIL_UDP:
1449 case KERNEL_TAG_TYPE_PF_REASS:
1450 case KERNEL_TAG_TYPE_AQM:
1451 /* subsystems that use mbuf tags are optional */
1452 if (m_tag_type_table[type].mt_alloc_func == m_tag_kalloc_notsupp) {
1453 continue;
1454 }
1455 len = m_tag_type_table[type].mt_len;
1456 if (entry->mtte_alloc_must_fail == true) {
1457 os_log_error(OS_LOG_DEFAULT,
1458 "%s: FAIL m_tag_create(%u, %u, %u) must not fail",
1459 __func__, id, type, len);
1460 error = EINVAL;
1461 goto done;
1462 }
1463 break;
1464 default:
1465 break;
1466 }
1467 }
1468 tag = m_tag_create(id, type, len, M_WAIT, m);
1469 if (tag == NULL) {
1470 if (entry->mtte_alloc_must_fail == false) {
1471 os_log_error(OS_LOG_DEFAULT,
1472 "%s: FAIL m_tag_create(%u, %u, %u) unexpected failure",
1473 __func__, id, type, len);
1474 error = ENOMEM;
1475 goto done;
1476 } else {
1477 os_log(OS_LOG_DEFAULT,
1478 "%s: PASS m_tag_create(%u, %u, %u) expected failure",
1479 __func__, id, type, len);
1480 }
1481 } else {
1482 if (entry->mtte_alloc_must_fail == true) {
1483 os_log_error(OS_LOG_DEFAULT,
1484 "%s: FAIL m_tag_create(%u, %u, %u) unexpected success",
1485 __func__, id, type, len);
1486 error = EINVAL;
1487 goto done;
1488 } else {
1489 os_log(OS_LOG_DEFAULT,
1490 "%s: PASS m_tag_create(%u, %u, %u) expected success",
1491 __func__, id, type, len);
1492 }
1493 m_tag_prepend(m, tag);
1494 }
1495 }
1496 done:
1497 if (m != NULL) {
1498 m_freem(m);
1499 }
1500 os_log_error(OS_LOG_DEFAULT,
1501 "%s: %s error %d",
1502 __func__, error == 0 ? "PASS" : "FAIL", error);
1503 return error;
1504 }
1505
1506 static int
do_test_m_tag_unlink(mbuf_tag_id_t test_tag_id)1507 do_test_m_tag_unlink(mbuf_tag_id_t test_tag_id)
1508 {
1509 struct mbuf *m = NULL;
1510 int error = 0;
1511
1512 m = m_gethdr(M_WAITOK, MT_DATA);
1513 if (m == NULL) {
1514 error = ENOMEM;
1515 goto done;
1516 }
1517 for (int i = 0; i < M_TAG_TEST_TABLE_COUNT; i++) {
1518 struct m_tag_test_entry *entry = &m_tag_test_table[i];
1519 struct m_tag *tag = NULL;
1520 uint32_t id = test_tag_id;
1521 int len = entry->mtte_len;
1522 uint16_t type = entry->mtte_type;
1523
1524 if (entry->mtte_alloc_must_fail == true) {
1525 continue;
1526 }
1527
1528 if (entry->mtte_test_id == false) {
1529 id = KERNEL_MODULE_TAG_ID;
1530 switch (type) {
1531 case KERNEL_TAG_TYPE_DUMMYNET:
1532 case KERNEL_TAG_TYPE_IPFILT:
1533 case KERNEL_TAG_TYPE_ENCAP:
1534 case KERNEL_TAG_TYPE_INET6:
1535 case KERNEL_TAG_TYPE_IPSEC:
1536 case KERNEL_TAG_TYPE_CFIL_UDP:
1537 case KERNEL_TAG_TYPE_PF_REASS:
1538 case KERNEL_TAG_TYPE_AQM:
1539 /* subsystems that use mbuf tags are optional */
1540 if (m_tag_type_table[type].mt_alloc_func == m_tag_kalloc_notsupp) {
1541 continue;
1542 }
1543 len = m_tag_type_table[type].mt_len;
1544 break;
1545 default:
1546 continue;
1547 }
1548 }
1549 tag = m_tag_create(id, type, len, M_WAIT, m);
1550 if (tag == NULL) {
1551 os_log_error(OS_LOG_DEFAULT,
1552 "%s: FAIL m_tag_create(%u, %u, %u) failure",
1553 __func__, id, type, len);
1554 error = ENOMEM;
1555 goto done;
1556 } else {
1557 os_log_error(OS_LOG_DEFAULT,
1558 "%s: PASS m_tag_create(%u, %u, %u) success",
1559 __func__, id, type, len);
1560 m_tag_prepend(m, tag);
1561 }
1562 }
1563
1564 struct m_tag *cfil_tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_CFIL_UDP);
1565 if (cfil_tag == NULL) {
1566 os_log_error(OS_LOG_DEFAULT,
1567 "%s: FAIL m_tag_locate(KERNEL_TAG_TYPE_CFIL_UDP) failure",
1568 __func__);
1569 error = EINVAL;
1570 goto done;
1571 } else {
1572 os_log_error(OS_LOG_DEFAULT,
1573 "%s: PASS m_tag_locate(KERNEL_TAG_TYPE_CFIL_UDP) success",
1574 __func__);
1575 }
1576
1577 /*
1578 * Unlink the mbuf tag, free the mbuf and finally free the mbuf tag
1579 */
1580 m_tag_unlink(m, cfil_tag);
1581
1582 m_freem(m);
1583 m = NULL;
1584
1585 m_tag_free(cfil_tag);
1586
1587 done:
1588 if (m != NULL) {
1589 m_freem(m);
1590 }
1591 os_log_error(OS_LOG_DEFAULT,
1592 "%s: %s error %d",
1593 __func__, error == 0 ? "PASS" : "FAIL", error);
1594 return error;
1595 }
1596
1597 static int
sysctl_mb_tag_test(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)1598 sysctl_mb_tag_test(__unused struct sysctl_oid *oidp,
1599 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1600 {
1601 int error;
1602 int newvalue;
1603 int changed;
1604 int value = 0;
1605 mbuf_tag_id_t test_tag_id;
1606
1607 if ((error = sysctl_io_number(req, value, sizeof(int),
1608 &newvalue, &changed)) != 0) {
1609 goto done;
1610 }
1611 if (!changed && newvalue == value) {
1612 goto done;
1613 }
1614 error = mbuf_tag_id_find(M_TAG_TEST_ID, &test_tag_id);
1615 if (error != 0) {
1616 os_log_error(OS_LOG_DEFAULT, "%s: mbuf_tag_id_find failed error %d",
1617 __func__, error);
1618 goto done;
1619 }
1620 error = do_m_tag_test(test_tag_id);
1621 if (error != 0) {
1622 goto done;
1623 }
1624 error = do_test_m_tag_unlink(test_tag_id);
1625 if (error != 0) {
1626 goto done;
1627 }
1628 done:
1629 return error;
1630 }
1631
/* kern.ipc.mb_tag_test: write a non-zero value to run the mbuf tag tests. */
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_tag_test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    sysctl_mb_tag_test, "I", "mbuf test");
1635
1636 #endif /* DEBUG || DEVELOPMENT */
1637