1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */
29
30 /*
31 * Copyright (C) 1999 WIDE Project.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the project nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 */
58
59 /*
60 * Copyright (c) 1982, 1986, 1988, 1991, 1993
61 * The Regents of the University of California. All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 * 1. Redistributions of source code must retain the above copyright
67 * notice, this list of conditions and the following disclaimer.
68 * 2. Redistributions in binary form must reproduce the above copyright
69 * notice, this list of conditions and the following disclaimer in the
70 * documentation and/or other materials provided with the distribution.
71 * 3. All advertising materials mentioning features or use of this software
72 * must display the following acknowledgement:
73 * This product includes software developed by the University of
74 * California, Berkeley and its contributors.
75 * 4. Neither the name of the University nor the names of its contributors
76 * may be used to endorse or promote products derived from this software
77 * without specific prior written permission.
78 *
79 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
80 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
81 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
82 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
83 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
84 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
85 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
86 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
87 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
88 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
89 * SUCH DAMAGE.
90 *
91 * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95
92 */
93 /*
94 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
95 * support for mandatory and extensible security protections. This notice
96 * is included in support of clause 2.2 (b) of the Apple Public License,
97 * Version 2.0.
98 */
99
100 /*#define PULLDOWN_DEBUG*/
101
102 #include <sys/param.h>
103 #include <sys/systm.h>
104 #include <sys/proc_internal.h>
105 #include <sys/malloc.h>
106 #include <sys/mbuf.h>
107 #include <sys/mcache.h>
108 #include <sys/sysctl.h>
109
110 #include <netinet/in.h>
111 #include <netinet/ip_var.h>
112 #include <netinet/ip6.h>
113 #include <netinet6/ip6_var.h>
114
115 #include <kern/assert.h>
116
117 #include <os/log.h>
118
119 #include <libkern/OSDebug.h>
120
121 #include <ptrauth.h>
122
123 #if defined(__i386__) || defined(__x86_64__)
124 #define MB_TAG_MBUF 1
125 #endif /* defined(__i386__) || defined(__x86_64__) */
126
127 SYSCTL_DECL(_kern_ipc);
128
/*
 * Per-type dispatch entry for kernel-internal m_tags: the alloc/free
 * callbacks registered for the type, plus the type code and the fixed
 * payload length supplied at registration time.
 */
struct m_tag_type_entry {
	m_tag_kalloc_func_t mt_alloc_func;	/* allocator callback for this tag type */
	m_tag_kfree_func_t mt_free_func;	/* matching free callback */
	uint16_t mt_type;			/* KERNEL_TAG_TYPE_* code */
	uint16_t mt_len;			/* registered payload length (bytes) */
};
135
/*
 * Per-type counters, updated atomically in m_tag_alloc()/m_tag_free().
 */
struct m_tag_type_stats {
	uint64_t mt_alloc_count;	/* successful allocations */
	uint64_t mt_alloc_failed;	/* failed allocation attempts */
	uint64_t mt_free_count;		/* tags freed */
};
141
142 SECURITY_READ_ONLY_LATE(static struct m_tag_type_entry) m_tag_type_table[KERNEL_TAG_TYPE_COUNT] = {};
143
144 static struct m_tag_type_stats m_tag_type_stats[KERNEL_TAG_TYPE_COUNT] = {};
145
146 #ifdef MB_TAG_MBUF
147 static struct m_tag *m_tag_create_mbuf(uint32_t, uint16_t, uint16_t, int, struct mbuf *);
148 #endif /* MB_TAG_MBUF */
149
150 /*
151 * ensure that [off, off + len) is contiguous on the mbuf chain "m".
152 * packet chain before "off" is kept untouched.
153 * if offp == NULL, the target will start at <retval, 0> on resulting chain.
154 * if offp != NULL, the target will start at <retval, *offp> on resulting chain.
155 *
156 * on error return (NULL return value), original "m" will be freed.
157 *
158 * XXX M_TRAILINGSPACE/M_LEADINGSPACE on shared cluster (sharedcluster)
159 */
160 struct mbuf *
m_pulldown(struct mbuf * m,int off,int len,int * offp)161 m_pulldown(struct mbuf *m, int off, int len, int *offp)
162 {
163 struct mbuf *n = NULL, *o = NULL;
164 int hlen = 0, tlen = 0, olen = 0;
165 int sharedcluster = 0;
166
167 /* check invalid arguments. */
168 VERIFY(len >= 0 && off >= 0);
169
170 if (m == NULL) {
171 panic("m == NULL in m_pulldown()");
172 }
173 if (len > MCLBYTES) {
174 m_freem(m);
175 return NULL; /* impossible */
176 }
177 int tmp_len = 0;
178 if (os_add_overflow(off, len, &tmp_len)) {
179 m_free(m);
180 return NULL;
181 }
182
183 #ifdef PULLDOWN_DEBUG
184 {
185 struct mbuf *t;
186 printf("before:");
187 for (t = m; t; t = t->m_next) {
188 printf(" %d", t->m_len);
189 }
190 printf("\n");
191 }
192 #endif
193 n = m;
194
195 /*
196 * Iterate and make n point to the mbuf
197 * within which the first byte at length
198 * offset is contained from the start of
199 * mbuf chain.
200 */
201 while (n != NULL && off > 0) {
202 if (n->m_len > off) {
203 break;
204 }
205 off -= n->m_len;
206 n = n->m_next;
207 }
208
209 /* be sure to point non-empty mbuf */
210 while (n != NULL && n->m_len == 0) {
211 n = n->m_next;
212 }
213
214 if (!n) {
215 m_freem(m);
216 return NULL; /* mbuf chain too short */
217 }
218
219 /*
220 * the target data is on <n, off>.
221 * if we got enough data on the mbuf "n", we're done.
222 *
223 * It should be noted, that we should only do this either
224 * when offset is 0, i.e. data is pointing to the start
225 * or when the caller specifies an out argument to get
226 * the offset value in the mbuf to work with data pointer
227 * correctly.
228 *
229 * If offset is not 0 and caller did not provide out-argument
230 * to get offset, we should split the mbuf even when the length
231 * is contained in current mbuf.
232 */
233 if ((off == 0 || offp) && len <= n->m_len - off) {
234 goto ok;
235 }
236
237 /*
238 * when len <= n->m_len - off and off != 0, it is a special case.
239 * len bytes from <n, off> sits in single mbuf, but the caller does
240 * not like the starting position (off).
241 * chop the current mbuf into two pieces, set off to 0.
242 */
243 if (len <= n->m_len - off) {
244 o = m_copym(n, off, n->m_len - off, M_DONTWAIT);
245 if (o == NULL) {
246 m_freem(m);
247 return NULL; /* ENOBUFS */
248 }
249 n->m_len = off;
250 o->m_next = n->m_next;
251 n->m_next = o;
252 n = n->m_next;
253 off = 0;
254 goto ok;
255 }
256
257 /*
258 * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
259 * and construct contiguous mbuf with m_len == len.
260 * note that hlen + tlen == len, and tlen > 0.
261 *
262 * Read these variables as head length and tail length
263 */
264 hlen = n->m_len - off;
265 tlen = len - hlen;
266
267 /*
268 * ensure that we have enough trailing data on mbuf chain.
269 * if not, we can do nothing about the chain.
270 */
271 olen = 0;
272 for (o = n->m_next; o != NULL; o = o->m_next) {
273 olen += o->m_len;
274 }
275 if (hlen + olen < len) {
276 m_freem(m);
277 return NULL; /* mbuf chain too short */
278 }
279
280 /*
281 * easy cases first.
282 * we need to use m_copydata() to get data from <n->m_next, 0>.
283 */
284 if ((n->m_flags & M_EXT) == 0) {
285 sharedcluster = 0;
286 } else {
287 if (m_get_ext_free(n) != NULL) {
288 sharedcluster = 1;
289 } else if (m_mclhasreference(n)) {
290 sharedcluster = 1;
291 } else {
292 sharedcluster = 0;
293 }
294 }
295
296 /*
297 * If we have enough space left in current mbuf to accomodate
298 * tail length, copy tail length worth of data starting with next mbuf
299 * and adjust the length of next one accordingly.
300 */
301 if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
302 && !sharedcluster) {
303 m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
304 n->m_len += tlen;
305 m_adj(n->m_next, tlen);
306 goto ok;
307 }
308
309 /*
310 * If have enough leading space in next mbuf to accomodate head length
311 * of current mbuf, and total resulting length of next mbuf is greater
312 * than or equal to requested len bytes, then just copy hlen from
313 * current to the next one and adjust sizes accordingly.
314 */
315 if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen &&
316 (n->m_next->m_len + hlen) >= len && !sharedcluster) {
317 n->m_next->m_data -= hlen;
318 n->m_next->m_len += hlen;
319 bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
320 n->m_len -= hlen;
321 n = n->m_next;
322 off = 0;
323 goto ok;
324 }
325
326 /*
327 * now, we need to do the hard way. don't m_copy as there's no room
328 * on both end.
329 */
330 MGET(o, M_DONTWAIT, m->m_type);
331 if (o == NULL) {
332 m_freem(m);
333 return NULL; /* ENOBUFS */
334 }
335 if (len > MHLEN) { /* use MHLEN just for safety */
336 MCLGET(o, M_DONTWAIT);
337 if ((o->m_flags & M_EXT) == 0) {
338 m_freem(m);
339 m_free(o);
340 return NULL; /* ENOBUFS */
341 }
342 }
343 /* get hlen from <n, off> into <o, 0> */
344 o->m_len = hlen;
345 bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
346 n->m_len -= hlen;
347 /* get tlen from <n->m_next, 0> into <o, hlen> */
348 m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
349 o->m_len += tlen;
350 m_adj(n->m_next, tlen);
351 o->m_next = n->m_next;
352 n->m_next = o;
353 n = o;
354 off = 0;
355
356 ok:
357 #ifdef PULLDOWN_DEBUG
358 {
359 struct mbuf *t;
360 printf("after:");
361 for (t = m; t; t = t->m_next) {
362 printf("%c%d", t == n ? '*' : ' ', t->m_len);
363 }
364 printf(" (off=%d)\n", off);
365 }
366 #endif
367 if (offp) {
368 *offp = off;
369 }
370 return n;
371 }
372
373 static struct m_tag *
m_tag_kalloc_notsupp(__unused uint32_t id,__unused uint16_t type,__unused uint16_t len,__unused int wait)374 m_tag_kalloc_notsupp(__unused uint32_t id, __unused uint16_t type, __unused uint16_t len, __unused int wait)
375 {
376 return NULL;
377 }
378
379 static void
m_tag_kfree_notsupp(__unused struct m_tag * tag)380 m_tag_kfree_notsupp(__unused struct m_tag *tag)
381 {
382 return;
383 }
384
385 #if defined(HAS_APPLE_PAC)
386 /*
387 * combine into a uintptr_t the m_tag_type that is 16 bits with the m_tag_id is 32 bits
388 */
/*
 * Pack a tag's 16-bit m_tag_type and 32-bit m_tag_id into a single
 * uintptr_t that serves as the raw input to the cookie signature.
 */
static uintptr_t
m_tag_cookie_from_id_and_type(struct m_tag *tag)
{
	uintptr_t cookie;

#ifdef __LP64__
	/*
	 * upper 4 bytes: 2 bytes of type
	 * lower 4 bytes: 4 bytes of id
	 */
	cookie = (((uintptr_t)tag->m_tag_type) << 32) | (uintptr_t)tag->m_tag_id;
#else
	/*
	 * upper 2 bytes: 2 bytes of type or-ed with upper 2 bytes of id
	 * lower 2 bytes: lower 2 bytes of id
	 */
	cookie = (((uintptr_t)tag->m_tag_type) << 16) | (uintptr_t)tag->m_tag_id;
#endif
	return cookie;
}
409
/*
 * Stamp the tag with a PAC-signed cookie derived from its id/type so a
 * later m_tag_verify_cookie() can detect corruption or forgery.  The
 * discriminator blends the id/type with a string hash, so the signature
 * is only valid for this exact (id, type) pair.
 */
void
m_tag_create_cookie(struct m_tag *tag)
{
	uintptr_t cookie = m_tag_cookie_from_id_and_type(tag);

	tag->m_tag_cookie = (uintptr_t) ptrauth_sign_unauthenticated((void *)cookie,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(uintptr_t)(tag->m_tag_type | tag->m_tag_id),
	    ptrauth_string_discriminator("m_tag.m_tag_cookie")));
}
420
/*
 * Authenticate the tag's stored cookie against a freshly computed one;
 * panic on mismatch.  The discriminator must match the one used in
 * m_tag_create_cookie() for authentication to succeed.
 */
static void
m_tag_verify_cookie(struct m_tag *tag)
{
	uintptr_t cookie = m_tag_cookie_from_id_and_type(tag);
	uintptr_t auth_cookie;

	auth_cookie = (uintptr_t) ptrauth_auth_data((void *)(uintptr_t)tag->m_tag_cookie,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(uintptr_t)(tag->m_tag_type | tag->m_tag_id),
	    ptrauth_string_discriminator("m_tag.m_tag_cookie")));
	if (cookie != auth_cookie) {
		panic("verify_m_tag_cookie bad m_tag cookie");
	}
}
435
436 #else /* defined(HAS_APPLE_PAC) */
437
/*
 * Without pointer authentication, the cookie is a fixed sentinel value
 * that only catches gross memory corruption.
 */
void
m_tag_create_cookie(struct m_tag *tag)
{
	tag->m_tag_cookie = M_TAG_VALID_PATTERN;
}
443
/*
 * Non-PAC verification: assert the static sentinel is intact.
 */
static void
m_tag_verify_cookie(struct m_tag *tag)
{
	VERIFY(tag->m_tag_cookie == M_TAG_VALID_PATTERN);
}
449
450 #endif /* defined(HAS_APPLE_PAC) */
451
452
/*
 * Create a packet tag of the given id/type with len bytes of payload.
 * "wait" carries the allocation flags; "buf" is the mbuf the tag will be
 * attached to (only used by the mbuf-backed allocation strategy).
 */
struct m_tag *
m_tag_create(uint32_t id, uint16_t type, int len, int wait, struct mbuf *buf)
{
#ifdef MB_TAG_MBUF
	/*
	 * Create and return an m_tag, either by re-using space in a previous tag
	 * or by allocating a new mbuf/cluster
	 */
	return m_tag_create_mbuf(id, type, (uint16_t)len, wait, buf);
#else /* MB_TAG_MBUF */
#pragma unused(buf)
	/*
	 * Each packet tag has its own allocation
	 */
	return m_tag_alloc(id, type, (uint16_t)len, wait);
#endif /* MB_TAG_MBUF */
}
470
471 #ifdef MB_TAG_MBUF
/*
 * Get a packet tag structure along with specified data following.
 * Small tags (aligned tag + m_taghdr fits in MLEN) are carved out of a
 * fresh MT_TAG mbuf whose m_taghdr tracks how many tags share it;
 * larger tags get a dedicated 2K cluster.  Returns NULL on failure or
 * if the request exceeds MCLBYTES.
 */
static struct m_tag *
m_tag_alloc_mbuf(u_int32_t id, u_int16_t type, uint16_t len, int wait)
{
	struct m_tag *t;
	void *mb_cl = NULL;

	if (M_TAG_ALIGN(len) + sizeof(struct m_taghdr) <= MLEN) {
		/* Small tag: host it inside a regular mbuf with a refcounted header. */
		struct mbuf *m = m_get(wait, MT_TAG);
		struct m_taghdr *hdr;

		if (m == NULL) {
			return NULL;
		}
		mb_cl = m;

		m->m_flags |= M_TAGHDR;

		/* The m_taghdr sits at the start of the mbuf data area. */
		hdr = (struct m_taghdr *)(void *)m->m_data;
		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));
		hdr->mth_refcnt = 1;
		m->m_len += sizeof(struct m_taghdr);
		/* The tag itself follows the header, 8-byte aligned. */
		t = (struct m_tag *)(void *)(m->m_data + m->m_len);
		VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
		m->m_len += M_TAG_ALIGN(len);
		VERIFY(m->m_len <= MLEN);
	} else if (len + sizeof(struct m_tag) <= MCLBYTES) {
		/* Large tag: give it a whole cluster (no sharing, no header). */
		mb_cl = m_mclalloc(wait);
		t = (struct m_tag *)(void *)mb_cl;
	} else {
		/* Request too large to satisfy. */
		t = NULL;
	}

	if (__improbable(t == NULL)) {
		return NULL;
	}

	VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
	/* Payload lives immediately after the tag structure. */
	M_TAG_INIT(t, id, type, len, (void *)(t + 1), mb_cl);
	if (len > 0) {
		bzero(t->m_tag_data, len);
	}
	return t;
}
516
/*
 * Create a tag for "buf", preferring to pack it into an mbuf that
 * already hosts one of buf's existing small tags (bumping that mbuf's
 * m_taghdr refcount) before falling back to a fresh allocation.
 */
static struct m_tag *
m_tag_create_mbuf(uint32_t id, uint16_t type, uint16_t len, int wait, struct mbuf *buf)
{
	struct m_tag *t = NULL;
	struct m_tag *p;
	void *mb_cl = NULL;

	/* Too big to share an mbuf — allocate independently. */
	if (len + sizeof(struct m_tag) + sizeof(struct m_taghdr) > MLEN) {
		return m_tag_alloc(id, type, len, wait);
	}

	/*
	 * We've exhausted all external cases. Now, go through the m_tag
	 * chain and see if we can fit it in any of them.
	 * If not (t == NULL), call m_tag_alloc to store it in a new mbuf.
	 */
	p = SLIST_FIRST(&buf->m_pkthdr.tags);
	while (p != NULL) {
		/* 2KCL m_tag */
		if (M_TAG_ALIGN(p->m_tag_len) +
		    sizeof(struct m_taghdr) > MLEN) {
			/* This tag lives in its own cluster — nothing to share. */
			p = SLIST_NEXT(p, m_tag_link);
			continue;
		}

		m_tag_verify_cookie(p);

		struct mbuf *m = p->m_tag_mb_cl;
		struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data;

		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));
		VERIFY(m->m_flags & M_TAGHDR && !(m->m_flags & M_EXT));

		/* The mbuf can store this m_tag */
		if (M_TAG_ALIGN(len) <= MLEN - m->m_len) {
			mb_cl = m;
			t = (struct m_tag *)(void *)(m->m_data + m->m_len);
			VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t)));
			/* Another tag now shares this mbuf. */
			hdr->mth_refcnt++;
			m->m_len += M_TAG_ALIGN(len);
			VERIFY(m->m_len <= MLEN);
			break;
		}

		p = SLIST_NEXT(p, m_tag_link);
	}

	/* No existing mbuf had room — fall back to a fresh allocation. */
	if (t == NULL) {
		return m_tag_alloc(id, type, len, wait);
	}

	M_TAG_INIT(t, id, type, len, (void *)(t + 1), mb_cl);
	if (len > 0) {
		bzero(t->m_tag_data, len);
	}
	return t;
}
574
/*
 * Free an mbuf/cluster-backed tag.  Small tags drop their hosting
 * mbuf's m_taghdr refcount and free the mbuf only when it reaches
 * zero; large tags release their dedicated cluster.
 */
static void
m_tag_free_mbuf(struct m_tag *t)
{
	if (__improbable(t == NULL)) {
		return;
	}

	if (M_TAG_ALIGN(t->m_tag_len) + sizeof(struct m_taghdr) <= MLEN) {
		/* Small tag: shares an MT_TAG mbuf with other tags. */
		struct mbuf * m = t->m_tag_mb_cl;

		VERIFY(m->m_flags & M_TAGHDR);
		struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data;

		VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t)));

		/* No other tags in this mbuf */
		if (--hdr->mth_refcnt == 0) {
			m_free(m);
			return;
		}

		/*
		 * Pattern-fill the header so stale reuse of this slot is
		 * detectable while the mbuf stays alive for sibling tags.
		 */
		u_int64_t *fill_ptr = (u_int64_t *)t;
		u_int64_t *end_ptr = (u_int64_t *)(t + 1);
		while (fill_ptr < end_ptr) {
			*fill_ptr = M_TAG_FREE_PATTERN;
			fill_ptr++;
		}
	} else {
		/* Large tag: owns its cluster outright. */
		m_mclfree((caddr_t)t);
	}
}
607 #endif /* MB_TAG_MBUF */
608
609 /*
610 * Allocations for external data are known to not have pointers for
611 * most platforms -- for macOS this is not guaranteed
612 */
613 #if XNU_TARGET_OS_OSX
614
615 __typed_allocators_ignore_push
616
/*
 * macOS variant: tag payloads may contain pointers, so allocate from
 * the default kheap rather than the pointer-free data heap.
 */
static inline void *
m_tag_data_kalloc(uint16_t len, int wait)
{
	return kheap_alloc(KHEAP_DEFAULT, len, wait | M_ZERO);
}
622
/*
 * macOS variant: release a payload obtained from m_tag_data_kalloc().
 */
static inline void
m_tag_data_free(struct m_tag *tag)
{
	kheap_free(KHEAP_DEFAULT, tag->m_tag_data, tag->m_tag_len);
}
628 __typed_allocators_ignore_pop
629
630 #else /* XNU_TARGET_OS_OSX */
631
/*
 * Non-macOS variant: payloads are known pointer-free, so use the
 * data-only allocator.
 */
static inline void *
m_tag_data_kalloc(uint16_t len, int wait)
{
	return kalloc_data(len, wait | M_ZERO);
}
637
/*
 * Non-macOS variant: release a payload obtained from m_tag_data_kalloc().
 */
static inline void
m_tag_data_free(struct m_tag *tag)
{
	kfree_data(tag->m_tag_data, tag->m_tag_len);
}
643
644 #endif /* XNU_TARGET_OS_OSX */
645
646 static struct m_tag *
m_tag_kalloc_external(uint32_t id,uint16_t type,uint16_t len,int wait)647 m_tag_kalloc_external(uint32_t id, uint16_t type, uint16_t len, int wait)
648 {
649 struct m_tag *tag;
650 void *data = NULL;
651
652 tag = kalloc_type(struct m_tag, wait | M_ZERO);
653 if (__improbable(tag == NULL)) {
654 return NULL;
655 }
656
657 if (len > 0) {
658 data = m_tag_data_kalloc(len, wait);
659 if (__improbable(data == NULL)) {
660 kfree_type(struct m_tag, tag);
661 return NULL;
662 }
663 }
664
665 M_TAG_INIT(tag, id, type, len, data, NULL);
666
667 return tag;
668 }
669
670 static void
m_tag_kfree_external(struct m_tag * tag)671 m_tag_kfree_external(struct m_tag *tag)
672 {
673 if (tag->m_tag_data != NULL) {
674 m_tag_data_free(tag);
675 }
676 kfree_type(struct m_tag, tag);
677 }
678
/*
 * Map an (id, type) pair to its dispatch-table entry, and optionally its
 * stats slot via *pmtts.  Unknown ids/types fall back to the
 * KERNEL_TAG_TYPE_NONE entry (the external allocator).  Never returns NULL.
 */
static struct m_tag_type_entry *
get_m_tag_type_entry(uint32_t id, uint16_t type, struct m_tag_type_stats **pmtts)
{
	/* Default to the generic external entry. */
	struct m_tag_type_entry *mtte = &m_tag_type_table[KERNEL_TAG_TYPE_NONE];

	if (pmtts != NULL) {
		*pmtts = &m_tag_type_stats[KERNEL_TAG_TYPE_NONE];
	}

	if (id == KERNEL_MODULE_TAG_ID) {
		switch (type) {
		case KERNEL_TAG_TYPE_DUMMYNET:
		case KERNEL_TAG_TYPE_IPFILT:
		case KERNEL_TAG_TYPE_ENCAP:
		case KERNEL_TAG_TYPE_INET6:
		case KERNEL_TAG_TYPE_IPSEC:
		case KERNEL_TAG_TYPE_CFIL_UDP:
		case KERNEL_TAG_TYPE_PF_REASS:
		case KERNEL_TAG_TYPE_AQM:
		case KERNEL_TAG_TYPE_DRVAUX:
			/* Known internal type: use its dedicated entry and stats. */
			mtte = &m_tag_type_table[type];
			if (pmtts != NULL) {
				*pmtts = &m_tag_type_stats[type];
			}
			break;
		default:
#if DEBUG || DEVELOPMENT
			/* In-range but unhandled types indicate a table/enum mismatch. */
			if (type > 0 && type < KERNEL_TAG_TYPE_COUNT) {
				panic("get_m_tag_type_entry unexpected m_tag type %u",
				    type);
			}
#endif /* DEBUG || DEVELOPMENT */
			break;
		}
	}

	return mtte;
}
717
718 #ifndef MB_TAG_MBUF
719 static struct m_tag *
m_tag_kalloc(uint32_t id,uint16_t type,uint16_t len,int wait,struct m_tag_type_entry * mtte)720 m_tag_kalloc(uint32_t id, uint16_t type, uint16_t len, int wait, struct m_tag_type_entry *mtte)
721 {
722 struct m_tag *tag = NULL;
723
724 tag = mtte->mt_alloc_func(id, type, len, wait);
725
726 if (__probable(tag != NULL)) {
727 VERIFY(IS_P2ALIGNED(tag, sizeof(uint64_t)));
728
729 if (__improbable(tag->m_tag_data == NULL)) {
730 VERIFY(len == 0);
731 } else {
732 VERIFY(len != 0);
733 VERIFY(IS_P2ALIGNED(tag->m_tag_data, sizeof(uint64_t)));
734 }
735 }
736 return tag;
737 }
738
/*
 * Free a tag through its type's registered free routine.
 */
static void
m_tag_kfree(struct m_tag *tag, struct m_tag_type_entry *mtte)
{
	mtte->mt_free_func(tag);
}
744 #endif /* MB_TAG_MBUF */
745
/*
 * Allocate a packet tag of the given id/type with len bytes of payload,
 * updating the per-type allocation counters.  Returns NULL when len is
 * out of range or the underlying allocation fails.
 */
struct m_tag *
m_tag_alloc(uint32_t id, uint16_t type, int len, int wait)
{
	struct m_tag *tag = NULL;
	struct m_tag_type_entry *mtte = NULL;
	struct m_tag_type_stats *mtts = NULL;

	/* Always non-NULL; unknown types map to the generic entry. */
	mtte = get_m_tag_type_entry(id, type, &mtts);

	/* Reject negative or oversized payloads (tag + data must fit a cluster). */
	if (__improbable(len < 0 || len >= MCLBYTES - sizeof(struct m_tag))) {
		goto done;
	}

#ifdef MB_TAG_MBUF
	tag = m_tag_alloc_mbuf(id, type, (uint16_t)len, wait);
#else /* MB_TAG_MBUF */
	/*
	 * Using Z_NOWAIT could cause retransmission delays when there aren't
	 * many other colocated types in the zone that would prime it. Use
	 * Z_NOPAGEWAIT instead which will only fail to allocate when zalloc
	 * needs to block on the VM for pages.
	 */
	if (wait & Z_NOWAIT) {
		wait &= ~Z_NOWAIT;
		wait |= Z_NOPAGEWAIT;
	}
	tag = m_tag_kalloc(id, type, (uint16_t)len, wait, mtte);
#endif /* MB_TAG_MBUF */

done:
	if (__probable(tag != NULL)) {
		/* M_TAG_INIT signed the cookie; confirm it before handing out. */
		m_tag_verify_cookie(tag);
		assert3u(tag->m_tag_id, ==, id);
		assert3u(tag->m_tag_type, ==, type);
		assert3u(tag->m_tag_len, ==, len);

		os_atomic_inc(&mtts->mt_alloc_count, relaxed);
	} else {
		os_atomic_inc(&mtts->mt_alloc_failed, relaxed);
	}

	return tag;
}
789
/*
 * Free a packet tag.  Verifies the tag's integrity cookie, dispatches to
 * the storage-appropriate free routine, and bumps the per-type free count.
 * NULL is tolerated as a no-op.
 */
void
m_tag_free(struct m_tag *tag)
{
	struct m_tag_type_entry *mtte = NULL;
	struct m_tag_type_stats *mtts = NULL;

	if (__improbable(tag == NULL)) {
		return;
	}

	/* Catch corrupted or forged tags before touching their fields. */
	m_tag_verify_cookie(tag);

	mtte = get_m_tag_type_entry(tag->m_tag_id, tag->m_tag_type, &mtts);

#ifdef MB_TAG_MBUF
	m_tag_free_mbuf(tag);
#else /* MB_TAG_MBUF */
	m_tag_kfree(tag, mtte);
#endif /* MB_TAG_MBUF */

	os_atomic_inc(&mtts->mt_free_count, relaxed);
}
813
/*
 * One-time initialization of the tag dispatch table: every slot starts as
 * "not supported", the generic (NONE) and driver-auxiliary slots get the
 * external allocator, and each subsystem then registers its own handlers.
 * Runs before the table becomes read-only (SECURITY_READ_ONLY_LATE).
 */
void
mbuf_tag_init(void)
{
	for (uint16_t type = 0; type < KERNEL_TAG_TYPE_COUNT; type++) {
		m_tag_type_table[type].mt_type = type;
		m_tag_type_table[type].mt_len = 0;
		m_tag_type_table[type].mt_alloc_func = m_tag_kalloc_notsupp;
		m_tag_type_table[type].mt_free_func = m_tag_kfree_notsupp;
	}
	m_tag_type_table[KERNEL_TAG_TYPE_NONE].mt_alloc_func = m_tag_kalloc_external;
	m_tag_type_table[KERNEL_TAG_TYPE_NONE].mt_free_func = m_tag_kfree_external;
	m_tag_type_table[KERNEL_TAG_TYPE_DRVAUX].mt_alloc_func = m_tag_kalloc_external;
	m_tag_type_table[KERNEL_TAG_TYPE_DRVAUX].mt_free_func = m_tag_kfree_external;

	/* Subsystem registration hooks; each fills in its own table slot. */
#if NETWORKING
	extern void pktsched_register_m_tag(void);
	pktsched_register_m_tag();
#endif /* NETWORKING */

#if INET
	extern void ip6_register_m_tag(void);
	ip6_register_m_tag();

	extern void ipfilter_register_m_tag(void);
	ipfilter_register_m_tag();

	extern void encap_register_m_tag(void);
	encap_register_m_tag();
#endif /* INET */

#if IPSEC
	extern void ipsec_register_m_tag(void);
	ipsec_register_m_tag();
#endif /* IPSEC */

#if DUMMYNET
	extern void dummynet_register_m_tag(void);
	dummynet_register_m_tag();
#endif /* DUMMYNET */

#if PF
	extern void pf_register_m_tag(void);
	pf_register_m_tag();
#endif /* PF */

#if CONTENT_FILTER
	extern void cfil_register_m_tag(void);
	cfil_register_m_tag();
#endif /* CONTENT_FILTER */
}
864
865 int
m_register_internal_tag_type(uint16_t type,uint16_t len,m_tag_kalloc_func_t alloc_func,m_tag_kfree_func_t free_func)866 m_register_internal_tag_type(uint16_t type, uint16_t len,
867 m_tag_kalloc_func_t alloc_func, m_tag_kfree_func_t free_func)
868 {
869 int error = 0;
870
871 if (type <= 0 || type >= KERNEL_TAG_TYPE_DRVAUX) {
872 error = EINVAL;
873 goto done;
874 }
875 m_tag_type_table[type].mt_type = type;
876 m_tag_type_table[type].mt_len = len;
877 m_tag_type_table[type].mt_alloc_func = alloc_func;
878 m_tag_type_table[type].mt_free_func = free_func;
879
880 done:
881 return error;
882 }
883
/*
 * Prepend a packet tag: insert t at the head of m's tag list.
 * Does not allocate or copy; the caller transfers ownership of t.
 */
void
m_tag_prepend(struct mbuf *m, struct m_tag *t)
{
	VERIFY(m != NULL && t != NULL);

	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
}
892
/*
 * Unlink a packet tag from m's tag list without freeing it.
 * O(n) in the list length (SLIST_REMOVE walks the list).
 */
void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{
	VERIFY(m->m_flags & M_PKTHDR);
	VERIFY(t != NULL);

	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}
902
/*
 * Unlink a packet tag from m and free it.
 */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
	m_tag_unlink(m, t);
	m_tag_free(t);
}
910
911 /* Unlink and free a packet tag chain, starting from given tag. */
912 void
m_tag_delete_chain(struct mbuf * m)913 m_tag_delete_chain(struct mbuf *m)
914 {
915 struct m_tag *p, *q;
916
917 VERIFY(m->m_flags & M_PKTHDR);
918
919 p = SLIST_FIRST(&m->m_pkthdr.tags);
920 if (p == NULL) {
921 return;
922 }
923
924 while ((q = SLIST_NEXT(p, m_tag_link)) != NULL) {
925 m_tag_delete(m, q);
926 }
927 m_tag_delete(m, p);
928 }
929
930 /* Find a tag, starting from a given position. */
931 struct m_tag *
m_tag_locate(struct mbuf * m,uint32_t id,uint16_t type)932 m_tag_locate(struct mbuf *m, uint32_t id, uint16_t type)
933 {
934 struct m_tag *p;
935
936 VERIFY(m->m_flags & M_PKTHDR);
937
938 p = SLIST_FIRST(&m->m_pkthdr.tags);
939
940 while (p != NULL) {
941 if (p->m_tag_id == id && p->m_tag_type == type) {
942 m_tag_verify_cookie(p);
943 return p;
944 }
945 p = SLIST_NEXT(p, m_tag_link);
946 }
947 return NULL;
948 }
949
950 /* Copy a single tag. */
951 struct m_tag *
m_tag_copy(struct m_tag * t,int how)952 m_tag_copy(struct m_tag *t, int how)
953 {
954 struct m_tag *p;
955
956 VERIFY(t != NULL);
957
958 p = m_tag_alloc(t->m_tag_id, t->m_tag_type, t->m_tag_len, how);
959 if (p == NULL) {
960 return NULL;
961 }
962 bcopy(t->m_tag_data, p->m_tag_data, t->m_tag_len); /* Copy the data */
963 return p;
964 }
965
966 /*
967 * Copy two tag chains. The destination mbuf (to) loses any attached
968 * tags even if the operation fails. This should not be a problem, as
969 * m_tag_copy_chain() is typically called with a newly-allocated
970 * destination mbuf.
971 */
972 int
m_tag_copy_chain(struct mbuf * to,struct mbuf * from,int how)973 m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
974 {
975 struct m_tag *p, *t, *tprev = NULL;
976
977 VERIFY((to->m_flags & M_PKTHDR) && (from->m_flags & M_PKTHDR));
978
979 m_tag_delete_chain(to);
980 SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
981 m_tag_verify_cookie(p);
982 t = m_tag_copy(p, how);
983 if (t == NULL) {
984 m_tag_delete_chain(to);
985 return 0;
986 }
987 if (tprev == NULL) {
988 SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
989 } else {
990 SLIST_INSERT_AFTER(tprev, t, m_tag_link);
991 tprev = t;
992 }
993 }
994 return 1;
995 }
996
/*
 * Initialize dynamic and static tags on an mbuf: reset the dynamic tag
 * list, and (when "all" is set) zero the built-in static tag storage.
 */
void
m_tag_init(struct mbuf *m, int all)
{
	VERIFY(m->m_flags & M_PKTHDR);

	SLIST_INIT(&m->m_pkthdr.tags);
	/*
	 * If the caller wants to preserve static mbuf tags
	 * (e.g. m_dup_pkthdr), don't zero them out.
	 */
	if (all) {
		bzero(&m->m_pkthdr.builtin_mtag._net_mtag,
		    sizeof(m->m_pkthdr.builtin_mtag._net_mtag));
	}
}
1013
/*
 * Get first tag in chain; NULL when the mbuf has no tags.
 */
struct m_tag *
m_tag_first(struct mbuf *m)
{
	VERIFY(m->m_flags & M_PKTHDR);

	return SLIST_FIRST(&m->m_pkthdr.tags);
}
1022
/*
 * Get next tag in chain after t; the mbuf argument is unused but kept
 * for interface symmetry with m_tag_first().
 */
struct m_tag *
m_tag_next(struct mbuf *m, struct m_tag *t)
{
#pragma unused(m)
	VERIFY(t != NULL);

	return SLIST_NEXT(t, m_tag_link);
}
1032
1033 int
m_set_traffic_class(struct mbuf * m,mbuf_traffic_class_t tc)1034 m_set_traffic_class(struct mbuf *m, mbuf_traffic_class_t tc)
1035 {
1036 uint32_t val = MBUF_TC2SCVAL(tc); /* just the val portion */
1037
1038 return m_set_service_class(m, m_service_class_from_val(val));
1039 }
1040
/*
 * Derive the traffic class from the mbuf's stored service class.
 */
mbuf_traffic_class_t
m_get_traffic_class(struct mbuf *m)
{
	return MBUF_SC2TC(m_get_service_class(m));
}
1046
1047 int
m_set_service_class(struct mbuf * m,mbuf_svc_class_t sc)1048 m_set_service_class(struct mbuf *m, mbuf_svc_class_t sc)
1049 {
1050 int error = 0;
1051
1052 VERIFY(m->m_flags & M_PKTHDR);
1053
1054 if (MBUF_VALID_SC(sc)) {
1055 m->m_pkthdr.pkt_svc = sc;
1056 } else {
1057 error = EINVAL;
1058 }
1059
1060 return error;
1061 }
1062
1063 mbuf_svc_class_t
m_get_service_class(struct mbuf * m)1064 m_get_service_class(struct mbuf *m)
1065 {
1066 mbuf_svc_class_t sc;
1067
1068 VERIFY(m->m_flags & M_PKTHDR);
1069
1070 if (MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1071 sc = m->m_pkthdr.pkt_svc;
1072 } else {
1073 sc = MBUF_SC_BE;
1074 }
1075
1076 return sc;
1077 }
1078
1079 mbuf_svc_class_t
m_service_class_from_idx(uint32_t i)1080 m_service_class_from_idx(uint32_t i)
1081 {
1082 mbuf_svc_class_t sc = MBUF_SC_BE;
1083
1084 switch (i) {
1085 case SCIDX_BK_SYS:
1086 return MBUF_SC_BK_SYS;
1087
1088 case SCIDX_BK:
1089 return MBUF_SC_BK;
1090
1091 case SCIDX_BE:
1092 return MBUF_SC_BE;
1093
1094 case SCIDX_RD:
1095 return MBUF_SC_RD;
1096
1097 case SCIDX_OAM:
1098 return MBUF_SC_OAM;
1099
1100 case SCIDX_AV:
1101 return MBUF_SC_AV;
1102
1103 case SCIDX_RV:
1104 return MBUF_SC_RV;
1105
1106 case SCIDX_VI:
1107 return MBUF_SC_VI;
1108
1109 case SCIDX_VO:
1110 return MBUF_SC_VO;
1111
1112 case SCIDX_CTL:
1113 return MBUF_SC_CTL;
1114
1115 default:
1116 break;
1117 }
1118
1119 VERIFY(0);
1120 /* NOTREACHED */
1121 return sc;
1122 }
1123
1124 mbuf_svc_class_t
m_service_class_from_val(uint32_t v)1125 m_service_class_from_val(uint32_t v)
1126 {
1127 mbuf_svc_class_t sc = MBUF_SC_BE;
1128
1129 switch (v) {
1130 case SCVAL_BK_SYS:
1131 return MBUF_SC_BK_SYS;
1132
1133 case SCVAL_BK:
1134 return MBUF_SC_BK;
1135
1136 case SCVAL_BE:
1137 return MBUF_SC_BE;
1138
1139 case SCVAL_RD:
1140 return MBUF_SC_RD;
1141
1142 case SCVAL_OAM:
1143 return MBUF_SC_OAM;
1144
1145 case SCVAL_AV:
1146 return MBUF_SC_AV;
1147
1148 case SCVAL_RV:
1149 return MBUF_SC_RV;
1150
1151 case SCVAL_VI:
1152 return MBUF_SC_VI;
1153
1154 case SCVAL_VO:
1155 return MBUF_SC_VO;
1156
1157 case SCVAL_CTL:
1158 return MBUF_SC_CTL;
1159
1160 default:
1161 break;
1162 }
1163
1164 VERIFY(0);
1165 /* NOTREACHED */
1166 return sc;
1167 }
1168
/*
 * Adjust a 16-bit one's complement sum "sum" that was computed over the
 * packet starting at offset "start", so that on return it covers exactly
 * the "datalen" bytes beginning at offset "dataoff".  Leading extraneous
 * octets (start < dataoff) are subtracted out; if summing started past
 * the data (start > dataoff), the missing octets are added in.  Octets
 * trailing (dataoff + datalen) are likewise subtracted.  Returns the
 * folded 16-bit checksum.
 */
uint16_t
m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff,
    uint32_t datalen, uint32_t sum)
{
	uint32_t total_sub = 0;                 /* total to subtract */
	uint32_t mlen = m_pktlen(m);            /* frame length */
	uint32_t bytes = (dataoff + datalen);   /* bytes covered by sum */
	int len;

	ASSERT(bytes <= mlen);

	/*
	 * Take care of excluding (len > 0) or including (len < 0)
	 * extraneous octets at the beginning of the packet, taking
	 * into account the start offset.
	 */
	len = (dataoff - start);
	if (len > 0) {
		total_sub = m_sum16(m, start, len);
	} else if (len < 0) {
		sum += m_sum16(m, dataoff, -len);
	}

	/*
	 * Take care of excluding any postpended extraneous octets.
	 */
	len = (mlen - bytes);
	if (len > 0) {
		struct mbuf *m0 = m;
		uint32_t extra = m_sum16(m, bytes, len);
		uint32_t off = bytes, off0 = off;

		/* walk the chain to the mbuf containing offset "bytes" */
		while (off > 0) {
			if (__improbable(m == NULL)) {
				panic("%s: invalid mbuf chain %p [off %u, "
				    "len %u]", __func__, m0, off0, len);
				/* NOTREACHED */
			}
			if (off < m->m_len) {
				break;
			}
			off -= m->m_len;
			m = m->m_next;
		}

		/* if we started on odd-alignment, swap the value */
		if ((uintptr_t)(mtod(m, uint8_t *) + off) & 1) {
			total_sub += ((extra << 8) & 0xffff) | (extra >> 8);
		} else {
			total_sub += extra;
		}

		/* partial fold so total_sub stays within range below */
		total_sub = (total_sub >> 16) + (total_sub & 0xffff);
	}

	/*
	 * 1's complement subtract any extraneous octets.
	 */
	if (total_sub != 0) {
		if (total_sub >= sum) {
			sum = ~(total_sub - sum) & 0xffff;
		} else {
			sum -= total_sub;
		}
	}

	/* fold 32-bit to 16-bit */
	sum = (sum >> 16) + (sum & 0xffff);     /* 17-bit */
	sum = (sum >> 16) + (sum & 0xffff);     /* 16-bit + carry */
	sum = (sum >> 16) + (sum & 0xffff);     /* final carry */

	return sum & 0xffff;
}
1242
1243 uint16_t
m_sum16(struct mbuf * m,uint32_t off,uint32_t len)1244 m_sum16(struct mbuf *m, uint32_t off, uint32_t len)
1245 {
1246 int mlen;
1247
1248 /*
1249 * Sanity check
1250 *
1251 * Use m_length2() instead of m_length(), as we cannot rely on
1252 * the caller setting m_pkthdr.len correctly, if the mbuf is
1253 * a M_PKTHDR one.
1254 */
1255 if ((mlen = m_length2(m, NULL)) < (off + len)) {
1256 panic("%s: mbuf %p len (%d) < off+len (%d+%d)", __func__,
1257 m, mlen, off, len);
1258 /* NOTREACHED */
1259 }
1260
1261 return (uint16_t)os_cpu_in_cksum_mbuf(m, len, off, 0);
1262 }
1263
1264 static int
sysctl_mb_tag_stats(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)1265 sysctl_mb_tag_stats(__unused struct sysctl_oid *oidp,
1266 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1267 {
1268 int error = 0;
1269
1270 if (req->oldptr == USER_ADDR_NULL) {
1271 req->oldidx = KERNEL_TAG_TYPE_COUNT * sizeof(struct m_tag_stats);
1272 return 0;
1273 }
1274 if (req->newptr != USER_ADDR_NULL) {
1275 return EPERM;
1276 }
1277
1278 for (uint16_t i = 0; i < KERNEL_TAG_TYPE_COUNT; i++) {
1279 struct m_tag_stats m_tag_stats = {};
1280
1281 m_tag_stats.mts_id = KERNEL_MODULE_TAG_ID;
1282 m_tag_stats.mts_type = i;
1283 m_tag_stats.mts_len = m_tag_type_table[i].mt_len;
1284 m_tag_stats.mts_alloc_count = m_tag_type_stats[i].mt_alloc_count;
1285 m_tag_stats.mts_alloc_failed = m_tag_type_stats[i].mt_alloc_failed;
1286 m_tag_stats.mts_free_count = m_tag_type_stats[i].mt_free_count;
1287
1288 error = SYSCTL_OUT(req, &m_tag_stats, sizeof(struct m_tag_stats));
1289 }
1290
1291 return error;
1292 }
1293
/* kern.ipc.mb_tag_stats: export per-type mbuf tag allocation statistics. */
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_tag_stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, NULL, 0,
    sysctl_mb_tag_stats, "S,m_tag_stats", "");
1297
1298 #if DEBUG || DEVELOPMENT
1299
/* One test case for the mbuf tag allocation tests below. */
struct m_tag_test_entry {
	bool mtte_test_id;              /* true: use the dynamic test tag id; false: KERNEL_MODULE_TAG_ID */
	bool mtte_alloc_must_fail;      /* whether m_tag_create() is expected to fail */
	uint16_t mtte_type;             /* tag type to request */
	int mtte_len;                   /* length to request (overridden for known kernel types) */
};
1306
/*
 * Table of m_tag_create() test cases, exercised by do_m_tag_test()
 * and do_test_m_tag_unlink().
 */
struct m_tag_test_entry
m_tag_test_table[] = {
	/*
	 * Built-in kernel tag types under KERNEL_MODULE_TAG_ID; the real
	 * length comes from m_tag_type_table[], so mtte_len is a placeholder.
	 * These allocations must succeed when the subsystem is present.
	 */
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_DUMMYNET,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_IPFILT,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_ENCAP,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_INET6,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_IPSEC,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_CFIL_UDP,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_PF_REASS,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_AQM,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_DRVAUX,
		.mtte_len = 0,
	},

	/*
	 * KERNEL_MODULE_TAG_ID with explicit lengths: MLEN-sized requests
	 * must succeed, MCLBYTES-sized requests must fail.
	 */
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = false,
		.mtte_type = KERNEL_TAG_TYPE_COUNT,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = MCLBYTES,
	},
	{
		.mtte_test_id = false,
		.mtte_alloc_must_fail = true,
		.mtte_type = KERNEL_TAG_TYPE_COUNT,
		.mtte_len = MCLBYTES,
	},

	/*
	 * Dynamically registered test tag id: small lengths succeed,
	 * negative and oversized (MCLBYTES) lengths must fail.
	 */
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = 0,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = false,
		.mtte_type = 0,
		.mtte_len = MLEN,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = -1,
	},
	{
		.mtte_test_id = true,
		.mtte_alloc_must_fail = true,
		.mtte_type = 0,
		.mtte_len = MCLBYTES,
	},
};
1414
/* Number of entries in m_tag_test_table. */
#define M_TAG_TEST_TABLE_COUNT (sizeof(m_tag_test_table) / sizeof(struct m_tag_test_entry))

/* Identifier string registered to obtain a dynamic mbuf tag id for testing. */
#define M_TAG_TEST_ID "com.apple.test.m_tag"
1418
1419 static int
do_m_tag_test(mbuf_tag_id_t test_tag_id)1420 do_m_tag_test(mbuf_tag_id_t test_tag_id)
1421 {
1422 int error = 0;
1423 struct mbuf *m = NULL;
1424
1425 m = m_getpacket();
1426 if (m == NULL) {
1427 os_log_error(OS_LOG_DEFAULT, "%s: m_getpacket failed", __func__);
1428 error = ENOMEM;
1429 goto done;
1430 }
1431
1432 for (int i = 0; i < M_TAG_TEST_TABLE_COUNT; i++) {
1433 struct m_tag_test_entry *entry = &m_tag_test_table[i];
1434 struct m_tag *tag = NULL;
1435 uint32_t id = test_tag_id;
1436 int len = entry->mtte_len;
1437 uint16_t type = entry->mtte_type;
1438
1439 if (entry->mtte_test_id == false) {
1440 id = KERNEL_MODULE_TAG_ID;
1441 switch (type) {
1442 case KERNEL_TAG_TYPE_DUMMYNET:
1443 case KERNEL_TAG_TYPE_IPFILT:
1444 case KERNEL_TAG_TYPE_ENCAP:
1445 case KERNEL_TAG_TYPE_INET6:
1446 case KERNEL_TAG_TYPE_IPSEC:
1447 case KERNEL_TAG_TYPE_CFIL_UDP:
1448 case KERNEL_TAG_TYPE_PF_REASS:
1449 case KERNEL_TAG_TYPE_AQM:
1450 /* subsystems that use mbuf tags are optional */
1451 if (m_tag_type_table[type].mt_alloc_func == m_tag_kalloc_notsupp) {
1452 continue;
1453 }
1454 len = m_tag_type_table[type].mt_len;
1455 if (entry->mtte_alloc_must_fail == true) {
1456 os_log_error(OS_LOG_DEFAULT,
1457 "%s: FAIL m_tag_create(%u, %u, %u) must not fail",
1458 __func__, id, type, len);
1459 error = EINVAL;
1460 goto done;
1461 }
1462 break;
1463 default:
1464 break;
1465 }
1466 }
1467 tag = m_tag_create(id, type, len, M_WAIT, m);
1468 if (tag == NULL) {
1469 if (entry->mtte_alloc_must_fail == false) {
1470 os_log_error(OS_LOG_DEFAULT,
1471 "%s: FAIL m_tag_create(%u, %u, %u) unexpected failure",
1472 __func__, id, type, len);
1473 error = ENOMEM;
1474 goto done;
1475 } else {
1476 os_log(OS_LOG_DEFAULT,
1477 "%s: PASS m_tag_create(%u, %u, %u) expected failure",
1478 __func__, id, type, len);
1479 }
1480 } else {
1481 if (entry->mtte_alloc_must_fail == true) {
1482 os_log_error(OS_LOG_DEFAULT,
1483 "%s: FAIL m_tag_create(%u, %u, %u) unexpected success",
1484 __func__, id, type, len);
1485 error = EINVAL;
1486 goto done;
1487 } else {
1488 os_log(OS_LOG_DEFAULT,
1489 "%s: PASS m_tag_create(%u, %u, %u) expected success",
1490 __func__, id, type, len);
1491 }
1492 m_tag_prepend(m, tag);
1493 }
1494 }
1495 done:
1496 if (m != NULL) {
1497 m_freem(m);
1498 }
1499 os_log_error(OS_LOG_DEFAULT,
1500 "%s: %s error %d",
1501 __func__, error == 0 ? "PASS" : "FAIL", error);
1502 return error;
1503 }
1504
1505 static int
do_test_m_tag_unlink(mbuf_tag_id_t test_tag_id)1506 do_test_m_tag_unlink(mbuf_tag_id_t test_tag_id)
1507 {
1508 struct mbuf *m = NULL;
1509 int error = 0;
1510
1511 m = m_gethdr(M_WAITOK, MT_DATA);
1512 if (m == NULL) {
1513 error = ENOMEM;
1514 goto done;
1515 }
1516 for (int i = 0; i < M_TAG_TEST_TABLE_COUNT; i++) {
1517 struct m_tag_test_entry *entry = &m_tag_test_table[i];
1518 struct m_tag *tag = NULL;
1519 uint32_t id = test_tag_id;
1520 int len = entry->mtte_len;
1521 uint16_t type = entry->mtte_type;
1522
1523 if (entry->mtte_alloc_must_fail == true) {
1524 continue;
1525 }
1526
1527 if (entry->mtte_test_id == false) {
1528 id = KERNEL_MODULE_TAG_ID;
1529 switch (type) {
1530 case KERNEL_TAG_TYPE_DUMMYNET:
1531 case KERNEL_TAG_TYPE_IPFILT:
1532 case KERNEL_TAG_TYPE_ENCAP:
1533 case KERNEL_TAG_TYPE_INET6:
1534 case KERNEL_TAG_TYPE_IPSEC:
1535 case KERNEL_TAG_TYPE_CFIL_UDP:
1536 case KERNEL_TAG_TYPE_PF_REASS:
1537 case KERNEL_TAG_TYPE_AQM:
1538 /* subsystems that use mbuf tags are optional */
1539 if (m_tag_type_table[type].mt_alloc_func == m_tag_kalloc_notsupp) {
1540 continue;
1541 }
1542 len = m_tag_type_table[type].mt_len;
1543 break;
1544 default:
1545 continue;
1546 }
1547 }
1548 tag = m_tag_create(id, type, len, M_WAIT, m);
1549 if (tag == NULL) {
1550 os_log_error(OS_LOG_DEFAULT,
1551 "%s: FAIL m_tag_create(%u, %u, %u) failure",
1552 __func__, id, type, len);
1553 error = ENOMEM;
1554 goto done;
1555 } else {
1556 os_log_error(OS_LOG_DEFAULT,
1557 "%s: PASS m_tag_create(%u, %u, %u) success",
1558 __func__, id, type, len);
1559 m_tag_prepend(m, tag);
1560 }
1561 }
1562
1563 struct m_tag *cfil_tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_CFIL_UDP);
1564 if (cfil_tag == NULL) {
1565 os_log_error(OS_LOG_DEFAULT,
1566 "%s: FAIL m_tag_locate(KERNEL_TAG_TYPE_CFIL_UDP) failure",
1567 __func__);
1568 error = EINVAL;
1569 goto done;
1570 } else {
1571 os_log_error(OS_LOG_DEFAULT,
1572 "%s: PASS m_tag_locate(KERNEL_TAG_TYPE_CFIL_UDP) success",
1573 __func__);
1574 }
1575
1576 /*
1577 * Unlink the mbuf tag, free the mbuf and finally free the mbuf tag
1578 */
1579 m_tag_unlink(m, cfil_tag);
1580
1581 m_freem(m);
1582 m = NULL;
1583
1584 m_tag_free(cfil_tag);
1585
1586 done:
1587 if (m != NULL) {
1588 m_freem(m);
1589 }
1590 os_log_error(OS_LOG_DEFAULT,
1591 "%s: %s error %d",
1592 __func__, error == 0 ? "PASS" : "FAIL", error);
1593 return error;
1594 }
1595
/*
 * kern.ipc.mb_tag_test handler: writing a non-zero value runs the mbuf
 * tag allocation test (do_m_tag_test) followed by the unlink test
 * (do_test_m_tag_unlink).  Reads return 0 and run nothing.
 */
static int
sysctl_mb_tag_test(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	int newvalue;
	int changed;
	int value = 0;
	mbuf_tag_id_t test_tag_id;

	if ((error = sysctl_io_number(req, value, sizeof(int),
	    &newvalue, &changed)) != 0) {
		goto done;
	}
	/* read-only access, or the same value written back: nothing to do */
	if (!changed && newvalue == value) {
		goto done;
	}
	/* register (or look up) the dynamic tag id used by the tests */
	error = mbuf_tag_id_find(M_TAG_TEST_ID, &test_tag_id);
	if (error != 0) {
		os_log_error(OS_LOG_DEFAULT, "%s: mbuf_tag_id_find failed error %d",
		    __func__, error);
		goto done;
	}
	error = do_m_tag_test(test_tag_id);
	if (error != 0) {
		goto done;
	}
	error = do_test_m_tag_unlink(test_tag_id);
	if (error != 0) {
		goto done;
	}
done:
	return error;
}
1630
/* kern.ipc.mb_tag_test: write a non-zero value to run the mbuf tag tests. */
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_tag_test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    sysctl_mb_tag_test, "I", "mbuf test");
1634
1635 #endif /* DEBUG || DEVELOPMENT */
1636