xref: /xnu-10002.81.5/bsd/kern/kpi_mbuf.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 /*
2  * Copyright (c) 2004-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #define __KPI__
30 
31 #include <sys/param.h>
32 #include <sys/mbuf.h>
33 #include <sys/mcache.h>
34 #include <sys/socket.h>
35 #include <kern/debug.h>
36 #include <libkern/OSAtomic.h>
37 #include <string.h>
38 #include <net/dlil.h>
39 #include <netinet/in.h>
40 #include <netinet/ip_var.h>
41 
42 #include <os/log.h>
43 
44 #include "net/net_str_id.h"
45 
46 /* mbuf flags visible to KPI clients; do not add private flags here */
47 static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
48     MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
49     MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);
50 
51 /* Unalterable mbuf flags */
52 static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);
53 
54 #define MAX_MBUF_TX_COMPL_FUNC 32
55 mbuf_tx_compl_func
56     mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC];
57 extern lck_rw_t mbuf_tx_compl_tbl_lock;
58 u_int32_t mbuf_tx_compl_index = 0;
59 
60 #if (DEVELOPMENT || DEBUG)
61 int mbuf_tx_compl_debug = 0;
62 uint64_t mbuf_tx_compl_requested __attribute__((aligned(8))) = 0;
63 uint64_t mbuf_tx_compl_callbacks __attribute__((aligned(8))) = 0;
64 uint64_t mbuf_tx_compl_aborted __attribute__((aligned(8))) = 0;
65 
66 SYSCTL_DECL(_kern_ipc);
67 SYSCTL_NODE(_kern_ipc, OID_AUTO, mbtxcf,
68     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
69 SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, debug,
70     CTLFLAG_RW | CTLFLAG_LOCKED, &mbuf_tx_compl_debug, 0, "");
71 SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, index,
72     CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_index, 0, "");
73 SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, requested,
74     CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_requested, "");
75 SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, callbacks,
76     CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_callbacks, "");
77 SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted,
78     CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_aborted, "");
79 #endif /* (DEBUG || DEVELOPMENT) */
80 
void *
mbuf_data(mbuf_t mbuf)
{
	/* Current start of the payload within the mbuf's storage. */
	return mbuf->m_data;
}

void *
mbuf_datastart(mbuf_t mbuf)
{
	/*
	 * Lowest usable address of this mbuf's backing storage: the
	 * external cluster when one is attached, otherwise the internal
	 * data area (which starts later when a packet header is present).
	 */
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_buf;
	}
	if (mbuf->m_flags & M_PKTHDR) {
		return mbuf->m_pktdat;
	}
	return mbuf->m_dat;
}
98 
/*
 * Point the mbuf's payload at (data, len).
 *
 * Returns EINVAL unless [data, data + len) lies entirely within the
 * mbuf's backing storage [datastart, datastart + maxlen); on success
 * both m_data and m_len are updated.
 */
errno_t
mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
{
	size_t  start = (size_t)((char *)mbuf_datastart(mbuf));
	size_t  maxlen = mbuf_maxlen(mbuf);

	if ((size_t)data < start || ((size_t)data) + len > start + maxlen) {
		return EINVAL;
	}
	mbuf->m_data = data;
	mbuf->m_len = (int32_t)len;

	return 0;
}

/*
 * Reposition m_data so that an object of 'len' bytes placed at the end
 * of the buffer is 32-bit aligned.  ENOTSUP on a shared cluster, since
 * moving m_data would disturb the other holders of the reference.
 */
errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) {
		return ENOTSUP;
	}
	mbuf->m_data = mbuf_datastart(mbuf);
	/*
	 * NOTE(review): if len exceeds the trailing space, the size_t
	 * subtraction wraps — presumably callers guarantee len fits;
	 * confirm against the KPI contract.
	 */
	mbuf->m_data +=
	    ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1));

	return 0;
}
126 
/*
 * This function is used to provide mcl_to_paddr via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
addr64_t
mbuf_data_to_physical(void *ptr)
{
	/* Translate an mbuf/cluster virtual address to a physical address. */
	return (addr64_t)mcl_to_paddr(ptr);
}
137 
138 errno_t
mbuf_get(mbuf_how_t how,mbuf_type_t type,mbuf_t * mbuf)139 mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
140 {
141 	/* Must set *mbuf to NULL in failure case */
142 	*mbuf = m_get(how, type);
143 
144 	return *mbuf == NULL ? ENOMEM : 0;
145 }
146 
147 errno_t
mbuf_gethdr(mbuf_how_t how,mbuf_type_t type,mbuf_t * mbuf)148 mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
149 {
150 	/* Must set *mbuf to NULL in failure case */
151 	*mbuf = m_gethdr(how, type);
152 
153 	return *mbuf == NULL ? ENOMEM : 0;
154 }
155 
/*
 * Attach caller-supplied external storage to *mbuf (allocating the mbuf
 * itself when *mbuf is NULL).  extfree is the routine invoked when the
 * last reference to the storage is dropped; extarg is passed back to it.
 */
errno_t
mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
    size_t extsize, caddr_t extarg)
{
	if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) {
		return EINVAL;
	}

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, extsize, extarg, how, 0)) == NULL) {
		return ENOMEM;
	}

	return 0;
}

/*
 * Allocate a cluster of at least *size bytes and attach it to *mbuf as a
 * "paired" (ring) cluster; *size is updated to the pool size actually
 * used.  The cluster is released again if the attach step fails.
 */
errno_t
mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    void (*extfree)(caddr_t, u_int, caddr_t), size_t *size)
{
	caddr_t extbuf = NULL;
	errno_t err;

	if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) {
		return EINVAL;
	}

	if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) {
		return err;
	}

	/* trailing 1 marks the attachment as paired (ring) */
	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, *size, NULL, how, 1)) == NULL) {
		mbuf_freecluster(extbuf, *size);
		return ENOMEM;
	}

	return 0;
}

/* Nonzero when the mbuf's paired cluster is currently active. */
int
mbuf_ring_cluster_is_active(mbuf_t mbuf)
{
	return m_ext_paired_is_active(mbuf);
}

/* Activate the paired cluster; EBUSY if it is already active. */
errno_t
mbuf_ring_cluster_activate(mbuf_t mbuf)
{
	if (mbuf_ring_cluster_is_active(mbuf)) {
		return EBUSY;
	}

	m_ext_paired_activate(mbuf);
	return 0;
}

/*
 * Update the cluster property word.  EBUSY when m_ext_set_prop reports
 * failure — presumably because the current value no longer equals
 * oldprop (compare-and-swap style); verify against m_ext_set_prop.
 */
errno_t
mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop)
{
	if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) {
		return EINVAL;
	}

	return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY;
}

/* Read back the cluster property word; EINVAL without external storage. */
errno_t
mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop)
{
	if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) {
		return EINVAL;
	}

	*prop = m_ext_get_prop(mbuf);
	return 0;
}
234 
/*
 * Allocate a bare cluster of at least *size bytes from the smallest of
 * the 2K (MCLBYTES) / 4K (MBIGCLBYTES) / 16K (M16KCLBYTES) pools that
 * fits.  On success *size is rounded up to the pool size used; on
 * failure *addr is NULL and *size is 0.
 */
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL) {
		return EINVAL;
	}

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > MBIGCLBYTES && njcl == 0) {
		return ENOTSUP;
	}

	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) {
		*size = MCLBYTES;
	} else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
	    (*addr = m_bigalloc(how)) != NULL) {
		*size = MBIGCLBYTES;
	} else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL) {
		*size = M16KCLBYTES;
	} else {
		*size = 0;
	}

	if (*addr == NULL) {
		return ENOMEM;
	}

	return 0;
}

/*
 * Return a cluster obtained from mbuf_alloccluster to its pool.
 * size must be exactly one of the three pool sizes; anything else
 * (including a 16K free when the jumbo pool was never created) panics.
 */
void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) {
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);
	}

	if (size == MCLBYTES) {
		m_mclfree(addr);
	} else if (size == MBIGCLBYTES) {
		m_bigfree(addr, MBIGCLBYTES, NULL);
	} else if (njcl > 0) {
		m_16kfree(addr, M16KCLBYTES, NULL);
	} else {
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
	}
}
286 
/*
 * Attach a cluster of exactly 'size' bytes (one of MCLBYTES, MBIGCLBYTES
 * or M16KCLBYTES) to *mbuf, allocating the mbuf first when *mbuf is
 * NULL.  An mbuf allocated here is freed again if the cluster cannot be
 * attached; a caller-supplied mbuf is left untouched on failure.
 */
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int     created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL) {
			return ENOMEM;
		}
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	/* M_EXT not set means the cluster allocation itself failed */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}
334 
/*
 * Attach a standard (MCLBYTES) cluster to *mbuf, allocating the mbuf
 * when *mbuf is NULL.  An mbuf created here is freed again if the
 * cluster could not be attached; ENOMEM is reported whenever the result
 * ends up without external storage.
 */
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int             created = 0;
	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error) {
			return error;
		}
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	/* Only free an mbuf we allocated ourselves */
	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
	return error;
}
367 
368 
369 errno_t
mbuf_getpacket(mbuf_how_t how,mbuf_t * mbuf)370 mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
371 {
372 	/* Must set *mbuf to NULL in failure case */
373 	errno_t error = 0;
374 
375 	*mbuf = m_getpacket_how(how);
376 
377 	if (*mbuf == NULL) {
378 		if (how == MBUF_WAITOK) {
379 			error = ENOMEM;
380 		} else {
381 			error = EWOULDBLOCK;
382 		}
383 	}
384 
385 	return error;
386 }
387 
/*
 * This function is used to provide m_free via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	/* Frees one mbuf; returns the next mbuf in the chain, if any. */
	return m_free(mbuf);
}

/*
 * This function is used to provide m_freem via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void
mbuf_freem(mbuf_t mbuf)
{
	/* Frees an entire mbuf chain. */
	m_freem(mbuf);
}

/* Free a list of mbuf chains (linked via m_nextpkt). */
int
mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}

/* Bytes available before the current data pointer. */
size_t
mbuf_leadingspace(const mbuf_t mbuf)
{
	return M_LEADINGSPACE(mbuf);
}

/*
 * This function is used to provide m_trailingspace via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
size_t
mbuf_trailingspace(const mbuf_t mbuf)
{
	/* Bytes available past the end of the current data. */
	return M_TRAILINGSPACE(mbuf);
}
430 
/* Manipulation */

/*
 * Make a (possibly shared-storage) copy of 'len' bytes of 'src'
 * starting at 'offset'.
 */
errno_t
mbuf_copym(const mbuf_t src, size_t offset, size_t len,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, (int)offset, (int)len, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

/* Deep-copy an entire packet, including the packet header. */
errno_t
mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

/* Prepend 'len' bytes to the chain, replacing *orig with the result. */
errno_t
mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, (int)len, how, 0);

	return *orig == NULL ? ENOMEM : 0;
}

/* Split the chain at 'offset'; *new_mbuf receives the tail. */
errno_t
mbuf_split(mbuf_t src, size_t offset,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, (int)offset, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

/*
 * Make the first 'len' bytes of the chain contiguous.  On failure the
 * chain is freed by m_pullup and *mbuf is NULL.
 */
errno_t
mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, (int)len);

	return *mbuf == NULL ? ENOMEM : 0;
}

/*
 * Ensure 'len' bytes at *offset are contiguous; *location receives the
 * mbuf holding them and *offset becomes the offset within that mbuf.
 */
errno_t
mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;
	*location = m_pulldown(src, (int)*offset, (int)len, &new_offset);
	*offset = new_offset;

	return *location == NULL ? ENOMEM : 0;
}

/*
 * This function is used to provide m_adj via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void
mbuf_adj(mbuf_t mbuf, int len)
{
	/* Trim len bytes from the head (len > 0) or tail (len < 0). */
	m_adj(mbuf, len);
}
499 
/*
 * Grow or shrink m_len by 'amount' after validating that the result
 * still fits: growth may not run past the backing storage, shrinkage
 * may not make m_len negative.
 */
errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		/* bytes already consumed: data offset plus current length */
		size_t used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m)) {
			return EINVAL;
		}
	} else if (-amount > m->m_len) {
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}

/*
 * Append chain 'src' to the end of 'dst' (m_cat may coalesce data into
 * dst's storage).  Returns dst, or NULL when dst is NULL.
 */
mbuf_t
mbuf_concatenate(mbuf_t dst, mbuf_t src)
{
	if (dst == NULL) {
		return NULL;
	}

	m_cat(dst, src);

	/* return dst as is in the current implementation */
	return dst;
}
531 errno_t
mbuf_copydata(const mbuf_t m0,size_t off,size_t len,void * out_data)532 mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data)
533 {
534 	/* Copied m_copydata, added error handling (don't just panic) */
535 	size_t count;
536 	mbuf_t  m = m0;
537 
538 	if (off >= INT_MAX || len >= INT_MAX) {
539 		return EINVAL;
540 	}
541 
542 	while (off > 0) {
543 		if (m == 0) {
544 			return EINVAL;
545 		}
546 		if (off < (size_t)m->m_len) {
547 			break;
548 		}
549 		off -= m->m_len;
550 		m = m->m_next;
551 	}
552 	while (len > 0) {
553 		if (m == 0) {
554 			return EINVAL;
555 		}
556 		count = m->m_len - off > len ? len : m->m_len - off;
557 		bcopy(mtod(m, caddr_t) + off, out_data, count);
558 		len -= count;
559 		out_data = ((char *)out_data) + count;
560 		off = 0;
561 		m = m->m_next;
562 	}
563 
564 	return 0;
565 }
566 
567 int
mbuf_mclhasreference(mbuf_t mbuf)568 mbuf_mclhasreference(mbuf_t mbuf)
569 {
570 	if ((mbuf->m_flags & M_EXT)) {
571 		return m_mclhasreference(mbuf);
572 	} else {
573 		return 0;
574 	}
575 }
576 
577 
/* mbuf header */

/* Next mbuf in this chain (same packet), or NULL. */
mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}

/*
 * Link 'next' after 'mbuf'.  Rejects an mbuf that is already the head
 * of another packet list or that has been freed.
 */
errno_t
mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) {
		return EINVAL;
	}
	mbuf->m_next = next;

	return 0;
}

/* Next packet in the queue, or NULL. */
mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}

/* Link the next packet in the queue; no validation is performed. */
void
mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}

/* Length of data in this single mbuf (not the whole chain). */
size_t
mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}

/* Set this mbuf's data length; caller must keep it within maxlen. */
void
mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = (int32_t)len;
}

/*
 * Capacity of this mbuf's storage: the full cluster size when external
 * storage is attached, otherwise whatever internal space remains from
 * datastart to the end of m_dat.
 */
size_t
mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_size;
	}
	return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf));
}

/* Allocation type of this mbuf (data, header, ...). */
mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}

/* Change the mbuf type; MBUF_TYPE_FREE is reserved for the allocator. */
errno_t
mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) {
		return EINVAL;
	}

	m_mchtype(mbuf, new_type);

	return 0;
}

/* Flags, filtered down to the bits exposed through this KPI. */
mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}
653 
/*
 * Replace the KPI-visible flag bits with 'flags', preserving all
 * private bits.  Toggling MBUF_PKTHDR triggers m_reinit to convert the
 * mbuf to/from packet-header form.
 */
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;
	mbuf_flags_t oflags = mbuf->m_flags;

	/*
	 * 1. Return error if public but un-alterable flags are changed
	 *    in flags argument.
	 * 2. Return error if bits other than public flags are set in passed
	 *    flags argument.
	 *    Please note that private flag bits must be passed as reset by
	 *    kexts, as they must use mbuf_flags KPI to get current set of
	 *    mbuf flags and mbuf_flags KPI does not expose private flags.
	 */
	if ((flags ^ oflags) & mbuf_cflags_mask) {
		ret = EINVAL;
	} else if (flags & ~mbuf_flags_mask) {
		ret = EINVAL;
	} else {
		mbuf->m_flags = (uint16_t)flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;      /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}

/*
 * Like mbuf_setflags, but only the bits selected by 'mask' are
 * replaced.  The mask may cover only public, alterable flags.
 */
errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (uint16_t)((flags & mask) | (mbuf->m_flags & ~mask));
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;      /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
714 
/* Copy src's packet header into dest; src must have M_PKTHDR set. */
errno_t
mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0) {
		return EINVAL;
	}

	m_copy_pkthdr(dest, src);

	return 0;
}

/* Total packet length recorded in the header; 0 for non-pkthdr mbufs. */
size_t
mbuf_pkthdr_len(const mbuf_t mbuf)
{
	if (((mbuf)->m_flags & M_PKTHDR) == 0) {
		return 0;
	}
	/*
	 * While we Assert for development or debug builds,
	 * also make sure we never return negative length
	 * for release build.
	 */
	ASSERT(mbuf->m_pkthdr.len >= 0);
	if (mbuf->m_pkthdr.len < 0) {
		return 0;
	}
	return mbuf->m_pkthdr.len;
}

/* Sum of the storage capacities of every mbuf in the chain. */
__private_extern__ size_t
mbuf_pkthdr_maxlen(mbuf_t m)
{
	size_t maxlen = 0;
	mbuf_t n = m;

	while (n) {
		maxlen += mbuf_maxlen(n);
		n = mbuf_next(n);
	}
	return maxlen;
}
757 
/*
 * Record the total packet length, clamped to INT32_MAX since the
 * header field is a signed 32-bit int.
 */
void
mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	if (len > INT32_MAX) {
		len = INT32_MAX;
	}

	mbuf->m_pkthdr.len = (int)len;
}

/* Adjust the recorded packet length by 'amount'; no bounds checking. */
void
mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}

/* Interface the packet was received on. */
ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	/*
	 * If we reference count ifnets, we should take a reference here
	 * before returning
	 */
	return mbuf->m_pkthdr.rcvif;
}

/* Record the receive interface; the ifnet is not validated. */
errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}

/* Opaque link-layer header pointer stashed in the packet header. */
void*
mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}

/* Stash a link-layer header pointer in the packet header. */
void
mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.pkt_hdr = (void*)header;
}

/*
 * Called after a filter alters inbound data: any checksum the hardware
 * claimed to have verified is no longer trustworthy.
 */
void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
810 
/*
 * Finalize an outbound packet for a client that needs the wire-ready
 * bytes: compute in software any checksums that were deferred to
 * hardware offload.  'pf' selects the protocol family; 'o' is the
 * offset of the IP header within the packet.
 */
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, (uint32_t)o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, (uint32_t)o, -1, -1, m->m_pkthdr.csum_flags);
		break;

	default:
		/* other families: nothing to finalize */
		break;
	}
}
833 
/* Attach an 802.1Q VLAN tag to the packet and mark it valid. */
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}

/* Read the VLAN tag; ENXIO when no tag has been set on this packet. */
errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		return ENXIO; // No vlan tag set
	}
	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}

/* Remove the VLAN tag and clear its valid flag. */
errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
867 
/* Checksum-offload request flags a KPI client may legitimately set */
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
    MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
    MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;

/*
 * Request checksum offload for this packet.  The request occupies the
 * low 16 bits of csum_flags; the upper (private) bits are preserved.
 * 'value' is auxiliary data (e.g. the partial-checksum offset).
 */
errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
885 
886 static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
887     MBUF_TSO_IPV4 | MBUF_TSO_IPV6;
888 
889 errno_t
mbuf_get_tso_requested(mbuf_t mbuf,mbuf_tso_request_flags_t * request,u_int32_t * value)890 mbuf_get_tso_requested(
891 	mbuf_t mbuf,
892 	mbuf_tso_request_flags_t *request,
893 	u_int32_t *value)
894 {
895 	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
896 	    request == NULL || value == NULL) {
897 		return EINVAL;
898 	}
899 
900 	*request = mbuf->m_pkthdr.csum_flags;
901 	*request &= mbuf_valid_tso_request_flags;
902 	if (*request && value != NULL) {
903 		*value = mbuf->m_pkthdr.tso_segsz;
904 	}
905 
906 	return 0;
907 }
908 
/*
 * Read back the checksum-offload request flags and auxiliary value.
 * NOTE(review): unlike mbuf_get_tso_requested, this performs no NULL
 * or M_PKTHDR validation — presumably callers guarantee both; confirm
 * against the KPI contract.
 */
errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}

/* Clear the low (request) 16 bits of csum_flags and the aux value. */
errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
933 
/* Checksum-result flags a driver may legitimately report */
static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
    MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;

/*
 * Record (typically from a driver) which checksums the hardware has
 * already verified on an inbound packet.  The low 16 bits of
 * csum_flags carry the result; private upper bits are preserved.
 */
errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}

/* Read back the hardware checksum results and auxiliary value. */
errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed =
	    mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}

/* Clear the low (result) 16 bits of csum_flags and the aux value. */
errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
974 
/*
 * Compute the IPv4 Internet checksum over [offset, offset + length)
 * of the packet for the given protocol.
 * NOTE(review): 'offset + length' is u_int32_t arithmetic and could
 * wrap for extreme values, bypassing the bounds check — presumably
 * unreachable given pkthdr.len limits; confirm.
 */
errno_t
mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
		return EINVAL;
	}

	*csum = inet_cksum(mbuf, protocol, offset, length);
	return 0;
}

/* IPv6 counterpart of mbuf_inet_cksum; same range validation. */
errno_t
mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
		return EINVAL;
	}

	*csum = inet6_cksum(mbuf, protocol, offset, length);
	return 0;
}
1000 
1001 /*
1002  * Mbuf tag KPIs
1003  */
1004 
#define MTAG_FIRST_ID FIRST_KPI_STR_ID

/*
 * Translate a (reverse-DNS style) tag name into a numeric tag id,
 * registering the string if it has not been seen before (final
 * argument 1 = create).
 */
errno_t
mbuf_tag_id_find(
	const char              *string,
	mbuf_tag_id_t   *out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
1014 
/*
 * Allocate and attach a tag of (id, type) carrying 'length' bytes of
 * client data; *data_p receives a pointer to that data.  The id must
 * come from mbuf_tag_id_find, length must be 1..65535, and only one
 * tag per (id, type) may exist on a packet (EEXIST otherwise).
 */
errno_t
mbuf_tag_allocate(
	mbuf_t                  mbuf,
	mbuf_tag_id_t   id,
	mbuf_tag_type_t type,
	size_t                  length,
	mbuf_how_t              how,
	void**                  data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length < 1 ||
	    (length & 0xffff0000) != 0 || data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_create(id, type, (int)length, how, mbuf);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	*data_p = tag->m_tag_data;

	return 0;
}
1058 
/*
 * Look up the tag of (id, type) on this packet.  On success *length
 * and *data_p describe the tag's client data; ENOENT when no such tag
 * is attached.  Outputs are zeroed up front so they are defined even
 * on the EINVAL path.
 */
errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t *length,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (length != NULL) {
		*length = 0;
	}
	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length == NULL ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag->m_tag_data;

	return 0;
}
1098 
/*
 * Detach and free the tag of (id, type) from this packet.  Silently a
 * no-op for invalid parameters or when no matching tag exists.
 */
void
mbuf_tag_free(
	mbuf_t                  mbuf,
	mbuf_tag_id_t   id,
	mbuf_tag_type_t type)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last) {
		return;
	}

	tag = m_tag_locate(mbuf, id, type);
	if (tag == NULL) {
		return;
	}

	m_tag_delete(mbuf, tag);
}
1123 
/*
 * Maximum length of driver auxiliary data; keep this small to
 * fit in a single mbuf to avoid wasting memory, rounded down to
 * the nearest 64-bit boundary.  This takes into account mbuf
 * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
 * Used by mbuf_add_drvaux() to bound caller-supplied lengths.
 */
#define MBUF_DRVAUX_MAXLEN                                              \
	P2ROUNDDOWN(MLEN -                                              \
	M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
1133 
1134 errno_t
mbuf_add_drvaux(mbuf_t mbuf,mbuf_how_t how,u_int32_t family,u_int32_t subfamily,size_t length,void ** data_p)1135 mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
1136     u_int32_t subfamily, size_t length, void **data_p)
1137 {
1138 	struct m_drvaux_tag *p;
1139 	struct m_tag *tag;
1140 
1141 	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
1142 	    length == 0 || length > MBUF_DRVAUX_MAXLEN) {
1143 		return EINVAL;
1144 	}
1145 
1146 	if (data_p != NULL) {
1147 		*data_p = NULL;
1148 	}
1149 
1150 	/* Check if one is already associated */
1151 	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1152 	    KERNEL_TAG_TYPE_DRVAUX)) != NULL) {
1153 		return EEXIST;
1154 	}
1155 
1156 	/* Tag is (m_drvaux_tag + module specific data) */
1157 	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
1158 	    (int)(sizeof(*p) + length), how, mbuf)) == NULL) {
1159 		return (how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK;
1160 	}
1161 
1162 	p = (struct m_drvaux_tag *)(tag->m_tag_data);
1163 	p->da_family = family;
1164 	p->da_subfamily = subfamily;
1165 	p->da_length = (int)length;
1166 
1167 	/* Associate the tag */
1168 	m_tag_prepend(mbuf, tag);
1169 
1170 	if (data_p != NULL) {
1171 		*data_p = (p + 1);
1172 	}
1173 
1174 	return 0;
1175 }
1176 
1177 errno_t
mbuf_find_drvaux(mbuf_t mbuf,u_int32_t * family_p,u_int32_t * subfamily_p,u_int32_t * length_p,void ** data_p)1178 mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
1179     u_int32_t *length_p, void **data_p)
1180 {
1181 	struct m_drvaux_tag *p;
1182 	struct m_tag *tag;
1183 
1184 	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) {
1185 		return EINVAL;
1186 	}
1187 
1188 	*data_p = NULL;
1189 
1190 	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1191 	    KERNEL_TAG_TYPE_DRVAUX)) == NULL) {
1192 		return ENOENT;
1193 	}
1194 
1195 	/* Must be at least size of m_drvaux_tag */
1196 	VERIFY(tag->m_tag_len >= sizeof(*p));
1197 
1198 	p = (struct m_drvaux_tag *)(tag->m_tag_data);
1199 	VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);
1200 
1201 	if (family_p != NULL) {
1202 		*family_p = p->da_family;
1203 	}
1204 	if (subfamily_p != NULL) {
1205 		*subfamily_p = p->da_subfamily;
1206 	}
1207 	if (length_p != NULL) {
1208 		*length_p = p->da_length;
1209 	}
1210 
1211 	*data_p = (p + 1);
1212 
1213 	return 0;
1214 }
1215 
1216 void
mbuf_del_drvaux(mbuf_t mbuf)1217 mbuf_del_drvaux(mbuf_t mbuf)
1218 {
1219 	struct m_tag *tag;
1220 
1221 	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) {
1222 		return;
1223 	}
1224 
1225 	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1226 	    KERNEL_TAG_TYPE_DRVAUX)) != NULL) {
1227 		m_tag_delete(mbuf, tag);
1228 	}
1229 }
1230 
1231 /* mbuf stats */
1232 void
mbuf_stats(struct mbuf_stat * stats)1233 mbuf_stats(struct mbuf_stat *stats)
1234 {
1235 	stats->mbufs = mbstat.m_mbufs;
1236 	stats->clusters = mbstat.m_clusters;
1237 	stats->clfree = mbstat.m_clfree;
1238 	stats->drops = mbstat.m_drops;
1239 	stats->wait = mbstat.m_wait;
1240 	stats->drain = mbstat.m_drain;
1241 	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
1242 	stats->mcfail = mbstat.m_mcfail;
1243 	stats->mpfail = mbstat.m_mpfail;
1244 	stats->msize = mbstat.m_msize;
1245 	stats->mclbytes = mbstat.m_mclbytes;
1246 	stats->minclsize = mbstat.m_minclsize;
1247 	stats->mlen = mbstat.m_mlen;
1248 	stats->mhlen = mbstat.m_mhlen;
1249 	stats->bigclusters = mbstat.m_bigclusters;
1250 	stats->bigclfree = mbstat.m_bigclfree;
1251 	stats->bigmclbytes = mbstat.m_bigmclbytes;
1252 }
1253 
1254 errno_t
mbuf_allocpacket(mbuf_how_t how,size_t packetlen,unsigned int * maxchunks,mbuf_t * mbuf)1255 mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
1256     mbuf_t *mbuf)
1257 {
1258 	errno_t error = 0;
1259 	struct mbuf *m;
1260 	unsigned int numpkts = 1;
1261 	unsigned int numchunks = maxchunks != NULL ? *maxchunks : 0;
1262 
1263 	if (packetlen == 0) {
1264 		error = EINVAL;
1265 		os_log(OS_LOG_DEFAULT, "mbuf_allocpacket %d", __LINE__);
1266 		goto out;
1267 	}
1268 	m = m_allocpacket_internal(&numpkts, packetlen,
1269 	    maxchunks != NULL ? &numchunks : NULL, how, 1, 0);
1270 	if (m == NULL) {
1271 		if (maxchunks != NULL && *maxchunks && numchunks > *maxchunks) {
1272 			error = ENOBUFS;
1273 			os_log(OS_LOG_DEFAULT, "mbuf_allocpacket %d", __LINE__);
1274 		} else {
1275 			error = ENOMEM;
1276 			os_log(OS_LOG_DEFAULT, "mbuf_allocpacket %d", __LINE__);
1277 		}
1278 	} else {
1279 		if (maxchunks != NULL) {
1280 			*maxchunks = numchunks;
1281 		}
1282 		error = 0;
1283 		*mbuf = m;
1284 	}
1285 out:
1286 	return error;
1287 }
1288 
1289 errno_t
mbuf_allocpacket_list(unsigned int numpkts,mbuf_how_t how,size_t packetlen,unsigned int * maxchunks,mbuf_t * mbuf)1290 mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
1291     unsigned int *maxchunks, mbuf_t *mbuf)
1292 {
1293 	errno_t error = 0;
1294 	struct mbuf *m;
1295 	unsigned int numchunks = maxchunks ? *maxchunks : 0;
1296 
1297 	if (numpkts == 0) {
1298 		error = EINVAL;
1299 		goto out;
1300 	}
1301 	if (packetlen == 0) {
1302 		error = EINVAL;
1303 		goto out;
1304 	}
1305 	m = m_allocpacket_internal(&numpkts, packetlen,
1306 	    maxchunks != NULL ? &numchunks : NULL, how, 1, 0);
1307 	if (m == NULL) {
1308 		if (maxchunks != NULL && *maxchunks && numchunks > *maxchunks) {
1309 			error = ENOBUFS;
1310 		} else {
1311 			error = ENOMEM;
1312 		}
1313 	} else {
1314 		if (maxchunks != NULL) {
1315 			*maxchunks = numchunks;
1316 		}
1317 		error = 0;
1318 		*mbuf = m;
1319 	}
1320 out:
1321 	return error;
1322 }
1323 
1324 __private_extern__ size_t
mbuf_pkt_list_len(mbuf_t m)1325 mbuf_pkt_list_len(mbuf_t m)
1326 {
1327 	size_t len = 0;
1328 	mbuf_t n = m;
1329 
1330 	while (n) {
1331 		len += mbuf_pkthdr_len(n);
1332 		n = mbuf_nextpkt(n);
1333 	}
1334 	return len;
1335 }
1336 
1337 __private_extern__ size_t
mbuf_pkt_list_maxlen(mbuf_t m)1338 mbuf_pkt_list_maxlen(mbuf_t m)
1339 {
1340 	size_t maxlen = 0;
1341 	mbuf_t n = m;
1342 
1343 	while (n) {
1344 		maxlen += mbuf_pkthdr_maxlen(n);
1345 		n = mbuf_nextpkt(n);
1346 	}
1347 	return maxlen;
1348 }
1349 
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
	mbuf_t          m,
	size_t          off,
	size_t          len,
	const void      *data,
	mbuf_how_t      how)
{
	size_t  mlen;
	mbuf_t  m_start = m;    /* chain head, kept for pkthdr.len fixup */
	mbuf_t  n;
	int             totlen = 0;     /* bytes of the chain covered so far */
	errno_t         result = 0;
	const char      *cp = data;     /* read cursor into caller's buffer */

	if (m == NULL || len == 0 || data == NULL) {
		return EINVAL;
	}

	/*
	 * Advance to the mbuf containing offset 'off', appending
	 * zero-filled mbufs if the chain is shorter than the offset.
	 */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			/* m_getclr() returns a zero-filled mbuf */
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = (int32_t)MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	/* Copy the data, growing or extending the chain as needed */
	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			/* Grow the last mbuf in place rather than chaining */
			size_t  grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char *)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		/* Count the skipped leading bytes of this mbuf as covered */
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0) {
			break;
		}
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = (int32_t)MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	/* Stretch the packet header length if the copy extended the chain */
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) {
		m_start->m_pkthdr.len = totlen;
	}

	return result;
}
1434 
/* Return the data capacity (_MLEN) of a plain mbuf. */
u_int32_t
mbuf_get_mlen(void)
{
	return _MLEN;
}
1440 
/* Return the data capacity (_MHLEN) of an mbuf with a packet header. */
u_int32_t
mbuf_get_mhlen(void)
{
	return _MHLEN;
}
1446 
/*
 * Return the minimum cluster size reported to KPI clients.
 * Note this deliberately computes MHLEN + MLEN rather than
 * returning a distinct _MINCLSIZE constant.
 */
u_int32_t
mbuf_get_minclsize(void)
{
	return MHLEN + MLEN;
}
1452 
/* Return the total size (_MSIZE) of a single mbuf. */
u_int32_t
mbuf_get_msize(void)
{
	return _MSIZE;
}
1458 
/* Return the number of defined mbuf traffic classes (MBUF_TC_MAX). */
u_int32_t
mbuf_get_traffic_class_max_count(void)
{
	return MBUF_TC_MAX;
}
1464 
1465 errno_t
mbuf_get_traffic_class_index(mbuf_traffic_class_t tc,u_int32_t * index)1466 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
1467 {
1468 	if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) {
1469 		return EINVAL;
1470 	}
1471 
1472 	*index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
1473 	return 0;
1474 }
1475 
1476 mbuf_traffic_class_t
mbuf_get_traffic_class(mbuf_t m)1477 mbuf_get_traffic_class(mbuf_t m)
1478 {
1479 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1480 		return MBUF_TC_BE;
1481 	}
1482 
1483 	return m_get_traffic_class(m);
1484 }
1485 
1486 errno_t
mbuf_set_traffic_class(mbuf_t m,mbuf_traffic_class_t tc)1487 mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
1488 {
1489 	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1490 	    ((u_int32_t)tc >= MBUF_TC_MAX)) {
1491 		return EINVAL;
1492 	}
1493 
1494 	return m_set_traffic_class(m, tc);
1495 }
1496 
1497 int
mbuf_is_traffic_class_privileged(mbuf_t m)1498 mbuf_is_traffic_class_privileged(mbuf_t m)
1499 {
1500 	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1501 	    !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1502 		return 0;
1503 	}
1504 
1505 	return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0;
1506 }
1507 
/* Return the number of defined mbuf service classes (MBUF_SC_MAX_CLASSES). */
u_int32_t
mbuf_get_service_class_max_count(void)
{
	return MBUF_SC_MAX_CLASSES;
}
1513 
1514 errno_t
mbuf_get_service_class_index(mbuf_svc_class_t sc,u_int32_t * index)1515 mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
1516 {
1517 	if (index == NULL || !MBUF_VALID_SC(sc)) {
1518 		return EINVAL;
1519 	}
1520 
1521 	*index = MBUF_SCIDX(sc);
1522 	return 0;
1523 }
1524 
1525 mbuf_svc_class_t
mbuf_get_service_class(mbuf_t m)1526 mbuf_get_service_class(mbuf_t m)
1527 {
1528 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1529 		return MBUF_SC_BE;
1530 	}
1531 
1532 	return m_get_service_class(m);
1533 }
1534 
1535 errno_t
mbuf_set_service_class(mbuf_t m,mbuf_svc_class_t sc)1536 mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
1537 {
1538 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1539 		return EINVAL;
1540 	}
1541 
1542 	return m_set_service_class(m, sc);
1543 }
1544 
1545 errno_t
mbuf_pkthdr_aux_flags(mbuf_t m,mbuf_pkthdr_aux_flags_t * flagsp)1546 mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
1547 {
1548 	u_int32_t flags;
1549 
1550 	if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) {
1551 		return EINVAL;
1552 	}
1553 
1554 	*flagsp = 0;
1555 	flags = m->m_pkthdr.pkt_flags;
1556 	if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) ==
1557 	    (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) {
1558 		*flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
1559 	}
1560 	if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) ==
1561 	    (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) {
1562 		*flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
1563 	}
1564 
1565 	/* These 2 flags are mutually exclusive */
1566 	VERIFY((*flagsp &
1567 	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
1568 	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));
1569 
1570 	return 0;
1571 }
1572 
1573 errno_t
mbuf_get_driver_scratch(mbuf_t m,u_int8_t ** area,size_t * area_len)1574 mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
1575 {
1576 	if (m == NULL || area == NULL || area_len == NULL ||
1577 	    !(m->m_flags & M_PKTHDR)) {
1578 		return EINVAL;
1579 	}
1580 
1581 	*area_len = m_scratch_get(m, area);
1582 	return 0;
1583 }
1584 
1585 errno_t
mbuf_get_unsent_data_bytes(const mbuf_t m,u_int32_t * unsent_data)1586 mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data)
1587 {
1588 	if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) {
1589 		return EINVAL;
1590 	}
1591 
1592 	if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1593 		return EINVAL;
1594 	}
1595 
1596 	*unsent_data = m->m_pkthdr.bufstatus_if +
1597 	    m->m_pkthdr.bufstatus_sndbuf;
1598 	return 0;
1599 }
1600 
1601 errno_t
mbuf_get_buffer_status(const mbuf_t m,mbuf_buffer_status_t * buf_status)1602 mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status)
1603 {
1604 	if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) ||
1605 	    !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1606 		return EINVAL;
1607 	}
1608 
1609 	buf_status->buf_interface = m->m_pkthdr.bufstatus_if;
1610 	buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf;
1611 	return 0;
1612 }
1613 
1614 errno_t
mbuf_pkt_new_flow(const mbuf_t m,u_int32_t * retval)1615 mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval)
1616 {
1617 	if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1618 		return EINVAL;
1619 	}
1620 	if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) {
1621 		*retval = 1;
1622 	} else {
1623 		*retval = 0;
1624 	}
1625 	return 0;
1626 }
1627 
1628 errno_t
mbuf_last_pkt(const mbuf_t m,u_int32_t * retval)1629 mbuf_last_pkt(const mbuf_t m, u_int32_t *retval)
1630 {
1631 	if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1632 		return EINVAL;
1633 	}
1634 	if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
1635 		*retval = 1;
1636 	} else {
1637 		*retval = 0;
1638 	}
1639 	return 0;
1640 }
1641 
1642 errno_t
mbuf_get_timestamp(mbuf_t m,u_int64_t * ts,boolean_t * valid)1643 mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid)
1644 {
1645 	if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) {
1646 		return EINVAL;
1647 	}
1648 
1649 	if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1650 		if (valid != NULL) {
1651 			*valid = FALSE;
1652 		}
1653 		*ts = 0;
1654 	} else {
1655 		if (valid != NULL) {
1656 			*valid = TRUE;
1657 		}
1658 		*ts = m->m_pkthdr.pkt_timestamp;
1659 	}
1660 	return 0;
1661 }
1662 
1663 errno_t
mbuf_set_timestamp(mbuf_t m,u_int64_t ts,boolean_t valid)1664 mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid)
1665 {
1666 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1667 		return EINVAL;
1668 	}
1669 
1670 	if (valid == FALSE) {
1671 		m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
1672 		m->m_pkthdr.pkt_timestamp = 0;
1673 	} else {
1674 		m->m_pkthdr.pkt_flags |= PKTF_TS_VALID;
1675 		m->m_pkthdr.pkt_timestamp = ts;
1676 	}
1677 	return 0;
1678 }
1679 
1680 errno_t
mbuf_get_status(mbuf_t m,kern_return_t * status)1681 mbuf_get_status(mbuf_t m, kern_return_t *status)
1682 {
1683 	if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) {
1684 		return EINVAL;
1685 	}
1686 
1687 	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1688 		*status = 0;
1689 	} else {
1690 		*status = m->m_pkthdr.drv_tx_status;
1691 	}
1692 	return 0;
1693 }
1694 
1695 static void
driver_mtag_init(mbuf_t m)1696 driver_mtag_init(mbuf_t m)
1697 {
1698 	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1699 		m->m_pkthdr.pkt_flags |= PKTF_DRIVER_MTAG;
1700 		bzero(&m->m_pkthdr.driver_mtag,
1701 		    sizeof(m->m_pkthdr.driver_mtag));
1702 	}
1703 }
1704 
1705 errno_t
mbuf_set_status(mbuf_t m,kern_return_t status)1706 mbuf_set_status(mbuf_t m, kern_return_t status)
1707 {
1708 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1709 		return EINVAL;
1710 	}
1711 
1712 	driver_mtag_init(m);
1713 
1714 	m->m_pkthdr.drv_tx_status = status;
1715 
1716 	return 0;
1717 }
1718 
1719 errno_t
mbuf_get_flowid(mbuf_t m,u_int16_t * flowid)1720 mbuf_get_flowid(mbuf_t m, u_int16_t *flowid)
1721 {
1722 	if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) {
1723 		return EINVAL;
1724 	}
1725 
1726 	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1727 		*flowid = 0;
1728 	} else {
1729 		*flowid = m->m_pkthdr.drv_flowid;
1730 	}
1731 	return 0;
1732 }
1733 
1734 errno_t
mbuf_set_flowid(mbuf_t m,u_int16_t flowid)1735 mbuf_set_flowid(mbuf_t m, u_int16_t flowid)
1736 {
1737 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1738 		return EINVAL;
1739 	}
1740 
1741 	driver_mtag_init(m);
1742 
1743 	m->m_pkthdr.drv_flowid = flowid;
1744 
1745 	return 0;
1746 }
1747 
1748 errno_t
mbuf_get_tx_compl_data(mbuf_t m,uintptr_t * arg,uintptr_t * data)1749 mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data)
1750 {
1751 	if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL ||
1752 	    data == NULL) {
1753 		return EINVAL;
1754 	}
1755 
1756 	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1757 		*arg = 0;
1758 		*data = 0;
1759 	} else {
1760 		*arg = m->m_pkthdr.drv_tx_compl_arg;
1761 		*data = m->m_pkthdr.drv_tx_compl_data;
1762 	}
1763 	return 0;
1764 }
1765 
1766 errno_t
mbuf_set_tx_compl_data(mbuf_t m,uintptr_t arg,uintptr_t data)1767 mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data)
1768 {
1769 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1770 		return EINVAL;
1771 	}
1772 
1773 	driver_mtag_init(m);
1774 
1775 	m->m_pkthdr.drv_tx_compl_arg = arg;
1776 	m->m_pkthdr.drv_tx_compl_data = data;
1777 
1778 	return 0;
1779 }
1780 
1781 static u_int32_t
get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)1782 get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)
1783 {
1784 	u_int32_t i;
1785 
1786 	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1787 		if (mbuf_tx_compl_table[i] == callback) {
1788 			return i;
1789 		}
1790 	}
1791 	return UINT32_MAX;
1792 }
1793 
1794 static u_int32_t
get_tx_compl_callback_index(mbuf_tx_compl_func callback)1795 get_tx_compl_callback_index(mbuf_tx_compl_func callback)
1796 {
1797 	u_int32_t i;
1798 
1799 	lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
1800 
1801 	i = get_tx_compl_callback_index_locked(callback);
1802 
1803 	lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);
1804 
1805 	return i;
1806 }
1807 
1808 mbuf_tx_compl_func
m_get_tx_compl_callback(u_int32_t idx)1809 m_get_tx_compl_callback(u_int32_t idx)
1810 {
1811 	mbuf_tx_compl_func cb;
1812 
1813 	if (idx >= MAX_MBUF_TX_COMPL_FUNC) {
1814 		ASSERT(0);
1815 		return NULL;
1816 	}
1817 	lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
1818 	cb = mbuf_tx_compl_table[idx];
1819 	lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);
1820 	return cb;
1821 }
1822 
1823 errno_t
mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)1824 mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
1825 {
1826 	int i;
1827 	errno_t error;
1828 
1829 	if (callback == NULL) {
1830 		return EINVAL;
1831 	}
1832 
1833 	lck_rw_lock_exclusive(&mbuf_tx_compl_tbl_lock);
1834 
1835 	i = get_tx_compl_callback_index_locked(callback);
1836 	if (i != -1) {
1837 		error = EEXIST;
1838 		goto unlock;
1839 	}
1840 
1841 	/* assume the worst */
1842 	error = ENOSPC;
1843 	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1844 		if (mbuf_tx_compl_table[i] == NULL) {
1845 			mbuf_tx_compl_table[i] = callback;
1846 			error = 0;
1847 			goto unlock;
1848 		}
1849 	}
1850 unlock:
1851 	lck_rw_unlock_exclusive(&mbuf_tx_compl_tbl_lock);
1852 
1853 	return error;
1854 }
1855 
1856 errno_t
mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)1857 mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
1858 {
1859 	int i;
1860 	errno_t error;
1861 
1862 	if (callback == NULL) {
1863 		return EINVAL;
1864 	}
1865 
1866 	lck_rw_lock_exclusive(&mbuf_tx_compl_tbl_lock);
1867 
1868 	/* assume the worst */
1869 	error = ENOENT;
1870 	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1871 		if (mbuf_tx_compl_table[i] == callback) {
1872 			mbuf_tx_compl_table[i] = NULL;
1873 			error = 0;
1874 			goto unlock;
1875 		}
1876 	}
1877 unlock:
1878 	lck_rw_unlock_exclusive(&mbuf_tx_compl_tbl_lock);
1879 
1880 	return error;
1881 }
1882 
1883 errno_t
mbuf_get_timestamp_requested(mbuf_t m,boolean_t * requested)1884 mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested)
1885 {
1886 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1887 		return EINVAL;
1888 	}
1889 
1890 	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1891 		*requested = FALSE;
1892 	} else {
1893 		*requested = TRUE;
1894 	}
1895 	return 0;
1896 }
1897 
1898 errno_t
mbuf_set_timestamp_requested(mbuf_t m,uintptr_t * pktid,mbuf_tx_compl_func callback)1899 mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
1900     mbuf_tx_compl_func callback)
1901 {
1902 	size_t i;
1903 
1904 	if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
1905 	    pktid == NULL) {
1906 		return EINVAL;
1907 	}
1908 
1909 	i = get_tx_compl_callback_index(callback);
1910 	if (i == UINT32_MAX) {
1911 		return ENOENT;
1912 	}
1913 
1914 	m_add_crumb(m, PKT_CRUMB_TS_COMP_REQ);
1915 
1916 #if (DEBUG || DEVELOPMENT)
1917 	VERIFY(i < sizeof(m->m_pkthdr.pkt_compl_callbacks));
1918 #endif /* (DEBUG || DEVELOPMENT) */
1919 
1920 	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1921 		m->m_pkthdr.pkt_compl_callbacks = 0;
1922 		m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
1923 		m->m_pkthdr.pkt_compl_context =
1924 		    os_atomic_inc_orig(&mbuf_tx_compl_index, relaxed);
1925 
1926 #if (DEBUG || DEVELOPMENT)
1927 		os_atomic_inc(&mbuf_tx_compl_requested, relaxed);
1928 #endif /* (DEBUG || DEVELOPMENT) */
1929 	}
1930 	m->m_pkthdr.pkt_compl_callbacks |= (1 << i);
1931 	*pktid = m->m_pkthdr.pkt_compl_context;
1932 
1933 	return 0;
1934 }
1935 
1936 void
m_do_tx_compl_callback(struct mbuf * m,struct ifnet * ifp)1937 m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
1938 {
1939 	int i;
1940 
1941 	if (m == NULL) {
1942 		return;
1943 	}
1944 
1945 	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1946 		return;
1947 	}
1948 
1949 	m_add_crumb(m, PKT_CRUMB_TS_COMP_CB);
1950 
1951 #if (DEBUG || DEVELOPMENT)
1952 	if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
1953 	    (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
1954 	    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1955 		struct timespec now;
1956 
1957 		nanouptime(&now);
1958 		net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
1959 	}
1960 #endif /* (DEBUG || DEVELOPMENT) */
1961 
1962 	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1963 		mbuf_tx_compl_func callback;
1964 
1965 		if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) {
1966 			continue;
1967 		}
1968 
1969 		lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
1970 		callback = mbuf_tx_compl_table[i];
1971 		lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);
1972 
1973 		if (callback != NULL) {
1974 			callback(m->m_pkthdr.pkt_compl_context,
1975 			    ifp,
1976 			    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) ?
1977 			    m->m_pkthdr.pkt_timestamp: 0,
1978 			    m->m_pkthdr.drv_tx_compl_arg,
1979 			    m->m_pkthdr.drv_tx_compl_data,
1980 			    m->m_pkthdr.drv_tx_status);
1981 		}
1982 	}
1983 #if (DEBUG || DEVELOPMENT)
1984 	if (m->m_pkthdr.pkt_compl_callbacks != 0) {
1985 		os_atomic_inc(&mbuf_tx_compl_callbacks, relaxed);
1986 		if (ifp == NULL) {
1987 			os_atomic_inc(&mbuf_tx_compl_aborted, relaxed);
1988 		}
1989 	}
1990 #endif /* (DEBUG || DEVELOPMENT) */
1991 	m->m_pkthdr.pkt_compl_callbacks = 0;
1992 }
1993 
1994 errno_t
mbuf_get_keepalive_flag(mbuf_t m,boolean_t * is_keepalive)1995 mbuf_get_keepalive_flag(mbuf_t m, boolean_t *is_keepalive)
1996 {
1997 	if (m == NULL || is_keepalive == NULL || !(m->m_flags & M_PKTHDR)) {
1998 		return EINVAL;
1999 	}
2000 
2001 	*is_keepalive = (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
2002 
2003 	return 0;
2004 }
2005 
2006 errno_t
mbuf_set_keepalive_flag(mbuf_t m,boolean_t is_keepalive)2007 mbuf_set_keepalive_flag(mbuf_t m, boolean_t is_keepalive)
2008 {
2009 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
2010 		return EINVAL;
2011 	}
2012 
2013 	if (is_keepalive) {
2014 		m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
2015 	} else {
2016 		m->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
2017 	}
2018 
2019 	return 0;
2020 }
2021