xref: /xnu-11417.140.69/bsd/kern/kpi_mbuf.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2004-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #define __KPI__
30 
31 #include <sys/param.h>
32 #include <sys/mbuf.h>
33 #include <sys/mcache.h>
34 #include <sys/socket.h>
35 #include <kern/debug.h>
36 #include <libkern/OSAtomic.h>
37 #include <string.h>
38 #include <net/dlil.h>
39 #include <netinet/in.h>
40 #include <netinet/ip_var.h>
41 
42 #include <os/log.h>
43 
44 #include "net/net_str_id.h"
45 
46 /* mbuf flags visible to KPI clients; do not add private flags here */
47 static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
48     MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
49     MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);
50 
51 /* Unalterable mbuf flags */
52 static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);
53 
54 #define MAX_MBUF_TX_COMPL_FUNC 8
55 mbuf_tx_compl_func
56     mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC];
57 extern lck_rw_t mbuf_tx_compl_tbl_lock;
58 u_int32_t mbuf_tx_compl_index = 0;
59 
60 #if (DEVELOPMENT || DEBUG)
61 int mbuf_tx_compl_debug = 0;
62 uint64_t mbuf_tx_compl_requested __attribute__((aligned(8))) = 0;
63 uint64_t mbuf_tx_compl_callbacks __attribute__((aligned(8))) = 0;
64 uint64_t mbuf_tx_compl_aborted __attribute__((aligned(8))) = 0;
65 
66 SYSCTL_DECL(_kern_ipc);
67 SYSCTL_NODE(_kern_ipc, OID_AUTO, mbtxcf,
68     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
69 SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, debug,
70     CTLFLAG_RW | CTLFLAG_LOCKED, &mbuf_tx_compl_debug, 0, "");
71 SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, index,
72     CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_index, 0, "");
73 SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, requested,
74     CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_requested, "");
75 SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, callbacks,
76     CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_callbacks, "");
77 SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted,
78     CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_aborted, "");
79 #endif /* (DEBUG || DEVELOPMENT) */
80 
/*
 * Return the mbuf's current data pointer.  Thin wrapper around
 * m_mtod_current() so KPI clients never dereference m_data directly.
 */
void *
mbuf_data(mbuf_t mbuf)
{
	return m_mtod_current(mbuf);
}
86 
87 void *
mbuf_datastart(mbuf_t mbuf)88 mbuf_datastart(mbuf_t mbuf)
89 {
90 	if (mbuf->m_flags & M_EXT) {
91 		return mbuf->m_ext.ext_buf;
92 	}
93 	if (mbuf->m_flags & M_PKTHDR) {
94 		return mbuf->m_pktdat;
95 	}
96 	return mbuf->m_dat;
97 }
98 
/*
 * Point the mbuf's data at an arbitrary window inside its own buffer.
 * Rejects any (data, len) range not fully contained in
 * [datastart, datastart + maxlen].
 * NOTE(review): the bounds check is done in size_t arithmetic and
 * assumes (size_t)data + len does not wrap — confirm callers pass
 * sane lengths.
 */
errno_t
mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
{
	size_t  start = (size_t)((char *)mbuf_datastart(mbuf));
	size_t  maxlen = mbuf_maxlen(mbuf);

	if ((size_t)data < start || ((size_t)data) + len > start + maxlen) {
		return EINVAL;
	}
	mbuf->m_data = (uintptr_t)data;
	mbuf->m_len = (int32_t)len;

	return 0;
}
113 
/*
 * Position the data pointer so a payload of `len` bytes ends at the
 * buffer tail on a 32-bit boundary.  Refuses shared clusters
 * (ENOTSUP), since moving data there would corrupt other references.
 * NOTE(review): assumes len <= trailing space after the reset of
 * m_data; the subtraction is unchecked — confirm at call sites.
 */
errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) {
		return ENOTSUP;
	}
	mbuf->m_data = (uintptr_t)mbuf_datastart(mbuf);
	mbuf->m_data +=
	    ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1));

	return 0;
}
126 
/*
 * This function is used to provide mcl_to_paddr via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
addr64_t
mbuf_data_to_physical(void *ptr)
{
	/* Translate a cluster virtual address to its physical address. */
	return (addr64_t)mcl_to_paddr(ptr);
}
137 
138 errno_t
mbuf_get(mbuf_how_t how,mbuf_type_t type,mbuf_t * mbuf)139 mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
140 {
141 	/* Must set *mbuf to NULL in failure case */
142 	*mbuf = m_get(how, type);
143 
144 	return *mbuf == NULL ? ENOMEM : 0;
145 }
146 
147 errno_t
mbuf_gethdr(mbuf_how_t how,mbuf_type_t type,mbuf_t * mbuf)148 mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
149 {
150 	/* Must set *mbuf to NULL in failure case */
151 	*mbuf = m_gethdr(how, type);
152 
153 	return *mbuf == NULL ? ENOMEM : 0;
154 }
155 
/*
 * Attach caller-supplied external storage to an mbuf (allocating a
 * fresh mbuf when *mbuf is NULL).  extfree/extarg are registered with
 * m_clattach as the buffer's free routine and its argument.
 */
errno_t
mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    caddr_t extbuf __sized_by_or_null(extsize), void (*extfree)(caddr_t, u_int, caddr_t),
    size_t extsize, caddr_t extarg)
{
	if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) {
		return EINVAL;
	}

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, extsize, extarg, how, 0)) == NULL) {
		return ENOMEM;
	}

	return 0;
}
172 
/*
 * Allocate a cluster and attach it to an mbuf in "paired" (ring) mode
 * (last m_clattach argument is 1).  On success *size is updated to the
 * cluster class actually chosen by mbuf_alloccluster(); on attach
 * failure the freshly allocated cluster is released before returning.
 */
errno_t
mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    void (*extfree)(caddr_t, u_int, caddr_t), size_t *size)
{
	size_t extsize = 0;
	caddr_t extbuf __sized_by_or_null(extsize) = NULL;
	errno_t err;

	if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) {
		return EINVAL;
	}

	extsize = *size;
	extbuf = NULL;

	if ((err = mbuf_alloccluster(how, &extsize, &extbuf)) != 0) {
		return err;
	}

	VERIFY((extsize == 0 && extbuf == NULL) || (extsize != 0 && extbuf != NULL));

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, extsize, NULL, how, 1)) == NULL) {
		/* Attach failed: give the cluster back to its pool. */
		mbuf_freecluster(extbuf, extsize);
		return ENOMEM;
	}

	*size = extsize;

	return 0;
}
204 
/* Report whether the mbuf's paired (ring) cluster is currently active. */
int
mbuf_ring_cluster_is_active(mbuf_t mbuf)
{
	return m_ext_paired_is_active(mbuf);
}
210 
211 errno_t
mbuf_ring_cluster_activate(mbuf_t mbuf)212 mbuf_ring_cluster_activate(mbuf_t mbuf)
213 {
214 	if (mbuf_ring_cluster_is_active(mbuf)) {
215 		return EBUSY;
216 	}
217 
218 	m_ext_paired_activate(mbuf);
219 	return 0;
220 }
221 
222 errno_t
mbuf_cluster_set_prop(mbuf_t mbuf,u_int32_t oldprop,u_int32_t newprop)223 mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop)
224 {
225 	if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) {
226 		return EINVAL;
227 	}
228 
229 	return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY;
230 }
231 
232 errno_t
mbuf_cluster_get_prop(mbuf_t mbuf,u_int32_t * prop)233 mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop)
234 {
235 	if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) {
236 		return EINVAL;
237 	}
238 
239 	*prop = m_ext_get_prop(mbuf);
240 	return 0;
241 }
242 
/*
 * Allocate a bare cluster, rounding *size up to the smallest fitting
 * class (MCLBYTES, MBIGCLBYTES or M16KCLBYTES).  On success *size is
 * set to the class actually allocated and *addr to the buffer; on any
 * failure both outputs are left untouched.
 */
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, char * __sized_by_or_null(*size) *addr)
{
	if (size == NULL || *size == 0 || addr == NULL) {
		return EINVAL;
	}
	caddr_t _addr = NULL;
	size_t _size = *size;

	/* Jumbo cluster pool not available? */
	if (_size > MBIGCLBYTES && njcl == 0) {
		return ENOTSUP;
	}

	/* Try the smallest class that can hold _size bytes. */
	if (_size <= MCLBYTES && (_addr = m_mclalloc(how)) != NULL) {
		_size = MCLBYTES;
	} else if (_size > MCLBYTES && _size <= MBIGCLBYTES &&
	    (_addr = m_bigalloc(how)) != NULL) {
		_size = MBIGCLBYTES;
	} else if (_size > MBIGCLBYTES && _size <= M16KCLBYTES &&
	    (_addr = m_16kalloc(how)) != NULL) {
		_size = M16KCLBYTES;
	} else {
		_size = 0;
	}

	if (_addr == NULL) {
		return ENOMEM;
	}

	*size = _size;
	*addr = _addr;

	return 0;
}
278 
/*
 * Return a cluster obtained from mbuf_alloccluster() to its pool.
 * size must be exactly one of the three cluster classes; otherwise
 * this panics, as it does when a 16K cluster is freed while the jumbo
 * pool was never created (njcl == 0).
 */
void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) {
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);
	}

	if (size == MCLBYTES) {
		m_mclfree(addr);
	} else if (size == MBIGCLBYTES) {
		m_bigfree(addr, MBIGCLBYTES, NULL);
	} else if (njcl > 0) {
		m_16kfree(addr, M16KCLBYTES, NULL);
	} else {
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
	}
}
297 
/*
 * Attach a cluster of exactly `size` bytes (one of the three cluster
 * classes) to *mbuf, allocating the mbuf first when *mbuf is NULL.
 * If this routine allocated the mbuf and the cluster attach fails,
 * the mbuf is freed and *mbuf reset to NULL.
 */
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int     created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL) {
			return ENOMEM;
		}
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		/* Only the three exact cluster sizes are supported. */
		error = EINVAL;
		goto out;
	}
	/* Attach succeeded only if M_EXT is now set. */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}
345 
/*
 * Attach a regular (MCLBYTES) cluster to *mbuf, allocating the mbuf
 * first when *mbuf is NULL.  If this routine allocated the mbuf and
 * the cluster attach fails, the mbuf is freed and *mbuf reset to NULL.
 */
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int             created = 0;
	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error) {
			return error;
		}
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	/* Undo our own allocation if the cluster did not attach. */
	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
	return error;
}
378 
379 
380 errno_t
mbuf_getpacket(mbuf_how_t how,mbuf_t * mbuf)381 mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
382 {
383 	/* Must set *mbuf to NULL in failure case */
384 	errno_t error = 0;
385 
386 	*mbuf = m_getpacket_how(how);
387 
388 	if (*mbuf == NULL) {
389 		if (how == MBUF_WAITOK) {
390 			error = ENOMEM;
391 		} else {
392 			error = EWOULDBLOCK;
393 		}
394 	}
395 
396 	return error;
397 }
398 
/*
 * This function is used to provide m_free via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	/* Free a single mbuf; returns the next mbuf in the chain. */
	return m_free(mbuf);
}

/*
 * This function is used to provide m_freem via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void
mbuf_freem(mbuf_t mbuf)
{
	/* Free an entire mbuf chain. */
	m_freem(mbuf);
}

/* Free a list of packets (chained via m_nextpkt); see m_freem_list(). */
int
mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}
424 
/* Bytes available before the current data pointer (see M_LEADINGSPACE). */
size_t
mbuf_leadingspace(const mbuf_t mbuf)
{
	return M_LEADINGSPACE(mbuf);
}

/*
 * This function is used to provide m_trailingspace via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
size_t
mbuf_trailingspace(const mbuf_t mbuf)
{
	return M_TRAILINGSPACE(mbuf);
}
441 
/* Manipulation */

/*
 * Shallow-copy `len` bytes starting at `offset` into a new chain.
 * NOTE(review): offset/len are truncated to int for the legacy m_copym
 * interface — callers should stay below INT_MAX.
 */
errno_t
mbuf_copym(const mbuf_t src, size_t offset, size_t len,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, (int)offset, (int)len, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

/* Deep-copy an entire mbuf chain. */
errno_t
mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

/* Prepend `len` bytes of space in front of the chain's data. */
errno_t
mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, (int)len, how, 0);

	return *orig == NULL ? ENOMEM : 0;
}

/* Split the chain at `offset`; the tail is returned in *new_mbuf. */
errno_t
mbuf_split(mbuf_t src, size_t offset,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, (int)offset, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

/* Make the first `len` bytes contiguous in the first mbuf. */
errno_t
mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, (int)len);

	return *mbuf == NULL ? ENOMEM : 0;
}

/*
 * Ensure `len` bytes at *offset are contiguous; *location receives the
 * mbuf holding them and *offset is rewritten relative to *location.
 */
errno_t
mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;
	*location = m_pulldown(src, (int)*offset, (int)len, &new_offset);
	*offset = new_offset;

	return *location == NULL ? ENOMEM : 0;
}
500 
/*
 * This function is used to provide m_adj via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void
mbuf_adj(mbuf_t mbuf, int len)
{
	/* Trim len bytes from the head (len > 0) or tail (len < 0). */
	m_adj(mbuf, len);
}
510 
/*
 * Grow or shrink m_len by `amount` after validating that the result
 * stays within the mbuf's buffer (growing) and non-negative
 * (shrinking).
 */
errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		/* Bytes already consumed before and including m_len. */
		size_t used = (size_t)mtod(m, void*) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m)) {
			return EINVAL;
		}
	} else if (-amount > m->m_len) {
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}
529 
530 mbuf_t
mbuf_concatenate(mbuf_t dst,mbuf_t src)531 mbuf_concatenate(mbuf_t dst, mbuf_t src)
532 {
533 	if (dst == NULL) {
534 		return NULL;
535 	}
536 
537 	m_cat(dst, src);
538 
539 	/* return dst as is in the current implementation */
540 	return dst;
541 }
542 
/*
 * Copy `len` bytes starting `off` bytes into the chain out to a flat
 * buffer.  Unlike m_copydata this returns EINVAL on a short chain
 * instead of panicking; a NULL/zero-length destination is a no-op.
 */
errno_t
mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data __sized_by_or_null(len))
{
	/* Copied m_copydata, added error handling (don't just panic) */
	size_t count;
	mbuf_t  m = m0;

	if (off >= INT_MAX || len >= INT_MAX) {
		return EINVAL;
	}

	/*
	 * Empty destination buffer is permitted.
	 */
	if (out_data == NULL || len == 0) {
		return 0;
	}

	/* Walk to the mbuf containing offset `off`. */
	while (off > 0) {
		if (m == 0) {
			return EINVAL;
		}
		if (off < (size_t)m->m_len) {
			break;
		}
		off -= m->m_len;
		m = m->m_next;
	}
	/* Copy out mbuf by mbuf until `len` bytes have been gathered. */
	while (len > 0) {
		if (m == 0) {
			return EINVAL;
		}
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char *)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}
585 
586 int
mbuf_mclhasreference(mbuf_t mbuf)587 mbuf_mclhasreference(mbuf_t mbuf)
588 {
589 	if ((mbuf->m_flags & M_EXT)) {
590 		return m_mclhasreference(mbuf);
591 	} else {
592 		return 0;
593 	}
594 }
595 
596 
/* mbuf header */

/* Next mbuf in this packet's chain (NULL at the end). */
mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}

/*
 * Link `next` after `mbuf`.  Rejects an mbuf that is already part of
 * a packet list (m_nextpkt set) or has been freed (MT_FREE).
 */
errno_t
mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) {
		return EINVAL;
	}
	mbuf->m_next = next;

	return 0;
}

/* Next packet in the packet list (NULL at the end). */
mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}

/* Link `nextpkt` as the packet following `mbuf`; no validation. */
void
mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}

/* Number of valid data bytes in this single mbuf. */
size_t
mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}

/* Set m_len; the length is truncated to 32 bits and not range-checked. */
void
mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = (int32_t)len;
}

/*
 * Capacity of this mbuf's data area: the cluster size when external
 * storage is attached, otherwise the internal storage remaining after
 * the data start (shorter for pkthdr mbufs).
 */
size_t
mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_size;
	}
	return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf));
}

/* The mbuf's allocation type (MT_DATA, MT_HEADER, ...). */
mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}
654 
/*
 * Change the mbuf's type, keeping the per-type accounting consistent
 * via m_mchtype().  Changing a live mbuf to MBUF_TYPE_FREE is refused.
 */
errno_t
mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) {
		return EINVAL;
	}

	m_mchtype(mbuf, new_type);

	return 0;
}

/* Return only the KPI-visible flag bits; private flags are masked out. */
mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}
672 
/*
 * Replace the KPI-visible flag bits with `flags`, preserving private
 * flag bits.  A change of M_PKTHDR is delegated to m_reinit(), which
 * performs the pkthdr bookkeeping.
 */
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;
	mbuf_flags_t oflags = mbuf->m_flags;

	/*
	 * 1. Return error if public but un-alterable flags are changed
	 *    in flags argument.
	 * 2. Return error if bits other than public flags are set in passed
	 *    flags argument.
	 *    Please note that private flag bits must be passed as reset by
	 *    kexts, as they must use mbuf_flags KPI to get current set of
	 *    mbuf flags and mbuf_flags KPI does not expose private flags.
	 */
	if ((flags ^ oflags) & mbuf_cflags_mask) {
		ret = EINVAL;
	} else if (flags & ~mbuf_flags_mask) {
		ret = EINVAL;
	} else {
		mbuf->m_flags = (uint16_t)flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;      /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
708 
/*
 * Update only the flag bits selected by `mask`.  The mask may cover
 * only public, alterable flags; a resulting change of M_PKTHDR is
 * delegated to m_reinit() for the pkthdr bookkeeping.
 */
errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (uint16_t)((flags & mask) | (mbuf->m_flags & ~mask));
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;      /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
733 
734 errno_t
mbuf_copy_pkthdr(mbuf_t dest,const mbuf_t src)735 mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
736 {
737 	if (((src)->m_flags & M_PKTHDR) == 0) {
738 		return EINVAL;
739 	}
740 
741 	m_copy_pkthdr(dest, src);
742 
743 	return 0;
744 }
745 
/*
 * Total packet length from the pkthdr; returns 0 for non-pkthdr mbufs
 * and clamps a (bogus) negative stored length to 0 on release builds.
 */
size_t
mbuf_pkthdr_len(const mbuf_t mbuf)
{
	if (((mbuf)->m_flags & M_PKTHDR) == 0) {
		return 0;
	}
	/*
	 * While we Assert for development or debug builds,
	 * also make sure we never return negative length
	 * for release build.
	 */
	ASSERT(mbuf->m_pkthdr.len >= 0);
	if (mbuf->m_pkthdr.len < 0) {
		return 0;
	}
	return mbuf->m_pkthdr.len;
}
763 
764 __private_extern__ size_t
mbuf_pkthdr_maxlen(mbuf_t m)765 mbuf_pkthdr_maxlen(mbuf_t m)
766 {
767 	size_t maxlen = 0;
768 	mbuf_t n = m;
769 
770 	while (n) {
771 		maxlen += mbuf_maxlen(n);
772 		n = mbuf_next(n);
773 	}
774 	return maxlen;
775 }
776 
/* Set the pkthdr total length, saturating at INT32_MAX. */
void
mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	if (len > INT32_MAX) {
		len = INT32_MAX;
	}

	mbuf->m_pkthdr.len = (int)len;
}

/* Adjust the pkthdr total length by `amount`; no range checking. */
void
mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}
792 
/* Interface the packet was received on (no reference is taken). */
ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	/*
	 * If we reference count ifnets, we should take a reference here
	 * before returning
	 */
	return mbuf->m_pkthdr.rcvif;
}

/* Record `ifnet` as the receive interface; the pointer is not validated. */
errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}

/* Opaque link-layer header pointer stashed in the pkthdr. */
void*
mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}

/* Stash an opaque link-layer header pointer in the pkthdr. */
void
mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.pkt_hdr = (void*)header;
}
822 
/*
 * Declare that an inbound packet was modified: any hardware-computed
 * checksum results are no longer trustworthy, so drop them all.
 */
void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
829 
/*
 * Finalize an outbound packet for a client that needs the final bytes:
 * compute in software whatever checksums were deferred to hardware.
 * `pf` selects the protocol family, `o` is the offset of the IP header.
 */
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, (uint32_t)o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, (uint32_t)o, -1, -1, m->m_pkthdr.csum_flags);
		break;

	default:
		/* Other families: nothing to finalize. */
		break;
	}
}
852 
/* Attach an 802.1Q VLAN tag to the packet and mark it valid. */
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}

/* Read the VLAN tag; ENXIO when no tag has been set on this packet. */
errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		return ENXIO; // No vlan tag set
	}
	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}

/* Remove the VLAN tag and clear its valid bit. */
errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
886 
/* Checksum-request bits a KPI client is allowed to set */
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
    MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
    MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;

/*
 * Request checksum offload for an outbound packet.  The request
 * occupies the low 16 bits of csum_flags (the high bits are
 * preserved); `value` is stored in csum_data.
 */
errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
904 
/* TSO request bits exposed through this KPI */
static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
    MBUF_TSO_IPV4 | MBUF_TSO_IPV6;

/*
 * Report whether TCP segmentation offload was requested for this
 * packet.  *mss is written only when a TSO bit is set; otherwise it
 * is left untouched.
 */
errno_t
mbuf_get_tso_requested(
	mbuf_t mbuf,
	mbuf_tso_request_flags_t *request,
	u_int32_t *mss)
{
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    request == NULL || mss == NULL) {
		return EINVAL;
	}

	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_tso_request_flags;
	if (*request != 0) {
		*mss = mbuf->m_pkthdr.tx_seg_size;
	}

	return 0;
}
927 
928 static inline mbuf_gso_type_t
gso_type_from_tso_request_flags(mbuf_tso_request_flags_t flags)929 gso_type_from_tso_request_flags(mbuf_tso_request_flags_t flags)
930 {
931 	mbuf_gso_type_t type = MBUF_GSO_TYPE_NONE;
932 
933 	if ((flags & MBUF_TSO_IPV4) != 0) {
934 		type = MBUF_GSO_TYPE_IPV4;
935 	} else if ((flags & MBUF_TSO_IPV6) != 0) {
936 		type = MBUF_GSO_TYPE_IPV6;
937 	}
938 	return type;
939 }
940 
/*
 * Read the packet's GSO (TSO) parameters.  When no TSO bit is set the
 * type is MBUF_GSO_TYPE_NONE and both sizes are reported as 0.
 */
errno_t
mbuf_get_gso_info(
	mbuf_t mbuf,
	mbuf_gso_type_t *type,
	uint16_t *ret_seg_size,
	uint16_t *ret_hdr_len)
{
	mbuf_tso_request_flags_t flags;
	uint16_t       hdr_len = 0;
	uint16_t       seg_size = 0;

	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    type == NULL || ret_seg_size == NULL || ret_hdr_len == NULL) {
		return EINVAL;
	}
	flags = mbuf->m_pkthdr.csum_flags & mbuf_valid_tso_request_flags;
	if (flags != 0) {
		seg_size = mbuf->m_pkthdr.tx_seg_size;
		hdr_len = mbuf->m_pkthdr.tx_hdr_len;
	}
	*type = gso_type_from_tso_request_flags(flags);
	*ret_seg_size = seg_size;
	*ret_hdr_len = hdr_len;
	return 0;
}
966 
/*
 * Set or clear the packet's GSO (TSO) parameters.  Clearing
 * (MBUF_GSO_TYPE_NONE) requires seg_size and hdr_len to be 0; setting
 * requires a non-zero seg_size.  Returns EINVAL otherwise.
 */
errno_t
mbuf_set_gso_info(
	mbuf_t mbuf,
	mbuf_gso_type_t type,
	uint16_t seg_size,
	uint16_t hdr_len)
{
	errno_t         error = EINVAL;
	mbuf_tso_request_flags_t flags = 0;

	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0) {
		goto done;
	}
	/* Translate the GSO type into TSO request bits. */
	switch (type) {
	case MBUF_GSO_TYPE_NONE:
		break;
	case MBUF_GSO_TYPE_IPV4:
		flags = MBUF_TSO_IPV4;
		break;
	case MBUF_GSO_TYPE_IPV6:
		flags = MBUF_TSO_IPV6;
		break;
	default:
		/* unsupported type */
		goto done;
	}
	switch (flags) {
	case 0:
		/* clearing GSO, seg_size and hdr_len must be zero */
		if (seg_size != 0 || hdr_len != 0) {
			goto done;
		}
		mbuf->m_pkthdr.csum_flags &= ~mbuf_valid_tso_request_flags;
		mbuf->m_pkthdr.tx_seg_size = 0;
		mbuf->m_pkthdr.tx_hdr_len = 0;
		error = 0;
		break;
	default:
		if (seg_size == 0) {
			/* must specify seg_size */
			goto done;
		}
		mbuf->m_pkthdr.csum_flags |= flags;
		mbuf->m_pkthdr.tx_seg_size = seg_size;
		mbuf->m_pkthdr.tx_hdr_len = hdr_len;
		error = 0;
		break;
	}
done:
	return error;
}
1018 
1019 errno_t
mbuf_get_lro_info(mbuf_t mbuf,uint8_t * seg_cnt,uint8_t * dup_ack_cnt)1020 mbuf_get_lro_info(
1021 	mbuf_t mbuf,
1022 	uint8_t * seg_cnt,
1023 	uint8_t * dup_ack_cnt)
1024 {
1025 	errno_t         error = EINVAL;
1026 
1027 	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0) {
1028 		goto done;
1029 	}
1030 	if (seg_cnt == NULL || dup_ack_cnt == NULL) {
1031 		goto done;
1032 	}
1033 	*seg_cnt = mbuf->m_pkthdr.rx_seg_cnt;
1034 	*dup_ack_cnt = 0;
1035 	error = 0;
1036 done:
1037 	return error;
1038 }
1039 
/*
 * Set the packet's LRO receive segment count.  dup_ack_cnt must be 0
 * and seg_cnt must not be 1.
 * NOTE(review): the seg_cnt == 1 rejection looks deliberate (a single
 * segment is not coalesced) — confirm against callers.
 */
errno_t
mbuf_set_lro_info(
	mbuf_t mbuf,
	uint8_t seg_cnt,
	uint8_t dup_ack_cnt)
{
	errno_t         error = EINVAL;

	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    dup_ack_cnt != 0 || seg_cnt == 1) {
		goto done;
	}
	mbuf->m_pkthdr.rx_seg_cnt = seg_cnt;
done:
	return error;
}
1056 
/*
 * Read the outstanding checksum-offload request bits; `value`
 * (csum_data) is optional and only written when non-NULL.
 */
errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}

/* Clear the low (request) half of csum_flags and the auxiliary value. */
errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
1081 
/* Checksum-result bits a driver is allowed to report */
static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
    MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;

/*
 * Record checksum results computed by hardware for an inbound packet.
 * The result occupies the low 16 bits of csum_flags (high bits are
 * preserved); `value` is stored in csum_data.
 */
errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
1099 
/* Read the hardware checksum results and the auxiliary csum_data value. */
errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed =
	    mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}

/* Clear the low (result) half of csum_flags and the auxiliary value. */
errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
1122 
1123 errno_t
mbuf_inet_cksum(mbuf_t mbuf,int protocol,u_int32_t offset,u_int32_t length,u_int16_t * csum)1124 mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
1125     u_int16_t *csum)
1126 {
1127 	if (mbuf == NULL || length == 0 || csum == NULL ||
1128 	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
1129 		return EINVAL;
1130 	}
1131 
1132 	*csum = inet_cksum(mbuf, protocol, offset, length);
1133 	return 0;
1134 }
1135 
1136 errno_t
mbuf_inet6_cksum(mbuf_t mbuf,int protocol,u_int32_t offset,u_int32_t length,u_int16_t * csum)1137 mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
1138     u_int16_t *csum)
1139 {
1140 	if (mbuf == NULL || length == 0 || csum == NULL ||
1141 	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
1142 		return EINVAL;
1143 	}
1144 
1145 	*csum = inet6_cksum(mbuf, protocol, offset, length);
1146 	return 0;
1147 }
1148 
1149 /*
1150  * Mbuf tag KPIs
1151  */
1152 
1153 #define MTAG_FIRST_ID FIRST_KPI_STR_ID
1154 
/*
 * Map a client-supplied string to a unique mbuf tag id, registering the
 * string on first use (the trailing 1 asks net_str_id_find_internal to
 * create the id if it does not exist yet).
 */
errno_t
mbuf_tag_id_find(
	const char              *string,
	mbuf_tag_id_t   *out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
1162 
1163 errno_t
mbuf_tag_allocate(mbuf_t mbuf,mbuf_tag_id_t id,mbuf_tag_type_t type,size_t length,mbuf_how_t how,void ** data_p)1164 mbuf_tag_allocate(
1165 	mbuf_t                  mbuf,
1166 	mbuf_tag_id_t   id,
1167 	mbuf_tag_type_t type,
1168 	size_t                  length,
1169 	mbuf_how_t              how,
1170 	void**                  data_p)
1171 {
1172 	struct m_tag *tag;
1173 	u_int32_t mtag_id_first, mtag_id_last;
1174 
1175 	if (data_p != NULL) {
1176 		*data_p = NULL;
1177 	}
1178 
1179 	/* Sanity check parameters */
1180 	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
1181 	    NSI_MBUF_TAG);
1182 	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
1183 	    id < mtag_id_first || id > mtag_id_last || length < 1 ||
1184 	    (length & 0xffff0000) != 0 || data_p == NULL) {
1185 		return EINVAL;
1186 	}
1187 
1188 	/* Make sure this mtag hasn't already been allocated */
1189 	tag = m_tag_locate(mbuf, id, type);
1190 	if (tag != NULL) {
1191 		return EEXIST;
1192 	}
1193 
1194 	/* Allocate an mtag */
1195 	tag = m_tag_create(id, type, (int)length, how, mbuf);
1196 	if (tag == NULL) {
1197 		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
1198 	}
1199 
1200 	/* Attach the mtag and set *data_p */
1201 	m_tag_prepend(mbuf, tag);
1202 	*data_p = tag->m_tag_data;
1203 
1204 	return 0;
1205 }
1206 
/*
 * Look up a previously allocated mbuf tag by (id, type) and return its
 * payload pointer and length.  Outputs are zeroed up front so callers
 * always see well-defined values on failure.
 */
errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t *length,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (length != NULL) {
		*length = 0;
	}
	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters: pkthdr mbuf and a registered tag id */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length == NULL ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag->m_tag_data;

	return 0;
}
1246 
1247 void
mbuf_tag_free(mbuf_t mbuf,mbuf_tag_id_t id,mbuf_tag_type_t type)1248 mbuf_tag_free(
1249 	mbuf_t                  mbuf,
1250 	mbuf_tag_id_t   id,
1251 	mbuf_tag_type_t type)
1252 {
1253 	struct m_tag *tag;
1254 	u_int32_t mtag_id_first, mtag_id_last;
1255 
1256 	/* Sanity check parameters */
1257 	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
1258 	    NSI_MBUF_TAG);
1259 	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
1260 	    id < mtag_id_first || id > mtag_id_last) {
1261 		return;
1262 	}
1263 
1264 	tag = m_tag_locate(mbuf, id, type);
1265 	if (tag == NULL) {
1266 		return;
1267 	}
1268 
1269 	m_tag_delete(mbuf, tag);
1270 }
1271 
1272 /*
1273  * Maximum length of driver auxiliary data; keep this small to
1274  * fit in a single mbuf to avoid wasting memory, rounded down to
1275  * the nearest 64-bit boundary.  This takes into account mbuf
1276  * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
1277  */
1278 #define MBUF_DRVAUX_MAXLEN                                              \
1279 	P2ROUNDDOWN(MLEN -                                              \
1280 	M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
1281 
1282 errno_t
mbuf_add_drvaux(mbuf_t mbuf,mbuf_how_t how,u_int32_t family,u_int32_t subfamily,size_t length,void ** data_p)1283 mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
1284     u_int32_t subfamily, size_t length, void **data_p)
1285 {
1286 	struct m_drvaux_tag *p;
1287 	struct m_tag *tag;
1288 
1289 	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
1290 	    length == 0 || length > MBUF_DRVAUX_MAXLEN) {
1291 		return EINVAL;
1292 	}
1293 
1294 	if (data_p != NULL) {
1295 		*data_p = NULL;
1296 	}
1297 
1298 	/* Check if one is already associated */
1299 	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1300 	    KERNEL_TAG_TYPE_DRVAUX)) != NULL) {
1301 		return EEXIST;
1302 	}
1303 
1304 	/* Tag is (m_drvaux_tag + module specific data) */
1305 	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
1306 	    (int)(sizeof(*p) + length), how, mbuf)) == NULL) {
1307 		return (how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK;
1308 	}
1309 
1310 	p = (struct m_drvaux_tag *)(tag->m_tag_data);
1311 	p->da_family = family;
1312 	p->da_subfamily = subfamily;
1313 	p->da_length = (int)length;
1314 
1315 	/* Associate the tag */
1316 	m_tag_prepend(mbuf, tag);
1317 
1318 	if (data_p != NULL) {
1319 		*data_p = (p + 1);
1320 	}
1321 
1322 	return 0;
1323 }
1324 
1325 errno_t
mbuf_find_drvaux(mbuf_t mbuf,u_int32_t * family_p,u_int32_t * subfamily_p,u_int32_t * length_p,void ** data_p)1326 mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
1327     u_int32_t *length_p, void **data_p)
1328 {
1329 	struct m_drvaux_tag *p;
1330 	struct m_tag *tag;
1331 
1332 	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) {
1333 		return EINVAL;
1334 	}
1335 
1336 	*data_p = NULL;
1337 
1338 	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1339 	    KERNEL_TAG_TYPE_DRVAUX)) == NULL) {
1340 		return ENOENT;
1341 	}
1342 
1343 	/* Must be at least size of m_drvaux_tag */
1344 	VERIFY(tag->m_tag_len >= sizeof(*p));
1345 
1346 	p = (struct m_drvaux_tag *)(tag->m_tag_data);
1347 	VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);
1348 
1349 	if (family_p != NULL) {
1350 		*family_p = p->da_family;
1351 	}
1352 	if (subfamily_p != NULL) {
1353 		*subfamily_p = p->da_subfamily;
1354 	}
1355 	if (length_p != NULL) {
1356 		*length_p = p->da_length;
1357 	}
1358 
1359 	*data_p = (p + 1);
1360 
1361 	return 0;
1362 }
1363 
1364 void
mbuf_del_drvaux(mbuf_t mbuf)1365 mbuf_del_drvaux(mbuf_t mbuf)
1366 {
1367 	struct m_tag *tag;
1368 
1369 	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) {
1370 		return;
1371 	}
1372 
1373 	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1374 	    KERNEL_TAG_TYPE_DRVAUX)) != NULL) {
1375 		m_tag_delete(mbuf, tag);
1376 	}
1377 }
1378 
1379 /* mbuf stats */
/*
 * Snapshot the global mbuf allocator statistics (mbstat) into the
 * caller's KPI-visible mbuf_stat structure, field by field.
 */
void
mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	/* Per-type allocation counters are copied as a block */
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}
1401 
/*
 * Allocate a single packet (mbuf chain with pkthdr) capable of holding
 * packetlen bytes.  *maxchunks, when supplied, is in/out: on input it
 * caps the number of segments in the chain (0 = no cap); on output it
 * reports the number actually used.  The os_log calls record the
 * failure site by source line for field debugging.
 */
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
    mbuf_t *mbuf)
{
	errno_t error = 0;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks != NULL ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		os_log(OS_LOG_DEFAULT, "mbuf_allocpacket %d", __LINE__);
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks != NULL ? &numchunks : NULL, how, 1, 0);
	if (m == NULL) {
		/* Distinguish "chain would exceed caller's cap" from OOM */
		if (maxchunks != NULL && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
			os_log(OS_LOG_DEFAULT, "mbuf_allocpacket %d", __LINE__);
		} else {
			error = ENOMEM;
			os_log(OS_LOG_DEFAULT, "mbuf_allocpacket %d", __LINE__);
		}
	} else {
		if (maxchunks != NULL) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}
1436 
1437 errno_t
mbuf_allocpacket_list(unsigned int numpkts,mbuf_how_t how,size_t packetlen,unsigned int * maxchunks,mbuf_t * mbuf)1438 mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
1439     unsigned int *maxchunks, mbuf_t *mbuf)
1440 {
1441 	errno_t error = 0;
1442 	struct mbuf *m;
1443 	unsigned int numchunks = maxchunks ? *maxchunks : 0;
1444 
1445 	if (numpkts == 0) {
1446 		error = EINVAL;
1447 		goto out;
1448 	}
1449 	if (packetlen == 0) {
1450 		error = EINVAL;
1451 		goto out;
1452 	}
1453 	m = m_allocpacket_internal(&numpkts, packetlen,
1454 	    maxchunks != NULL ? &numchunks : NULL, how, 1, 0);
1455 	if (m == NULL) {
1456 		if (maxchunks != NULL && *maxchunks && numchunks > *maxchunks) {
1457 			error = ENOBUFS;
1458 		} else {
1459 			error = ENOMEM;
1460 		}
1461 	} else {
1462 		if (maxchunks != NULL) {
1463 			*maxchunks = numchunks;
1464 		}
1465 		error = 0;
1466 		*mbuf = m;
1467 	}
1468 out:
1469 	return error;
1470 }
1471 
1472 __private_extern__ size_t
mbuf_pkt_list_len(mbuf_t m)1473 mbuf_pkt_list_len(mbuf_t m)
1474 {
1475 	size_t len = 0;
1476 	mbuf_t n = m;
1477 
1478 	while (n) {
1479 		len += mbuf_pkthdr_len(n);
1480 		n = mbuf_nextpkt(n);
1481 	}
1482 	return len;
1483 }
1484 
1485 __private_extern__ size_t
mbuf_pkt_list_maxlen(mbuf_t m)1486 mbuf_pkt_list_maxlen(mbuf_t m)
1487 {
1488 	size_t maxlen = 0;
1489 	mbuf_t n = m;
1490 
1491 	while (n) {
1492 		maxlen += mbuf_pkthdr_maxlen(n);
1493 		n = mbuf_nextpkt(n);
1494 	}
1495 	return maxlen;
1496 }
1497 
1498 /*
1499  * mbuf_copyback differs from m_copyback in a few ways:
1500  * 1) mbuf_copyback will allocate clusters for new mbufs we append
1501  * 2) mbuf_copyback will grow the last mbuf in the chain if possible
1502  * 3) mbuf_copyback reports whether or not the operation succeeded
1503  * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
1504  */
/*
 * Copy len0 bytes from `data` into the chain starting at byte offset
 * `off`, allocating and appending mbufs (and opportunistically growing
 * the last mbuf or attaching clusters) as needed.  See the block
 * comment above for how this differs from m_copyback().
 * Returns 0 on success or ENOBUFS if an allocation fails; on failure
 * the chain may have been partially extended/written.
 */
errno_t
mbuf_copyback(
	mbuf_t          m,
	size_t          off,
	size_t          len0,
	const void      *data __sized_by_or_null(len0),
	mbuf_how_t      how)
{
	size_t  mlen, len = len0;
	mbuf_ref_t  m_start = m;        /* head, for the final pkthdr fixup */
	mbuf_ref_t  n;
	int             totlen = 0;     /* bytes of the chain covered so far */
	errno_t         result = 0;
	const char      *cp = data;

	if (m == NULL || len == 0 || data == NULL) {
		return EINVAL;
	}

	/* Walk to the mbuf containing `off`, zero-filling any gap */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			/* Extend the chain with a zeroed mbuf for the gap */
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = (int32_t)MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	/* Copy data, growing the tail mbuf or appending new ones */
	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			/* Grow the last mbuf into its trailing space */
			size_t  grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + mtod(m, char *), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		/* Account for the skipped prefix only on the first copy */
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0) {
			break;
		}
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = (int32_t)MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	/* If the chain grew past the recorded packet length, update it */
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) {
		m_start->m_pkthdr.len = totlen;
	}

	return result;
}
1582 
/* Return the data capacity of a plain (non-pkthdr) mbuf, in bytes. */
u_int32_t
mbuf_get_mlen(void)
{
	return _MLEN;
}
1588 
/* Return the data capacity of an mbuf carrying a packet header, in bytes. */
u_int32_t
mbuf_get_mhlen(void)
{
	return _MHLEN;
}
1594 
/*
 * Return the smallest payload size for which a cluster is preferred
 * over chaining two plain mbufs (pkthdr capacity + plain capacity).
 */
u_int32_t
mbuf_get_minclsize(void)
{
	return MHLEN + MLEN;
}
1600 
/* Return the total size of one mbuf structure, in bytes. */
u_int32_t
mbuf_get_msize(void)
{
	return _MSIZE;
}
1606 
/* Return the number of defined mbuf traffic classes. */
u_int32_t
mbuf_get_traffic_class_max_count(void)
{
	return MBUF_TC_MAX;
}
1612 
1613 errno_t
mbuf_get_traffic_class_index(mbuf_traffic_class_t tc,u_int32_t * index)1614 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
1615 {
1616 	if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) {
1617 		return EINVAL;
1618 	}
1619 
1620 	*index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
1621 	return 0;
1622 }
1623 
1624 mbuf_traffic_class_t
mbuf_get_traffic_class(mbuf_t m)1625 mbuf_get_traffic_class(mbuf_t m)
1626 {
1627 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1628 		return MBUF_TC_BE;
1629 	}
1630 
1631 	return m_get_traffic_class(m);
1632 }
1633 
1634 errno_t
mbuf_set_traffic_class(mbuf_t m,mbuf_traffic_class_t tc)1635 mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
1636 {
1637 	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1638 	    ((u_int32_t)tc >= MBUF_TC_MAX)) {
1639 		return EINVAL;
1640 	}
1641 
1642 	return m_set_traffic_class(m, tc);
1643 }
1644 
1645 int
mbuf_is_traffic_class_privileged(mbuf_t m)1646 mbuf_is_traffic_class_privileged(mbuf_t m)
1647 {
1648 	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1649 	    !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1650 		return 0;
1651 	}
1652 
1653 	return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0;
1654 }
1655 
/* Return the number of defined mbuf service classes. */
u_int32_t
mbuf_get_service_class_max_count(void)
{
	return MBUF_SC_MAX_CLASSES;
}
1661 
1662 errno_t
mbuf_get_service_class_index(mbuf_svc_class_t sc,u_int32_t * index)1663 mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
1664 {
1665 	if (index == NULL || !MBUF_VALID_SC(sc)) {
1666 		return EINVAL;
1667 	}
1668 
1669 	*index = MBUF_SCIDX(sc);
1670 	return 0;
1671 }
1672 
1673 mbuf_svc_class_t
mbuf_get_service_class(mbuf_t m)1674 mbuf_get_service_class(mbuf_t m)
1675 {
1676 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1677 		return MBUF_SC_BE;
1678 	}
1679 
1680 	return m_get_service_class(m);
1681 }
1682 
1683 errno_t
mbuf_set_service_class(mbuf_t m,mbuf_svc_class_t sc)1684 mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
1685 {
1686 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1687 		return EINVAL;
1688 	}
1689 
1690 	return m_set_service_class(m, sc);
1691 }
1692 
/*
 * Translate the private pkt_flags router-resolution bits into the
 * KPI-visible auxiliary flags.  A flag is reported only when both the
 * address-family resolve bit and the resolve-router bit are set.
 */
errno_t
mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
{
	u_int32_t flags;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) {
		return EINVAL;
	}

	*flagsp = 0;
	flags = m->m_pkthdr.pkt_flags;
	if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) ==
	    (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) {
		*flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
	}
	if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) ==
	    (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) {
		*flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
	}

	/* These 2 flags are mutually exclusive */
	VERIFY((*flagsp &
	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));

	return 0;
}
1720 
1721 errno_t
mbuf_get_driver_scratch(mbuf_t m,u_int8_t ** area,size_t * area_len)1722 mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
1723 {
1724 	if (m == NULL || area == NULL || area_len == NULL ||
1725 	    !(m->m_flags & M_PKTHDR)) {
1726 		return EINVAL;
1727 	}
1728 
1729 	*area_len = m_scratch_get(m, area);
1730 	return 0;
1731 }
1732 
1733 errno_t
mbuf_get_unsent_data_bytes(const mbuf_t m,u_int32_t * unsent_data)1734 mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data)
1735 {
1736 	if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) {
1737 		return EINVAL;
1738 	}
1739 
1740 	if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1741 		return EINVAL;
1742 	}
1743 
1744 	*unsent_data = m->m_pkthdr.bufstatus_if +
1745 	    m->m_pkthdr.bufstatus_sndbuf;
1746 	return 0;
1747 }
1748 
1749 errno_t
mbuf_get_buffer_status(const mbuf_t m,mbuf_buffer_status_t * buf_status)1750 mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status)
1751 {
1752 	if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) ||
1753 	    !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1754 		return EINVAL;
1755 	}
1756 
1757 	buf_status->buf_interface = m->m_pkthdr.bufstatus_if;
1758 	buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf;
1759 	return 0;
1760 }
1761 
1762 errno_t
mbuf_pkt_new_flow(const mbuf_t m,u_int32_t * retval)1763 mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval)
1764 {
1765 	if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1766 		return EINVAL;
1767 	}
1768 	if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) {
1769 		*retval = 1;
1770 	} else {
1771 		*retval = 0;
1772 	}
1773 	return 0;
1774 }
1775 
1776 errno_t
mbuf_last_pkt(const mbuf_t m,u_int32_t * retval)1777 mbuf_last_pkt(const mbuf_t m, u_int32_t *retval)
1778 {
1779 	if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1780 		return EINVAL;
1781 	}
1782 	if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
1783 		*retval = 1;
1784 	} else {
1785 		*retval = 0;
1786 	}
1787 	return 0;
1788 }
1789 
1790 errno_t
mbuf_get_timestamp(mbuf_t m,u_int64_t * ts,boolean_t * valid)1791 mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid)
1792 {
1793 	if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) {
1794 		return EINVAL;
1795 	}
1796 
1797 	if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1798 		if (valid != NULL) {
1799 			*valid = FALSE;
1800 		}
1801 		*ts = 0;
1802 	} else {
1803 		if (valid != NULL) {
1804 			*valid = TRUE;
1805 		}
1806 		*ts = m->m_pkthdr.pkt_timestamp;
1807 	}
1808 	return 0;
1809 }
1810 
1811 errno_t
mbuf_set_timestamp(mbuf_t m,u_int64_t ts,boolean_t valid)1812 mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid)
1813 {
1814 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1815 		return EINVAL;
1816 	}
1817 
1818 	if (valid == FALSE) {
1819 		m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
1820 		m->m_pkthdr.pkt_timestamp = 0;
1821 	} else {
1822 		m->m_pkthdr.pkt_flags |= PKTF_TS_VALID;
1823 		m->m_pkthdr.pkt_timestamp = ts;
1824 	}
1825 	return 0;
1826 }
1827 
1828 errno_t
mbuf_get_status(mbuf_t m,kern_return_t * status)1829 mbuf_get_status(mbuf_t m, kern_return_t *status)
1830 {
1831 	if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) {
1832 		return EINVAL;
1833 	}
1834 
1835 	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1836 		*status = 0;
1837 	} else {
1838 		*status = m->m_pkthdr.drv_tx_status;
1839 	}
1840 	return 0;
1841 }
1842 
/*
 * Lazily enable the inline driver metadata area: on first use, set the
 * marker flag and zero the whole area so stale header bytes are never
 * interpreted as driver data.
 */
static void
driver_mtag_init(mbuf_t m)
{
	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		m->m_pkthdr.pkt_flags |= PKTF_DRIVER_MTAG;
		bzero(&m->m_pkthdr.driver_mtag,
		    sizeof(m->m_pkthdr.driver_mtag));
	}
}
1852 
1853 errno_t
mbuf_set_status(mbuf_t m,kern_return_t status)1854 mbuf_set_status(mbuf_t m, kern_return_t status)
1855 {
1856 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1857 		return EINVAL;
1858 	}
1859 
1860 	driver_mtag_init(m);
1861 
1862 	m->m_pkthdr.drv_tx_status = status;
1863 
1864 	return 0;
1865 }
1866 
1867 errno_t
mbuf_get_flowid(mbuf_t m,u_int16_t * flowid)1868 mbuf_get_flowid(mbuf_t m, u_int16_t *flowid)
1869 {
1870 	if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) {
1871 		return EINVAL;
1872 	}
1873 
1874 	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1875 		*flowid = 0;
1876 	} else {
1877 		*flowid = m->m_pkthdr.drv_flowid;
1878 	}
1879 	return 0;
1880 }
1881 
1882 errno_t
mbuf_set_flowid(mbuf_t m,u_int16_t flowid)1883 mbuf_set_flowid(mbuf_t m, u_int16_t flowid)
1884 {
1885 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1886 		return EINVAL;
1887 	}
1888 
1889 	driver_mtag_init(m);
1890 
1891 	m->m_pkthdr.drv_flowid = flowid;
1892 
1893 	return 0;
1894 }
1895 
1896 errno_t
mbuf_get_tx_compl_data(mbuf_t m,uintptr_t * arg,uintptr_t * data)1897 mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data)
1898 {
1899 	if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL ||
1900 	    data == NULL) {
1901 		return EINVAL;
1902 	}
1903 
1904 	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1905 		*arg = 0;
1906 		*data = 0;
1907 	} else {
1908 		*arg = m->m_pkthdr.drv_tx_compl_arg;
1909 		*data = m->m_pkthdr.drv_tx_compl_data;
1910 	}
1911 	return 0;
1912 }
1913 
1914 errno_t
mbuf_set_tx_compl_data(mbuf_t m,uintptr_t arg,uintptr_t data)1915 mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data)
1916 {
1917 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1918 		return EINVAL;
1919 	}
1920 
1921 	driver_mtag_init(m);
1922 
1923 	m->m_pkthdr.drv_tx_compl_arg = arg;
1924 	m->m_pkthdr.drv_tx_compl_data = data;
1925 
1926 	return 0;
1927 }
1928 
1929 static u_int32_t
get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)1930 get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)
1931 {
1932 	u_int32_t i;
1933 
1934 	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1935 		if (mbuf_tx_compl_table[i] == callback) {
1936 			return i;
1937 		}
1938 	}
1939 	return UINT32_MAX;
1940 }
1941 
1942 static u_int32_t
get_tx_compl_callback_index(mbuf_tx_compl_func callback)1943 get_tx_compl_callback_index(mbuf_tx_compl_func callback)
1944 {
1945 	u_int32_t i;
1946 
1947 	lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
1948 
1949 	i = get_tx_compl_callback_index_locked(callback);
1950 
1951 	lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);
1952 
1953 	return i;
1954 }
1955 
1956 mbuf_tx_compl_func
m_get_tx_compl_callback(u_int32_t idx)1957 m_get_tx_compl_callback(u_int32_t idx)
1958 {
1959 	mbuf_tx_compl_func cb;
1960 
1961 	if (idx >= MAX_MBUF_TX_COMPL_FUNC) {
1962 		ASSERT(0);
1963 		return NULL;
1964 	}
1965 	lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
1966 	cb = mbuf_tx_compl_table[idx];
1967 	lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);
1968 	return cb;
1969 }
1970 
1971 errno_t
mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)1972 mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
1973 {
1974 	int i;
1975 	errno_t error;
1976 
1977 	if (callback == NULL) {
1978 		return EINVAL;
1979 	}
1980 
1981 	lck_rw_lock_exclusive(&mbuf_tx_compl_tbl_lock);
1982 
1983 	i = get_tx_compl_callback_index_locked(callback);
1984 	if (i != -1) {
1985 		error = EEXIST;
1986 		goto unlock;
1987 	}
1988 
1989 	/* assume the worst */
1990 	error = ENOSPC;
1991 	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1992 		if (mbuf_tx_compl_table[i] == NULL) {
1993 			mbuf_tx_compl_table[i] = callback;
1994 			error = 0;
1995 			goto unlock;
1996 		}
1997 	}
1998 unlock:
1999 	lck_rw_unlock_exclusive(&mbuf_tx_compl_tbl_lock);
2000 
2001 	return error;
2002 }
2003 
2004 errno_t
mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)2005 mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
2006 {
2007 	int i;
2008 	errno_t error;
2009 
2010 	if (callback == NULL) {
2011 		return EINVAL;
2012 	}
2013 
2014 	lck_rw_lock_exclusive(&mbuf_tx_compl_tbl_lock);
2015 
2016 	/* assume the worst */
2017 	error = ENOENT;
2018 	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
2019 		if (mbuf_tx_compl_table[i] == callback) {
2020 			mbuf_tx_compl_table[i] = NULL;
2021 			error = 0;
2022 			goto unlock;
2023 		}
2024 	}
2025 unlock:
2026 	lck_rw_unlock_exclusive(&mbuf_tx_compl_tbl_lock);
2027 
2028 	return error;
2029 }
2030 
2031 errno_t
mbuf_get_timestamp_requested(mbuf_t m,boolean_t * requested)2032 mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested)
2033 {
2034 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
2035 		return EINVAL;
2036 	}
2037 
2038 	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
2039 		*requested = FALSE;
2040 	} else {
2041 		*requested = TRUE;
2042 	}
2043 	return 0;
2044 }
2045 
/*
 * Request a TX-completion timestamp callback for this packet.  The
 * callback must already be registered (mbuf_register_tx_compl_callback);
 * its table index is recorded as a bit in pkt_compl_callbacks.  On the
 * first request for a packet, a unique completion context id is drawn
 * from a global counter and returned via *pktid.
 */
errno_t
mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
    mbuf_tx_compl_func callback)
{
	uint32_t i;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
	    pktid == NULL) {
		return EINVAL;
	}

	/* The callback must be in the registration table */
	i = get_tx_compl_callback_index(callback);
	if (i == UINT32_MAX) {
		return ENOENT;
	}

	m_add_crumb(m, PKT_CRUMB_TS_COMP_REQ);

	/* First request for this packet: reset state and assign an id */
	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		m->m_pkthdr.pkt_compl_callbacks = 0;
		m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
		m->m_pkthdr.pkt_compl_context =
		    os_atomic_inc_orig(&mbuf_tx_compl_index, relaxed);

#if (DEBUG || DEVELOPMENT)
		os_atomic_inc(&mbuf_tx_compl_requested, relaxed);
#endif /* (DEBUG || DEVELOPMENT) */
	}
	/* Record this callback's slot in the per-packet bitmap */
	m->m_pkthdr.pkt_compl_callbacks |= (1 << i);
	*pktid = m->m_pkthdr.pkt_compl_context;

	return 0;
}
2079 
/*
 * Invoke every TX-completion callback whose bit is set in the packet's
 * pkt_compl_callbacks bitmap, passing the completion context, interface,
 * timestamp (0 if none was captured), and driver completion data.
 * ifp may be NULL when the packet is being aborted rather than
 * completed.  The bitmap is cleared on return so callbacks fire once.
 */
void
m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
{
	int i;

	if (m == NULL) {
		return;
	}

	/* Nothing to do unless a timestamp/completion was requested */
	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		return;
	}

	m_add_crumb(m, PKT_CRUMB_TS_COMP_CB);

#if (DEBUG || DEVELOPMENT)
	/* Debug aid: synthesize a timestamp if the driver never set one */
	if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
	    (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
	    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
		struct timespec now;

		nanouptime(&now);
		net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
	}
#endif /* (DEBUG || DEVELOPMENT) */

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		mbuf_tx_compl_func callback;

		if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) {
			continue;
		}

		/* Re-read under the lock each pass; slots may be unregistered */
		lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
		callback = mbuf_tx_compl_table[i];
		lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);

		if (callback != NULL) {
			callback(m->m_pkthdr.pkt_compl_context,
			    ifp,
			    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) ?
			    m->m_pkthdr.pkt_timestamp: 0,
			    m->m_pkthdr.drv_tx_compl_arg,
			    m->m_pkthdr.drv_tx_compl_data,
			    m->m_pkthdr.drv_tx_status);
		}
	}
#if (DEBUG || DEVELOPMENT)
	if (m->m_pkthdr.pkt_compl_callbacks != 0) {
		os_atomic_inc(&mbuf_tx_compl_callbacks, relaxed);
		/* NULL ifp means the packet was dropped, not transmitted */
		if (ifp == NULL) {
			os_atomic_inc(&mbuf_tx_compl_aborted, relaxed);
		}
	}
#endif /* (DEBUG || DEVELOPMENT) */
	m->m_pkthdr.pkt_compl_callbacks = 0;
}
2137 
/*
 * Report whether the packet is marked as a keepalive probe.  Note the
 * raw flag bits are stored into the boolean_t: nonzero means set
 * (callers must test truthiness, not compare against TRUE).
 */
errno_t
mbuf_get_keepalive_flag(mbuf_t m, boolean_t *is_keepalive)
{
	if (m == NULL || is_keepalive == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	*is_keepalive = (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);

	return 0;
}
2149 
2150 errno_t
mbuf_set_keepalive_flag(mbuf_t m,boolean_t is_keepalive)2151 mbuf_set_keepalive_flag(mbuf_t m, boolean_t is_keepalive)
2152 {
2153 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
2154 		return EINVAL;
2155 	}
2156 
2157 	if (is_keepalive) {
2158 		m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
2159 	} else {
2160 		m->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
2161 	}
2162 
2163 	return 0;
2164 }
2165 
/*
 * Report whether the packet is marked as the one that woke the system.
 * As with mbuf_get_keepalive_flag(), the raw flag bits are stored:
 * nonzero means set.
 */
errno_t
mbuf_get_wake_packet_flag(mbuf_t m, boolean_t *is_wake_packet)
{
	if (m == NULL || is_wake_packet == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	*is_wake_packet = (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT);

	return 0;
}
2177 
2178 errno_t
mbuf_set_wake_packet_flag(mbuf_t m,boolean_t is_wake_packet)2179 mbuf_set_wake_packet_flag(mbuf_t m, boolean_t is_wake_packet)
2180 {
2181 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
2182 		return EINVAL;
2183 	}
2184 
2185 	if (is_wake_packet) {
2186 		m->m_pkthdr.pkt_flags |= PKTF_WAKE_PKT;
2187 	} else {
2188 		m->m_pkthdr.pkt_flags &= ~PKTF_WAKE_PKT;
2189 	}
2190 
2191 	return 0;
2192 }
2193