1 /*
2 * Copyright (c) 2004-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define __KPI__
30
31 #include <sys/param.h>
32 #include <sys/mbuf.h>
33 #include <sys/mcache.h>
34 #include <sys/socket.h>
35 #include <kern/debug.h>
36 #include <libkern/OSAtomic.h>
37 #include <string.h>
38 #include <net/dlil.h>
39 #include <netinet/in.h>
40 #include <netinet/ip_var.h>
41
42 #include "net/net_str_id.h"
43
/*
 * mbuf flags visible to KPI clients; do not add private flags here.
 * mbuf_flags()/mbuf_setflags() mask against this so kexts never see or
 * alter kernel-private flag bits.
 */
static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
    MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
    MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);

/* Unalterable mbuf flags: public, but clients may not toggle them. */
static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);
51
/* Registry of driver TX-completion callbacks, indexed by completion id. */
#define MAX_MBUF_TX_COMPL_FUNC 32
mbuf_tx_compl_func
    mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC];
/* Protects mbuf_tx_compl_table/index; defined elsewhere in the mbuf code. */
extern lck_rw_t mbuf_tx_compl_tbl_lock;
u_int32_t mbuf_tx_compl_index = 0;
57
#if (DEVELOPMENT || DEBUG)
/*
 * Debug instrumentation for the TX-completion callback path, exported
 * under kern.ipc.mbtxcf.*; compiled out of RELEASE kernels.
 */
int mbuf_tx_compl_debug = 0;
uint64_t mbuf_tx_compl_requested __attribute__((aligned(8))) = 0;
uint64_t mbuf_tx_compl_callbacks __attribute__((aligned(8))) = 0;
uint64_t mbuf_tx_compl_aborted __attribute__((aligned(8))) = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_NODE(_kern_ipc, OID_AUTO, mbtxcf,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mbuf_tx_compl_debug, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, index,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_index, 0, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, requested,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_requested, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, callbacks,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_callbacks, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_aborted, "");
#endif /* (DEBUG || DEVELOPMENT) */
78
79 void *
mbuf_data(mbuf_t mbuf)80 mbuf_data(mbuf_t mbuf)
81 {
82 return mbuf->m_data;
83 }
84
85 void *
mbuf_datastart(mbuf_t mbuf)86 mbuf_datastart(mbuf_t mbuf)
87 {
88 if (mbuf->m_flags & M_EXT) {
89 return mbuf->m_ext.ext_buf;
90 }
91 if (mbuf->m_flags & M_PKTHDR) {
92 return mbuf->m_pktdat;
93 }
94 return mbuf->m_dat;
95 }
96
/*
 * Point the mbuf's data at an arbitrary location inside its buffer and set
 * its length.  Returns EINVAL if [data, data+len) does not lie entirely
 * within the mbuf's backing storage.
 */
errno_t
mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
{
	/* Pointers are compared as integers against the buffer bounds. */
	size_t start = (size_t)((char *)mbuf_datastart(mbuf));
	size_t maxlen = mbuf_maxlen(mbuf);

	if ((size_t)data < start || ((size_t)data) + len > start + maxlen) {
		return EINVAL;
	}
	mbuf->m_data = data;
	mbuf->m_len = (int32_t)len;

	return 0;
}
111
/*
 * Set the mbuf's data pointer so that len bytes fit at the end of the
 * buffer on a 32-bit boundary.  Refuses (ENOTSUP) if the cluster is
 * shared, since moving m_data would corrupt the other reference's view.
 */
errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) {
		return ENOTSUP;
	}
	mbuf->m_data = mbuf_datastart(mbuf);
	/* Round the remaining space down to a multiple of 4 bytes. */
	mbuf->m_data +=
	    ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1));

	return 0;
}
124
/*
 * This function is used to provide mcl_to_paddr via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
addr64_t
mbuf_data_to_physical(void *ptr)
{
	/* Thin wrapper: translate a cluster virtual address to physical. */
	return (addr64_t)mcl_to_paddr(ptr);
}
135
136 errno_t
mbuf_get(mbuf_how_t how,mbuf_type_t type,mbuf_t * mbuf)137 mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
138 {
139 /* Must set *mbuf to NULL in failure case */
140 *mbuf = m_get(how, type);
141
142 return *mbuf == NULL ? ENOMEM : 0;
143 }
144
145 errno_t
mbuf_gethdr(mbuf_how_t how,mbuf_type_t type,mbuf_t * mbuf)146 mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
147 {
148 /* Must set *mbuf to NULL in failure case */
149 *mbuf = m_gethdr(how, type);
150
151 return *mbuf == NULL ? ENOMEM : 0;
152 }
153
154 errno_t
mbuf_attachcluster(mbuf_how_t how,mbuf_type_t type,mbuf_t * mbuf,caddr_t extbuf,void (* extfree)(caddr_t,u_int,caddr_t),size_t extsize,caddr_t extarg)155 mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
156 caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
157 size_t extsize, caddr_t extarg)
158 {
159 if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) {
160 return EINVAL;
161 }
162
163 if ((*mbuf = m_clattach(*mbuf, type, extbuf,
164 extfree, extsize, extarg, how, 0)) == NULL) {
165 return ENOMEM;
166 }
167
168 return 0;
169 }
170
171 errno_t
mbuf_ring_cluster_alloc(mbuf_how_t how,mbuf_type_t type,mbuf_t * mbuf,void (* extfree)(caddr_t,u_int,caddr_t),size_t * size)172 mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
173 void (*extfree)(caddr_t, u_int, caddr_t), size_t *size)
174 {
175 caddr_t extbuf = NULL;
176 errno_t err;
177
178 if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) {
179 return EINVAL;
180 }
181
182 if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) {
183 return err;
184 }
185
186 if ((*mbuf = m_clattach(*mbuf, type, extbuf,
187 extfree, *size, NULL, how, 1)) == NULL) {
188 mbuf_freecluster(extbuf, *size);
189 return ENOMEM;
190 }
191
192 return 0;
193 }
194
/* Report whether the mbuf's paired (ring) cluster is currently active. */
int
mbuf_ring_cluster_is_active(mbuf_t mbuf)
{
	return m_ext_paired_is_active(mbuf);
}
200
201 errno_t
mbuf_ring_cluster_activate(mbuf_t mbuf)202 mbuf_ring_cluster_activate(mbuf_t mbuf)
203 {
204 if (mbuf_ring_cluster_is_active(mbuf)) {
205 return EBUSY;
206 }
207
208 m_ext_paired_activate(mbuf);
209 return 0;
210 }
211
212 errno_t
mbuf_cluster_set_prop(mbuf_t mbuf,u_int32_t oldprop,u_int32_t newprop)213 mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop)
214 {
215 if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) {
216 return EINVAL;
217 }
218
219 return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY;
220 }
221
222 errno_t
mbuf_cluster_get_prop(mbuf_t mbuf,u_int32_t * prop)223 mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop)
224 {
225 if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) {
226 return EINVAL;
227 }
228
229 *prop = m_ext_get_prop(mbuf);
230 return 0;
231 }
232
/*
 * Allocate a bare cluster of at least *size bytes, choosing the smallest
 * size class (2K / 4K / 16K) that fits.  On success *size is updated to
 * the actual cluster size and *addr to the buffer; on failure *addr is
 * NULL and *size is 0 (ENOMEM), or ENOTSUP if 16K clusters are requested
 * but the jumbo pool was never populated.
 */
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL) {
		return EINVAL;
	}

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > MBIGCLBYTES && njcl == 0) {
		return ENOTSUP;
	}

	/* Try each size class in increasing order; first fit wins. */
	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) {
		*size = MCLBYTES;
	} else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
	    (*addr = m_bigalloc(how)) != NULL) {
		*size = MBIGCLBYTES;
	} else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL) {
		*size = M16KCLBYTES;
	} else {
		/* Request too large for any class, or allocation failed. */
		*size = 0;
	}

	if (*addr == NULL) {
		return ENOMEM;
	}

	return 0;
}
265
266 void
mbuf_freecluster(caddr_t addr,size_t size)267 mbuf_freecluster(caddr_t addr, size_t size)
268 {
269 if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) {
270 panic("%s: invalid size (%ld) for cluster %p", __func__,
271 size, (void *)addr);
272 }
273
274 if (size == MCLBYTES) {
275 m_mclfree(addr);
276 } else if (size == MBIGCLBYTES) {
277 m_bigfree(addr, MBIGCLBYTES, NULL);
278 } else if (njcl > 0) {
279 m_16kfree(addr, M16KCLBYTES, NULL);
280 } else {
281 panic("%s: freeing jumbo cluster to an empty pool", __func__);
282 }
283 }
284
/*
 * Attach a cluster of exactly `size` bytes (2K/4K/16K) to *mbuf,
 * allocating a fresh mbuf first when *mbuf is NULL.  On failure any
 * mbuf this function itself allocated is freed and *mbuf is set NULL;
 * a caller-supplied mbuf is left untouched on failure.
 */
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;    /* nonzero iff we allocated *mbuf here */

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL) {
			return ENOMEM;
		}
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		/* Only the three exact cluster sizes are accepted. */
		error = EINVAL;
		goto out;
	}
	/* M_EXT not set means the cluster allocation itself failed. */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}
332
/*
 * Attach a standard (2K) cluster to *mbuf, allocating the mbuf first when
 * *mbuf is NULL.  On failure an mbuf allocated here is freed and *mbuf set
 * NULL; a caller-supplied mbuf survives so the caller can retry or free it.
 */
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;    /* nonzero iff we allocated *mbuf here */
	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error) {
			return error;
		}
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	/* Cluster attach failed on an mbuf we created: release it. */
	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	/* NULL check first: *mbuf may have just been cleared above. */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
	return error;
}
365
366
367 errno_t
mbuf_getpacket(mbuf_how_t how,mbuf_t * mbuf)368 mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
369 {
370 /* Must set *mbuf to NULL in failure case */
371 errno_t error = 0;
372
373 *mbuf = m_getpacket_how(how);
374
375 if (*mbuf == NULL) {
376 if (how == MBUF_WAITOK) {
377 error = ENOMEM;
378 } else {
379 error = EWOULDBLOCK;
380 }
381 }
382
383 return error;
384 }
385
/*
 * This function is used to provide m_free via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	/* Frees one mbuf and returns its m_next (may be NULL). */
	return m_free(mbuf);
}
395
/*
 * This function is used to provide m_freem via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void
mbuf_freem(mbuf_t mbuf)
{
	/* Frees the entire m_next chain. */
	m_freem(mbuf);
}
405
/*
 * Free a list of packets (m_nextpkt chain, each with its m_next chain).
 * Returns the count reported by m_freem_list.
 */
int
mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}
411
/* Bytes available before the current data pointer (for prepending). */
size_t
mbuf_leadingspace(const mbuf_t mbuf)
{
	return M_LEADINGSPACE(mbuf);
}
417
/*
 * This function is used to provide m_trailingspace via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
size_t
mbuf_trailingspace(const mbuf_t mbuf)
{
	/* Bytes available after the current data (for appending). */
	return M_TRAILINGSPACE(mbuf);
}
428
429 /* Manipulation */
430 errno_t
mbuf_copym(const mbuf_t src,size_t offset,size_t len,mbuf_how_t how,mbuf_t * new_mbuf)431 mbuf_copym(const mbuf_t src, size_t offset, size_t len,
432 mbuf_how_t how, mbuf_t *new_mbuf)
433 {
434 /* Must set *mbuf to NULL in failure case */
435 *new_mbuf = m_copym(src, (int)offset, (int)len, how);
436
437 return *new_mbuf == NULL ? ENOMEM : 0;
438 }
439
440 errno_t
mbuf_dup(const mbuf_t src,mbuf_how_t how,mbuf_t * new_mbuf)441 mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
442 {
443 /* Must set *new_mbuf to NULL in failure case */
444 *new_mbuf = m_dup(src, how);
445
446 return *new_mbuf == NULL ? ENOMEM : 0;
447 }
448
449 errno_t
mbuf_prepend(mbuf_t * orig,size_t len,mbuf_how_t how)450 mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
451 {
452 /* Must set *orig to NULL in failure case */
453 *orig = m_prepend_2(*orig, (int)len, how, 0);
454
455 return *orig == NULL ? ENOMEM : 0;
456 }
457
458 errno_t
mbuf_split(mbuf_t src,size_t offset,mbuf_how_t how,mbuf_t * new_mbuf)459 mbuf_split(mbuf_t src, size_t offset,
460 mbuf_how_t how, mbuf_t *new_mbuf)
461 {
462 /* Must set *new_mbuf to NULL in failure case */
463 *new_mbuf = m_split(src, (int)offset, how);
464
465 return *new_mbuf == NULL ? ENOMEM : 0;
466 }
467
468 errno_t
mbuf_pullup(mbuf_t * mbuf,size_t len)469 mbuf_pullup(mbuf_t *mbuf, size_t len)
470 {
471 /* Must set *mbuf to NULL in failure case */
472 *mbuf = m_pullup(*mbuf, (int)len);
473
474 return *mbuf == NULL ? ENOMEM : 0;
475 }
476
/*
 * Ensure len bytes starting at *offset are contiguous.  On success
 * *location is the mbuf containing them and *offset becomes the offset
 * within that mbuf.  On failure (*location NULL) the chain was freed by
 * m_pulldown and ENOMEM is returned.
 */
errno_t
mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;
	*location = m_pulldown(src, (int)*offset, (int)len, &new_offset);
	*offset = new_offset;

	return *location == NULL ? ENOMEM : 0;
}
487
/*
 * This function is used to provide m_adj via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void
mbuf_adj(mbuf_t mbuf, int len)
{
	/* Trim len bytes from the front (len > 0) or back (len < 0). */
	m_adj(mbuf, len);
}
497
498 errno_t
mbuf_adjustlen(mbuf_t m,int amount)499 mbuf_adjustlen(mbuf_t m, int amount)
500 {
501 /* Verify m_len will be valid after adding amount */
502 if (amount > 0) {
503 size_t used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
504 m->m_len;
505
506 if ((size_t)(amount + used) > mbuf_maxlen(m)) {
507 return EINVAL;
508 }
509 } else if (-amount > m->m_len) {
510 return EINVAL;
511 }
512
513 m->m_len += amount;
514 return 0;
515 }
516
517 mbuf_t
mbuf_concatenate(mbuf_t dst,mbuf_t src)518 mbuf_concatenate(mbuf_t dst, mbuf_t src)
519 {
520 if (dst == NULL) {
521 return NULL;
522 }
523
524 m_cat(dst, src);
525
526 /* return dst as is in the current implementation */
527 return dst;
528 }
/*
 * Copy len bytes starting at offset `off` in the chain m0 into out_data.
 * Unlike m_copydata this returns EINVAL instead of panicking when the
 * chain is shorter than off + len.
 */
errno_t
mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data)
{
	/* Copied m_copydata, added error handling (don't just panic) */
	size_t count;
	mbuf_t m = m0;

	/* Keep values representable as int for the arithmetic below. */
	if (off >= INT_MAX || len >= INT_MAX) {
		return EINVAL;
	}

	/* Walk forward to the mbuf containing offset `off`. */
	while (off > 0) {
		if (m == 0) {
			return EINVAL;
		}
		if (off < (size_t)m->m_len) {
			break;
		}
		off -= m->m_len;
		m = m->m_next;
	}
	/* Copy out of successive mbufs; off is nonzero only for the first. */
	while (len > 0) {
		if (m == 0) {
			return EINVAL;
		}
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char *)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}
564
565 int
mbuf_mclhasreference(mbuf_t mbuf)566 mbuf_mclhasreference(mbuf_t mbuf)
567 {
568 if ((mbuf->m_flags & M_EXT)) {
569 return m_mclhasreference(mbuf);
570 } else {
571 return 0;
572 }
573 }
574
575
/* mbuf header */

/* Next mbuf in this packet's data chain (NULL at end). */
mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}
582
583 errno_t
mbuf_setnext(mbuf_t mbuf,mbuf_t next)584 mbuf_setnext(mbuf_t mbuf, mbuf_t next)
585 {
586 if (next && ((next)->m_nextpkt != NULL ||
587 (next)->m_type == MT_FREE)) {
588 return EINVAL;
589 }
590 mbuf->m_next = next;
591
592 return 0;
593 }
594
/* Next packet in a queue of packets (NULL at end). */
mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}
600
/* Set the next packet pointer; no validation is performed. */
void
mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}
606
/* Length of data in this single mbuf (not the whole chain). */
size_t
mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}
612
/* Set this mbuf's data length; caller must keep it within capacity. */
void
mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = (int32_t)len;
}
618
/*
 * Capacity of this mbuf's buffer: the cluster size when one is attached,
 * otherwise the space from the data start to the end of the internal
 * m_dat area (smaller when a packet header consumes part of it).
 */
size_t
mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_size;
	}
	return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf));
}
627
/* The mbuf's allocation type (MT_DATA, MT_HEADER, ...). */
mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}
633
634 errno_t
mbuf_settype(mbuf_t mbuf,mbuf_type_t new_type)635 mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
636 {
637 if (new_type == MBUF_TYPE_FREE) {
638 return EINVAL;
639 }
640
641 m_mchtype(mbuf, new_type);
642
643 return 0;
644 }
645
/* Public view of m_flags: kernel-private flag bits are masked out. */
mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}
651
/*
 * Replace the mbuf's public flags with `flags`, preserving private bits.
 * If the M_PKTHDR bit would change, the packet header is attached or
 * detached via m_reinit rather than by toggling the bit directly.
 */
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;
	mbuf_flags_t oflags = mbuf->m_flags;

	/*
	 * 1. Return error if public but un-alterable flags are changed
	 *    in flags argument.
	 * 2. Return error if bits other than public flags are set in passed
	 *    flags argument.
	 *    Please note that private flag bits must be passed as reset by
	 *    kexts, as they must use mbuf_flags KPI to get current set of
	 *    mbuf flags and mbuf_flags KPI does not expose private flags.
	 */
	if ((flags ^ oflags) & mbuf_cflags_mask) {
		ret = EINVAL;
	} else if (flags & ~mbuf_flags_mask) {
		ret = EINVAL;
	} else {
		/* Merge: new public bits + untouched private bits. */
		mbuf->m_flags = (uint16_t)flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR; /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
687
/*
 * Update only the flag bits selected by `mask` to the values in `flags`.
 * The mask may cover only alterable public flags; an M_PKTHDR transition
 * is delegated to m_reinit (same scheme as mbuf_setflags).
 */
errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	/* Reject masks touching private or unalterable flag bits. */
	if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (uint16_t)((flags & mask) | (mbuf->m_flags & ~mask));
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR; /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
712
713 errno_t
mbuf_copy_pkthdr(mbuf_t dest,const mbuf_t src)714 mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
715 {
716 if (((src)->m_flags & M_PKTHDR) == 0) {
717 return EINVAL;
718 }
719
720 m_copy_pkthdr(dest, src);
721
722 return 0;
723 }
724
/*
 * Total packet length from the packet header, or 0 when the mbuf has no
 * packet header or the stored length is (incorrectly) negative.
 */
size_t
mbuf_pkthdr_len(const mbuf_t mbuf)
{
	if (((mbuf)->m_flags & M_PKTHDR) == 0) {
		return 0;
	}
	/*
	 * While we Assert for development or debug builds,
	 * also make sure we never return negative length
	 * for release build.
	 */
	ASSERT(mbuf->m_pkthdr.len >= 0);
	if (mbuf->m_pkthdr.len < 0) {
		return 0;
	}
	return mbuf->m_pkthdr.len;
}
742
743 __private_extern__ size_t
mbuf_pkthdr_maxlen(mbuf_t m)744 mbuf_pkthdr_maxlen(mbuf_t m)
745 {
746 size_t maxlen = 0;
747 mbuf_t n = m;
748
749 while (n) {
750 maxlen += mbuf_maxlen(n);
751 n = mbuf_next(n);
752 }
753 return maxlen;
754 }
755
756 void
mbuf_pkthdr_setlen(mbuf_t mbuf,size_t len)757 mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
758 {
759 if (len > INT32_MAX) {
760 len = INT32_MAX;
761 }
762
763 mbuf->m_pkthdr.len = (int)len;
764 }
765
/* Adjust the packet-header length by amount; no bounds checking. */
void
mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}
771
/* Interface the packet was received on (no reference is taken). */
ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	/*
	 * If we reference count ifnets, we should take a reference here
	 * before returning
	 */
	return mbuf->m_pkthdr.rcvif;
}
781
/* Record the receive interface; the ifnet is not validated. */
errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}
789
/* Opaque link-layer header pointer stashed in the packet header. */
void*
mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}
795
/* Stash an opaque link-layer header pointer in the packet header. */
void
mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.pkt_hdr = (void*)header;
}
801
/*
 * Declare that the packet's contents were modified after receive, so any
 * hardware-verified checksums can no longer be trusted.
 */
void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
808
/*
 * Finalize an outbound packet for a client that cannot use checksum
 * offload: compute any deferred IPv4/IPv6 checksums in software.
 * `pf` selects the protocol family; `o` is the offset of the IP header.
 * Unknown families are silently ignored.
 */
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, (uint32_t)o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, (uint32_t)o, -1, -1, m->m_pkthdr.csum_flags);
		break;

	default:
		break;
	}
}
831
832 errno_t
mbuf_set_vlan_tag(mbuf_t mbuf,u_int16_t vlan)833 mbuf_set_vlan_tag(
834 mbuf_t mbuf,
835 u_int16_t vlan)
836 {
837 mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
838 mbuf->m_pkthdr.vlan_tag = vlan;
839
840 return 0;
841 }
842
843 errno_t
mbuf_get_vlan_tag(mbuf_t mbuf,u_int16_t * vlan)844 mbuf_get_vlan_tag(
845 mbuf_t mbuf,
846 u_int16_t *vlan)
847 {
848 if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
849 return ENXIO; // No vlan tag set
850 }
851 *vlan = mbuf->m_pkthdr.vlan_tag;
852
853 return 0;
854 }
855
856 errno_t
mbuf_clear_vlan_tag(mbuf_t mbuf)857 mbuf_clear_vlan_tag(
858 mbuf_t mbuf)
859 {
860 mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
861 mbuf->m_pkthdr.vlan_tag = 0;
862
863 return 0;
864 }
865
/* Checksum-offload request bits that KPI clients are allowed to set. */
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
    MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
    MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;
869
870 errno_t
mbuf_set_csum_requested(mbuf_t mbuf,mbuf_csum_request_flags_t request,u_int32_t value)871 mbuf_set_csum_requested(
872 mbuf_t mbuf,
873 mbuf_csum_request_flags_t request,
874 u_int32_t value)
875 {
876 request &= mbuf_valid_csum_request_flags;
877 mbuf->m_pkthdr.csum_flags =
878 (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
879 mbuf->m_pkthdr.csum_data = value;
880
881 return 0;
882 }
883
/* TCP segmentation-offload request bits exposed to KPI clients. */
static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
    MBUF_TSO_IPV4 | MBUF_TSO_IPV6;
886
887 errno_t
mbuf_get_tso_requested(mbuf_t mbuf,mbuf_tso_request_flags_t * request,u_int32_t * value)888 mbuf_get_tso_requested(
889 mbuf_t mbuf,
890 mbuf_tso_request_flags_t *request,
891 u_int32_t *value)
892 {
893 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
894 request == NULL || value == NULL) {
895 return EINVAL;
896 }
897
898 *request = mbuf->m_pkthdr.csum_flags;
899 *request &= mbuf_valid_tso_request_flags;
900 if (*request && value != NULL) {
901 *value = mbuf->m_pkthdr.tso_segsz;
902 }
903
904 return 0;
905 }
906
907 errno_t
mbuf_get_csum_requested(mbuf_t mbuf,mbuf_csum_request_flags_t * request,u_int32_t * value)908 mbuf_get_csum_requested(
909 mbuf_t mbuf,
910 mbuf_csum_request_flags_t *request,
911 u_int32_t *value)
912 {
913 *request = mbuf->m_pkthdr.csum_flags;
914 *request &= mbuf_valid_csum_request_flags;
915 if (value != NULL) {
916 *value = mbuf->m_pkthdr.csum_data;
917 }
918
919 return 0;
920 }
921
922 errno_t
mbuf_clear_csum_requested(mbuf_t mbuf)923 mbuf_clear_csum_requested(
924 mbuf_t mbuf)
925 {
926 mbuf->m_pkthdr.csum_flags &= 0xffff0000;
927 mbuf->m_pkthdr.csum_data = 0;
928
929 return 0;
930 }
931
/* Inbound checksum-result bits that drivers may report via this KPI. */
static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
    MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;
935
936 errno_t
mbuf_set_csum_performed(mbuf_t mbuf,mbuf_csum_performed_flags_t performed,u_int32_t value)937 mbuf_set_csum_performed(
938 mbuf_t mbuf,
939 mbuf_csum_performed_flags_t performed,
940 u_int32_t value)
941 {
942 performed &= mbuf_valid_csum_performed_flags;
943 mbuf->m_pkthdr.csum_flags =
944 (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
945 mbuf->m_pkthdr.csum_data = value;
946
947 return 0;
948 }
949
950 errno_t
mbuf_get_csum_performed(mbuf_t mbuf,mbuf_csum_performed_flags_t * performed,u_int32_t * value)951 mbuf_get_csum_performed(
952 mbuf_t mbuf,
953 mbuf_csum_performed_flags_t *performed,
954 u_int32_t *value)
955 {
956 *performed =
957 mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
958 *value = mbuf->m_pkthdr.csum_data;
959
960 return 0;
961 }
962
963 errno_t
mbuf_clear_csum_performed(mbuf_t mbuf)964 mbuf_clear_csum_performed(
965 mbuf_t mbuf)
966 {
967 mbuf->m_pkthdr.csum_flags &= 0xffff0000;
968 mbuf->m_pkthdr.csum_data = 0;
969
970 return 0;
971 }
972
973 errno_t
mbuf_inet_cksum(mbuf_t mbuf,int protocol,u_int32_t offset,u_int32_t length,u_int16_t * csum)974 mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
975 u_int16_t *csum)
976 {
977 if (mbuf == NULL || length == 0 || csum == NULL ||
978 (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
979 return EINVAL;
980 }
981
982 *csum = inet_cksum(mbuf, protocol, offset, length);
983 return 0;
984 }
985
986 errno_t
mbuf_inet6_cksum(mbuf_t mbuf,int protocol,u_int32_t offset,u_int32_t length,u_int16_t * csum)987 mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
988 u_int16_t *csum)
989 {
990 if (mbuf == NULL || length == 0 || csum == NULL ||
991 (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
992 return EINVAL;
993 }
994
995 *csum = inet6_cksum(mbuf, protocol, offset, length);
996 return 0;
997 }
998
/*
 * Mbuf tag KPIs
 */

/* Lowest tag id handed out by the net_str_id registry for mbuf tags. */
#define MTAG_FIRST_ID FIRST_KPI_STR_ID
1004
/*
 * Map a tag-name string to a stable numeric tag id, registering the
 * string on first use (the final argument requests creation).
 */
errno_t
mbuf_tag_id_find(
	const char *string,
	mbuf_tag_id_t *out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
1012
/*
 * Allocate and attach a tag of `length` bytes identified by (id, type).
 * On success *data_p points at the tag payload (just past the m_tag
 * header).  Fails with EEXIST if the same (id, type) tag is already
 * attached, EINVAL for bad arguments (including length >= 64K).
 */
errno_t
mbuf_tag_allocate(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t length,
	mbuf_how_t how,
	void** data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	/* length must fit in 16 bits: (length & 0xffff0000) must be 0 */
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length < 1 ||
	    (length & 0xffff0000) != 0 || data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_create(id, type, (int)length, how, mbuf);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	/* Payload lives immediately after the m_tag header. */
	*data_p = tag + 1;

	return 0;
}
1056
/*
 * Look up the tag identified by (id, type).  On success *length and
 * *data_p describe the tag payload; ENOENT when no such tag is attached.
 */
errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t *length,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	/* Clear outputs up front so failure paths leave them defined. */
	if (length != NULL) {
		*length = 0;
	}
	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length == NULL ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}
1096
1097 void
mbuf_tag_free(mbuf_t mbuf,mbuf_tag_id_t id,mbuf_tag_type_t type)1098 mbuf_tag_free(
1099 mbuf_t mbuf,
1100 mbuf_tag_id_t id,
1101 mbuf_tag_type_t type)
1102 {
1103 struct m_tag *tag;
1104 u_int32_t mtag_id_first, mtag_id_last;
1105
1106 /* Sanity check parameters */
1107 (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
1108 NSI_MBUF_TAG);
1109 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
1110 id < mtag_id_first || id > mtag_id_last) {
1111 return;
1112 }
1113
1114 tag = m_tag_locate(mbuf, id, type, NULL);
1115 if (tag == NULL) {
1116 return;
1117 }
1118
1119 m_tag_delete(mbuf, tag);
1120 }
1121
/*
 * Maximum length of driver auxiliary data; keep this small to
 * fit in a single mbuf to avoid wasting memory, rounded down to
 * the nearest 64-bit boundary.  This takes into account mbuf
 * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
 */
#define MBUF_DRVAUX_MAXLEN                                      \
	P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) -           \
	M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
1131
/*
 * Attach driver auxiliary data of `length` bytes to the packet, tagged
 * with (family, subfamily).  At most one DRVAUX tag may exist per packet
 * (EEXIST otherwise).  On success *data_p (if provided) points at the
 * module-specific payload following the m_drvaux_tag header.
 */
errno_t
mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
    u_int32_t subfamily, size_t length, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
	    length == 0 || length > MBUF_DRVAUX_MAXLEN) {
		return EINVAL;
	}

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Check if one is already associated */
	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		return EEXIST;
	}

	/* Tag is (m_drvaux_tag + module specific data) */
	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
	    (int)(sizeof(*p) + length), how, mbuf)) == NULL) {
		return (how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK;
	}

	/* The m_drvaux_tag header sits right after the m_tag header. */
	p = (struct m_drvaux_tag *)(tag + 1);
	p->da_family = family;
	p->da_subfamily = subfamily;
	p->da_length = (int)length;

	/* Associate the tag */
	m_tag_prepend(mbuf, tag);

	if (data_p != NULL) {
		*data_p = (p + 1);
	}

	return 0;
}
1174
1175 errno_t
mbuf_find_drvaux(mbuf_t mbuf,u_int32_t * family_p,u_int32_t * subfamily_p,u_int32_t * length_p,void ** data_p)1176 mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
1177 u_int32_t *length_p, void **data_p)
1178 {
1179 struct m_drvaux_tag *p;
1180 struct m_tag *tag;
1181
1182 if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) {
1183 return EINVAL;
1184 }
1185
1186 *data_p = NULL;
1187
1188 if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1189 KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL) {
1190 return ENOENT;
1191 }
1192
1193 /* Must be at least size of m_drvaux_tag */
1194 VERIFY(tag->m_tag_len >= sizeof(*p));
1195
1196 p = (struct m_drvaux_tag *)(tag + 1);
1197 VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);
1198
1199 if (family_p != NULL) {
1200 *family_p = p->da_family;
1201 }
1202 if (subfamily_p != NULL) {
1203 *subfamily_p = p->da_subfamily;
1204 }
1205 if (length_p != NULL) {
1206 *length_p = p->da_length;
1207 }
1208
1209 *data_p = (p + 1);
1210
1211 return 0;
1212 }
1213
1214 void
mbuf_del_drvaux(mbuf_t mbuf)1215 mbuf_del_drvaux(mbuf_t mbuf)
1216 {
1217 struct m_tag *tag;
1218
1219 if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) {
1220 return;
1221 }
1222
1223 if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1224 KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
1225 m_tag_delete(mbuf, tag);
1226 }
1227 }
1228
1229 /* mbuf stats */
1230 void
mbuf_stats(struct mbuf_stat * stats)1231 mbuf_stats(struct mbuf_stat *stats)
1232 {
1233 stats->mbufs = mbstat.m_mbufs;
1234 stats->clusters = mbstat.m_clusters;
1235 stats->clfree = mbstat.m_clfree;
1236 stats->drops = mbstat.m_drops;
1237 stats->wait = mbstat.m_wait;
1238 stats->drain = mbstat.m_drain;
1239 __builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
1240 stats->mcfail = mbstat.m_mcfail;
1241 stats->mpfail = mbstat.m_mpfail;
1242 stats->msize = mbstat.m_msize;
1243 stats->mclbytes = mbstat.m_mclbytes;
1244 stats->minclsize = mbstat.m_minclsize;
1245 stats->mlen = mbstat.m_mlen;
1246 stats->mhlen = mbstat.m_mhlen;
1247 stats->bigclusters = mbstat.m_bigclusters;
1248 stats->bigclfree = mbstat.m_bigclfree;
1249 stats->bigmclbytes = mbstat.m_bigmclbytes;
1250 }
1251
1252 errno_t
mbuf_allocpacket(mbuf_how_t how,size_t packetlen,unsigned int * maxchunks,mbuf_t * mbuf)1253 mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
1254 mbuf_t *mbuf)
1255 {
1256 errno_t error;
1257 struct mbuf *m;
1258 unsigned int numpkts = 1;
1259 unsigned int numchunks = maxchunks ? *maxchunks : 0;
1260
1261 if (packetlen == 0) {
1262 error = EINVAL;
1263 goto out;
1264 }
1265 m = m_allocpacket_internal(&numpkts, packetlen,
1266 maxchunks ? &numchunks : NULL, how, 1, 0);
1267 if (m == 0) {
1268 if (maxchunks && *maxchunks && numchunks > *maxchunks) {
1269 error = ENOBUFS;
1270 } else {
1271 error = ENOMEM;
1272 }
1273 } else {
1274 if (maxchunks) {
1275 *maxchunks = numchunks;
1276 }
1277 error = 0;
1278 *mbuf = m;
1279 }
1280 out:
1281 return error;
1282 }
1283
1284 errno_t
mbuf_allocpacket_list(unsigned int numpkts,mbuf_how_t how,size_t packetlen,unsigned int * maxchunks,mbuf_t * mbuf)1285 mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
1286 unsigned int *maxchunks, mbuf_t *mbuf)
1287 {
1288 errno_t error;
1289 struct mbuf *m;
1290 unsigned int numchunks = maxchunks ? *maxchunks : 0;
1291
1292 if (numpkts == 0) {
1293 error = EINVAL;
1294 goto out;
1295 }
1296 if (packetlen == 0) {
1297 error = EINVAL;
1298 goto out;
1299 }
1300 m = m_allocpacket_internal(&numpkts, packetlen,
1301 maxchunks ? &numchunks : NULL, how, 1, 0);
1302 if (m == 0) {
1303 if (maxchunks && *maxchunks && numchunks > *maxchunks) {
1304 error = ENOBUFS;
1305 } else {
1306 error = ENOMEM;
1307 }
1308 } else {
1309 if (maxchunks) {
1310 *maxchunks = numchunks;
1311 }
1312 error = 0;
1313 *mbuf = m;
1314 }
1315 out:
1316 return error;
1317 }
1318
1319 __private_extern__ size_t
mbuf_pkt_list_len(mbuf_t m)1320 mbuf_pkt_list_len(mbuf_t m)
1321 {
1322 size_t len = 0;
1323 mbuf_t n = m;
1324
1325 while (n) {
1326 len += mbuf_pkthdr_len(n);
1327 n = mbuf_nextpkt(n);
1328 }
1329 return len;
1330 }
1331
1332 __private_extern__ size_t
mbuf_pkt_list_maxlen(mbuf_t m)1333 mbuf_pkt_list_maxlen(mbuf_t m)
1334 {
1335 size_t maxlen = 0;
1336 mbuf_t n = m;
1337
1338 while (n) {
1339 maxlen += mbuf_pkthdr_maxlen(n);
1340 n = mbuf_nextpkt(n);
1341 }
1342 return maxlen;
1343 }
1344
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 *
 * Copies "len" bytes from "data" into the chain starting at byte
 * offset "off", extending the chain as needed; on success the packet
 * header length is grown to cover the copied region.  Returns 0 or
 * ENOBUFS if allocation fails partway (chain may be partially grown).
 */
errno_t
mbuf_copyback(
	mbuf_t m,
	size_t off,
	size_t len,
	const void *data,
	mbuf_how_t how)
{
	size_t mlen;
	mbuf_t m_start = m;
	mbuf_t n;
	int totlen = 0;              /* bytes of the chain covered so far */
	errno_t result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL) {
		return EINVAL;
	}

	/* Walk to the mbuf containing "off", zero-filling any gap */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			/* append a zeroed mbuf to bridge the hole */
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = (int32_t)MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	/* Copy, growing the tail mbuf in place or appending new mbufs */
	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			/* last mbuf: claim trailing space before appending */
			size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char *)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		/* account for the skipped prefix in this mbuf, once */
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0) {
			break;
		}
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = (int32_t)MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	/* Extend the packet header length if the chain grew */
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) {
		m_start->m_pkthdr.len = totlen;
	}

	return result;
}
1429
1430 u_int32_t
mbuf_get_mlen(void)1431 mbuf_get_mlen(void)
1432 {
1433 return _MLEN;
1434 }
1435
1436 u_int32_t
mbuf_get_mhlen(void)1437 mbuf_get_mhlen(void)
1438 {
1439 return _MHLEN;
1440 }
1441
1442 u_int32_t
mbuf_get_minclsize(void)1443 mbuf_get_minclsize(void)
1444 {
1445 return MHLEN + MLEN;
1446 }
1447
1448 u_int32_t
mbuf_get_traffic_class_max_count(void)1449 mbuf_get_traffic_class_max_count(void)
1450 {
1451 return MBUF_TC_MAX;
1452 }
1453
1454 errno_t
mbuf_get_traffic_class_index(mbuf_traffic_class_t tc,u_int32_t * index)1455 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
1456 {
1457 if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) {
1458 return EINVAL;
1459 }
1460
1461 *index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
1462 return 0;
1463 }
1464
1465 mbuf_traffic_class_t
mbuf_get_traffic_class(mbuf_t m)1466 mbuf_get_traffic_class(mbuf_t m)
1467 {
1468 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1469 return MBUF_TC_BE;
1470 }
1471
1472 return m_get_traffic_class(m);
1473 }
1474
1475 errno_t
mbuf_set_traffic_class(mbuf_t m,mbuf_traffic_class_t tc)1476 mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
1477 {
1478 if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1479 ((u_int32_t)tc >= MBUF_TC_MAX)) {
1480 return EINVAL;
1481 }
1482
1483 return m_set_traffic_class(m, tc);
1484 }
1485
1486 int
mbuf_is_traffic_class_privileged(mbuf_t m)1487 mbuf_is_traffic_class_privileged(mbuf_t m)
1488 {
1489 if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1490 !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1491 return 0;
1492 }
1493
1494 return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0;
1495 }
1496
1497 u_int32_t
mbuf_get_service_class_max_count(void)1498 mbuf_get_service_class_max_count(void)
1499 {
1500 return MBUF_SC_MAX_CLASSES;
1501 }
1502
1503 errno_t
mbuf_get_service_class_index(mbuf_svc_class_t sc,u_int32_t * index)1504 mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
1505 {
1506 if (index == NULL || !MBUF_VALID_SC(sc)) {
1507 return EINVAL;
1508 }
1509
1510 *index = MBUF_SCIDX(sc);
1511 return 0;
1512 }
1513
1514 mbuf_svc_class_t
mbuf_get_service_class(mbuf_t m)1515 mbuf_get_service_class(mbuf_t m)
1516 {
1517 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1518 return MBUF_SC_BE;
1519 }
1520
1521 return m_get_service_class(m);
1522 }
1523
1524 errno_t
mbuf_set_service_class(mbuf_t m,mbuf_svc_class_t sc)1525 mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
1526 {
1527 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1528 return EINVAL;
1529 }
1530
1531 return m_set_service_class(m, sc);
1532 }
1533
1534 errno_t
mbuf_pkthdr_aux_flags(mbuf_t m,mbuf_pkthdr_aux_flags_t * flagsp)1535 mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
1536 {
1537 u_int32_t flags;
1538
1539 if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) {
1540 return EINVAL;
1541 }
1542
1543 *flagsp = 0;
1544 flags = m->m_pkthdr.pkt_flags;
1545 if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) ==
1546 (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) {
1547 *flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
1548 }
1549 if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) ==
1550 (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) {
1551 *flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
1552 }
1553
1554 /* These 2 flags are mutually exclusive */
1555 VERIFY((*flagsp &
1556 (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
1557 (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));
1558
1559 return 0;
1560 }
1561
1562 errno_t
mbuf_get_driver_scratch(mbuf_t m,u_int8_t ** area,size_t * area_len)1563 mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
1564 {
1565 if (m == NULL || area == NULL || area_len == NULL ||
1566 !(m->m_flags & M_PKTHDR)) {
1567 return EINVAL;
1568 }
1569
1570 *area_len = m_scratch_get(m, area);
1571 return 0;
1572 }
1573
1574 errno_t
mbuf_get_unsent_data_bytes(const mbuf_t m,u_int32_t * unsent_data)1575 mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data)
1576 {
1577 if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) {
1578 return EINVAL;
1579 }
1580
1581 if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1582 return EINVAL;
1583 }
1584
1585 *unsent_data = m->m_pkthdr.bufstatus_if +
1586 m->m_pkthdr.bufstatus_sndbuf;
1587 return 0;
1588 }
1589
1590 errno_t
mbuf_get_buffer_status(const mbuf_t m,mbuf_buffer_status_t * buf_status)1591 mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status)
1592 {
1593 if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) ||
1594 !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1595 return EINVAL;
1596 }
1597
1598 buf_status->buf_interface = m->m_pkthdr.bufstatus_if;
1599 buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf;
1600 return 0;
1601 }
1602
1603 errno_t
mbuf_pkt_new_flow(const mbuf_t m,u_int32_t * retval)1604 mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval)
1605 {
1606 if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1607 return EINVAL;
1608 }
1609 if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) {
1610 *retval = 1;
1611 } else {
1612 *retval = 0;
1613 }
1614 return 0;
1615 }
1616
1617 errno_t
mbuf_last_pkt(const mbuf_t m,u_int32_t * retval)1618 mbuf_last_pkt(const mbuf_t m, u_int32_t *retval)
1619 {
1620 if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1621 return EINVAL;
1622 }
1623 if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
1624 *retval = 1;
1625 } else {
1626 *retval = 0;
1627 }
1628 return 0;
1629 }
1630
1631 errno_t
mbuf_get_timestamp(mbuf_t m,u_int64_t * ts,boolean_t * valid)1632 mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid)
1633 {
1634 if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) {
1635 return EINVAL;
1636 }
1637
1638 if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1639 if (valid != NULL) {
1640 *valid = FALSE;
1641 }
1642 *ts = 0;
1643 } else {
1644 if (valid != NULL) {
1645 *valid = TRUE;
1646 }
1647 *ts = m->m_pkthdr.pkt_timestamp;
1648 }
1649 return 0;
1650 }
1651
1652 errno_t
mbuf_set_timestamp(mbuf_t m,u_int64_t ts,boolean_t valid)1653 mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid)
1654 {
1655 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1656 return EINVAL;
1657 }
1658
1659 if (valid == FALSE) {
1660 m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
1661 m->m_pkthdr.pkt_timestamp = 0;
1662 } else {
1663 m->m_pkthdr.pkt_flags |= PKTF_TS_VALID;
1664 m->m_pkthdr.pkt_timestamp = ts;
1665 }
1666 return 0;
1667 }
1668
1669 errno_t
mbuf_get_status(mbuf_t m,kern_return_t * status)1670 mbuf_get_status(mbuf_t m, kern_return_t *status)
1671 {
1672 if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) {
1673 return EINVAL;
1674 }
1675
1676 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1677 *status = 0;
1678 } else {
1679 *status = m->m_pkthdr.drv_tx_status;
1680 }
1681 return 0;
1682 }
1683
1684 static void
driver_mtag_init(mbuf_t m)1685 driver_mtag_init(mbuf_t m)
1686 {
1687 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1688 m->m_pkthdr.pkt_flags |= PKTF_DRIVER_MTAG;
1689 bzero(&m->m_pkthdr.driver_mtag,
1690 sizeof(m->m_pkthdr.driver_mtag));
1691 }
1692 }
1693
1694 errno_t
mbuf_set_status(mbuf_t m,kern_return_t status)1695 mbuf_set_status(mbuf_t m, kern_return_t status)
1696 {
1697 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1698 return EINVAL;
1699 }
1700
1701 driver_mtag_init(m);
1702
1703 m->m_pkthdr.drv_tx_status = status;
1704
1705 return 0;
1706 }
1707
1708 errno_t
mbuf_get_flowid(mbuf_t m,u_int16_t * flowid)1709 mbuf_get_flowid(mbuf_t m, u_int16_t *flowid)
1710 {
1711 if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) {
1712 return EINVAL;
1713 }
1714
1715 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1716 *flowid = 0;
1717 } else {
1718 *flowid = m->m_pkthdr.drv_flowid;
1719 }
1720 return 0;
1721 }
1722
1723 errno_t
mbuf_set_flowid(mbuf_t m,u_int16_t flowid)1724 mbuf_set_flowid(mbuf_t m, u_int16_t flowid)
1725 {
1726 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1727 return EINVAL;
1728 }
1729
1730 driver_mtag_init(m);
1731
1732 m->m_pkthdr.drv_flowid = flowid;
1733
1734 return 0;
1735 }
1736
1737 errno_t
mbuf_get_tx_compl_data(mbuf_t m,uintptr_t * arg,uintptr_t * data)1738 mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data)
1739 {
1740 if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL ||
1741 data == NULL) {
1742 return EINVAL;
1743 }
1744
1745 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1746 *arg = 0;
1747 *data = 0;
1748 } else {
1749 *arg = m->m_pkthdr.drv_tx_compl_arg;
1750 *data = m->m_pkthdr.drv_tx_compl_data;
1751 }
1752 return 0;
1753 }
1754
1755 errno_t
mbuf_set_tx_compl_data(mbuf_t m,uintptr_t arg,uintptr_t data)1756 mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data)
1757 {
1758 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1759 return EINVAL;
1760 }
1761
1762 driver_mtag_init(m);
1763
1764 m->m_pkthdr.drv_tx_compl_arg = arg;
1765 m->m_pkthdr.drv_tx_compl_data = data;
1766
1767 return 0;
1768 }
1769
1770 static u_int32_t
get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)1771 get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)
1772 {
1773 u_int32_t i;
1774
1775 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1776 if (mbuf_tx_compl_table[i] == callback) {
1777 return i;
1778 }
1779 }
1780 return UINT32_MAX;
1781 }
1782
1783 static u_int32_t
get_tx_compl_callback_index(mbuf_tx_compl_func callback)1784 get_tx_compl_callback_index(mbuf_tx_compl_func callback)
1785 {
1786 u_int32_t i;
1787
1788 lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
1789
1790 i = get_tx_compl_callback_index_locked(callback);
1791
1792 lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);
1793
1794 return i;
1795 }
1796
1797 mbuf_tx_compl_func
m_get_tx_compl_callback(u_int32_t idx)1798 m_get_tx_compl_callback(u_int32_t idx)
1799 {
1800 mbuf_tx_compl_func cb;
1801
1802 if (idx >= MAX_MBUF_TX_COMPL_FUNC) {
1803 ASSERT(0);
1804 return NULL;
1805 }
1806 lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
1807 cb = mbuf_tx_compl_table[idx];
1808 lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);
1809 return cb;
1810 }
1811
1812 errno_t
mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)1813 mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
1814 {
1815 int i;
1816 errno_t error;
1817
1818 if (callback == NULL) {
1819 return EINVAL;
1820 }
1821
1822 lck_rw_lock_exclusive(&mbuf_tx_compl_tbl_lock);
1823
1824 i = get_tx_compl_callback_index_locked(callback);
1825 if (i != -1) {
1826 error = EEXIST;
1827 goto unlock;
1828 }
1829
1830 /* assume the worst */
1831 error = ENOSPC;
1832 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1833 if (mbuf_tx_compl_table[i] == NULL) {
1834 mbuf_tx_compl_table[i] = callback;
1835 error = 0;
1836 goto unlock;
1837 }
1838 }
1839 unlock:
1840 lck_rw_unlock_exclusive(&mbuf_tx_compl_tbl_lock);
1841
1842 return error;
1843 }
1844
1845 errno_t
mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)1846 mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
1847 {
1848 int i;
1849 errno_t error;
1850
1851 if (callback == NULL) {
1852 return EINVAL;
1853 }
1854
1855 lck_rw_lock_exclusive(&mbuf_tx_compl_tbl_lock);
1856
1857 /* assume the worst */
1858 error = ENOENT;
1859 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1860 if (mbuf_tx_compl_table[i] == callback) {
1861 mbuf_tx_compl_table[i] = NULL;
1862 error = 0;
1863 goto unlock;
1864 }
1865 }
1866 unlock:
1867 lck_rw_unlock_exclusive(&mbuf_tx_compl_tbl_lock);
1868
1869 return error;
1870 }
1871
1872 errno_t
mbuf_get_timestamp_requested(mbuf_t m,boolean_t * requested)1873 mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested)
1874 {
1875 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1876 return EINVAL;
1877 }
1878
1879 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1880 *requested = FALSE;
1881 } else {
1882 *requested = TRUE;
1883 }
1884 return 0;
1885 }
1886
1887 errno_t
mbuf_set_timestamp_requested(mbuf_t m,uintptr_t * pktid,mbuf_tx_compl_func callback)1888 mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
1889 mbuf_tx_compl_func callback)
1890 {
1891 size_t i;
1892
1893 if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
1894 pktid == NULL) {
1895 return EINVAL;
1896 }
1897
1898 i = get_tx_compl_callback_index(callback);
1899 if (i == UINT32_MAX) {
1900 return ENOENT;
1901 }
1902
1903 m_add_crumb(m, PKT_CRUMB_TS_COMP_REQ);
1904
1905 #if (DEBUG || DEVELOPMENT)
1906 VERIFY(i < sizeof(m->m_pkthdr.pkt_compl_callbacks));
1907 #endif /* (DEBUG || DEVELOPMENT) */
1908
1909 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1910 m->m_pkthdr.pkt_compl_callbacks = 0;
1911 m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
1912 m->m_pkthdr.pkt_compl_context =
1913 atomic_add_32_ov(&mbuf_tx_compl_index, 1);
1914
1915 #if (DEBUG || DEVELOPMENT)
1916 atomic_add_64(&mbuf_tx_compl_requested, 1);
1917 #endif /* (DEBUG || DEVELOPMENT) */
1918 }
1919 m->m_pkthdr.pkt_compl_callbacks |= (1 << i);
1920 *pktid = m->m_pkthdr.pkt_compl_context;
1921
1922 return 0;
1923 }
1924
1925 void
m_do_tx_compl_callback(struct mbuf * m,struct ifnet * ifp)1926 m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
1927 {
1928 int i;
1929
1930 if (m == NULL) {
1931 return;
1932 }
1933
1934 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1935 return;
1936 }
1937
1938 m_add_crumb(m, PKT_CRUMB_TS_COMP_CB);
1939
1940 #if (DEBUG || DEVELOPMENT)
1941 if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
1942 (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
1943 (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1944 struct timespec now;
1945
1946 nanouptime(&now);
1947 net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
1948 }
1949 #endif /* (DEBUG || DEVELOPMENT) */
1950
1951 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1952 mbuf_tx_compl_func callback;
1953
1954 if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) {
1955 continue;
1956 }
1957
1958 lck_rw_lock_shared(&mbuf_tx_compl_tbl_lock);
1959 callback = mbuf_tx_compl_table[i];
1960 lck_rw_unlock_shared(&mbuf_tx_compl_tbl_lock);
1961
1962 if (callback != NULL) {
1963 callback(m->m_pkthdr.pkt_compl_context,
1964 ifp,
1965 (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) ?
1966 m->m_pkthdr.pkt_timestamp: 0,
1967 m->m_pkthdr.drv_tx_compl_arg,
1968 m->m_pkthdr.drv_tx_compl_data,
1969 m->m_pkthdr.drv_tx_status);
1970 }
1971 }
1972 #if (DEBUG || DEVELOPMENT)
1973 if (m->m_pkthdr.pkt_compl_callbacks != 0) {
1974 atomic_add_64(&mbuf_tx_compl_callbacks, 1);
1975 if (ifp == NULL) {
1976 atomic_add_64(&mbuf_tx_compl_aborted, 1);
1977 }
1978 }
1979 #endif /* (DEBUG || DEVELOPMENT) */
1980 m->m_pkthdr.pkt_compl_callbacks = 0;
1981 }
1982
1983 errno_t
mbuf_get_keepalive_flag(mbuf_t m,boolean_t * is_keepalive)1984 mbuf_get_keepalive_flag(mbuf_t m, boolean_t *is_keepalive)
1985 {
1986 if (m == NULL || is_keepalive == NULL || !(m->m_flags & M_PKTHDR)) {
1987 return EINVAL;
1988 }
1989
1990 *is_keepalive = (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
1991
1992 return 0;
1993 }
1994
1995 errno_t
mbuf_set_keepalive_flag(mbuf_t m,boolean_t is_keepalive)1996 mbuf_set_keepalive_flag(mbuf_t m, boolean_t is_keepalive)
1997 {
1998 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1999 return EINVAL;
2000 }
2001
2002 if (is_keepalive) {
2003 m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
2004 } else {
2005 m->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
2006 }
2007
2008 return 0;
2009 }
2010