1 /*
2 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifndef _SKYWALK_PACKET_COMMON_H_
30 #define _SKYWALK_PACKET_COMMON_H_
31
32 #if defined(PRIVATE) || defined(BSD_KERNEL_PRIVATE)
33 /*
34 * Routines common to kernel and userland. This file is intended to
35 * be included by code implementing the packet APIs, in particular,
36 * the Skywalk kernel and libsyscall code.
37 */
38
39 #include <skywalk/os_packet_private.h>
40 #include <net/if_vlan_var.h>
41 #include <sys/errno.h>
42 #include <sys/kdebug.h>
43
#ifndef KERNEL
/*
 * User.
 *
 * In libsyscall builds, packet handles resolve to the user-visible
 * metadata structures.
 */
#if !defined(LIBSYSCALL_INTERFACE)
#error "LIBSYSCALL_INTERFACE not defined"
#endif /* !LIBSYSCALL_INTERFACE */
/* Resolve a packet handle to its user-space quantum/packet metadata. */
#define QUM_ADDR(_ph) SK_PTR_ADDR_UQUM(_ph)
#define PKT_ADDR(_ph) SK_PTR_ADDR_UPKT(_ph)
/* Cast an opaque buflet pointer to the user-space buflet type. */
#define BLT_ADDR(_bp) ((struct __user_buflet *)(uintptr_t)_bp)
#else /* KERNEL */
/*
 * Kernel.
 *
 * Same accessors, resolving to the kernel-side metadata structures.
 */
#include <skywalk/packet/packet_var.h>
#include <skywalk/packet/pbufpool_var.h>
/* Resolve a packet handle to its kernel quantum/packet metadata. */
#define QUM_ADDR(_ph) SK_PTR_ADDR_KQUM(_ph)
#define PKT_ADDR(_ph) SK_PTR_ADDR_KPKT(_ph)
/* Cast an opaque buflet pointer to the kernel buflet type. */
#define BLT_ADDR(_bp) ((struct __kern_buflet *)(uintptr_t)_bp)
/* True if the packet carries an attached mbuf (PKT_F_MBUF_DATA set). */
#define PKT_HAS_ATTACHED_MBUF(_ph) \
	((PKT_ADDR(_ph)->pkt_pflags & PKT_F_MBUF_DATA) != 0)
#endif /* KERNEL */
66
67 /*
68 * Common.
69 */
/*
 * Handle type/subtype validation.  On DEBUG/DEVELOPMENT builds a
 * mismatch calls a non-returning assertion-failure routine; on release
 * builds these compile to nothing.
 */
#if (DEBUG || DEVELOPMENT)
#define PKT_SUBTYPE_ASSERT(_ph, _type, _subtype) do { \
	if (__improbable(SK_PTR_TYPE(_ph) != (uint64_t)(_type) || \
	SK_PTR_SUBTYPE(_ph) != (uint64_t)(_subtype))) { \
	pkt_subtype_assert_fail(_ph, _type, _subtype); \
	/* NOTREACHED */ \
	__builtin_unreachable(); \
	} \
} while (0)

#define PKT_TYPE_ASSERT(_ph, _type) do { \
	if (__improbable(SK_PTR_TYPE(_ph) != (uint64_t)(_type))) { \
	pkt_type_assert_fail(_ph, _type); \
	/* NOTREACHED */ \
	__builtin_unreachable(); \
	} \
} while (0)
#else /* !DEBUG && !DEVELOPMENT */
#define PKT_SUBTYPE_ASSERT(_ph, _type, _subtype) ((void)0)
#define PKT_TYPE_ASSERT(_ph, _type) ((void)0)
#endif /* !DEBUG && !DEVELOPMENT */
91
/*
 * Buflet chain iteration helpers.  A quantum has exactly one buflet; a
 * packet may chain several via buf_nbft_addr.  Passing _pbuf == NULL
 * asks for the first buflet; iteration ends when _buf comes back NULL.
 */

/* Quantum iterator: yields the sole buflet once, then NULL. */
#define QUM_GET_NEXT_BUFLET(_qum, _pbuf, _buf) do { \
	ASSERT((_pbuf) == NULL || (_pbuf) == (_qum)->qum_buf); \
	(_buf) = (((_pbuf) == NULL) ? (_qum)->qum_buf : NULL); \
} while (0)

/*
 * First buflet of a packet: the embedded pkt_qum_buf if it holds a
 * buffer, otherwise whatever pkt_qum_buf's next-buflet link points at.
 * NULL when the packet has no buflets (_bcnt == 0).
 */
#define PKT_GET_FIRST_BUFLET(_pkt, _bcnt, _buf) do { \
	if (__improbable((_bcnt) == 0)) { \
	(_buf) = NULL; \
	break; \
	} \
	if (__probable((_pkt)->pkt_qum_buf.buf_addr != 0)) { \
	(_buf) = &(_pkt)->pkt_qum_buf; \
	} else { \
	(_buf) = __DECONST(void *, (_pkt)->pkt_qum_buf.buf_nbft_addr);\
	} \
} while (0)

/* Generic successor: first buflet if _pbuf is NULL, else _pbuf's next. */
#define _PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do { \
	if ((_pbuf) == NULL) { \
	PKT_GET_FIRST_BUFLET(_pkt, _bcnt, _buf); \
	} else { \
	(_buf) = __DECONST(void *, (_pbuf)->buf_nbft_addr); \
	} \
} while (0)

#ifndef KERNEL
#define PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do { \
	_PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf); \
} while (0)
#else /* KERNEL */
/* Kernel variant additionally sanity-checks the count/cursor pairing. */
#define PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do { \
	ASSERT(((_bcnt) >= 1) || ((_pbuf) == NULL)); \
	_PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf); \
} while (0)
#endif /* KERNEL */
127
128 __attribute__((always_inline))
129 static inline int
__packet_set_headroom(const uint64_t ph,const uint8_t headroom)130 __packet_set_headroom(const uint64_t ph, const uint8_t headroom)
131 {
132 PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
133 if (__probable(headroom < PKT_ADDR(ph)->pkt_qum_buf.buf_dlim)) {
134 PKT_ADDR(ph)->pkt_headroom = headroom;
135 return 0;
136 }
137 return ERANGE;
138 }
139
140 __attribute__((always_inline))
141 static inline uint8_t
__packet_get_headroom(const uint64_t ph)142 __packet_get_headroom(const uint64_t ph)
143 {
144 PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
145 return PKT_ADDR(ph)->pkt_headroom;
146 }
147
148 __attribute__((always_inline))
149 static inline int
__packet_set_link_header_length(const uint64_t ph,const uint8_t len)150 __packet_set_link_header_length(const uint64_t ph, const uint8_t len)
151 {
152 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
153 if (__probable(len <= PKT_ADDR(ph)->pkt_qum_buf.buf_dlim)) {
154 PKT_ADDR(ph)->pkt_l2_len = len;
155 return 0;
156 }
157 return ERANGE;
158 }
159
160 __attribute__((always_inline))
161 static inline uint8_t
__packet_get_link_header_length(const uint64_t ph)162 __packet_get_link_header_length(const uint64_t ph)
163 {
164 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
165 return PKT_ADDR(ph)->pkt_l2_len;
166 }
167
/*
 * Mark a raw packet as a link-level broadcast (PKT_LINKF_BCAST).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_link_broadcast(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_BCAST;
	return 0;
}
176
177 __attribute__((always_inline))
178 static inline boolean_t
__packet_get_link_broadcast(const uint64_t ph)179 __packet_get_link_broadcast(const uint64_t ph)
180 {
181 PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
182 return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_BCAST) != 0;
183 }
184
/*
 * Mark a raw packet as a link-level multicast (PKT_LINKF_MCAST).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_link_multicast(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_MCAST;
	return 0;
}
193
194 __attribute__((always_inline))
195 static inline boolean_t
__packet_get_link_multicast(const uint64_t ph)196 __packet_get_link_multicast(const uint64_t ph)
197 {
198 PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
199 return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_MCAST) != 0;
200 }
201
/*
 * Mark a raw packet as carrying an Ethernet FCS (PKT_LINKF_ETHFCS).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_link_ethfcs(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_ETHFCS;
	return 0;
}
210
211 __attribute__((always_inline))
212 static inline boolean_t
__packet_get_link_ethfcs(const uint64_t ph)213 __packet_get_link_ethfcs(const uint64_t ph)
214 {
215 PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
216 return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_ETHFCS) != 0;
217 }
218
/*
 * Flag the packet's flow as background traffic (PKT_F_BACKGROUND).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_transport_traffic_background(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_BACKGROUND;
	return 0;
}
227
228 __attribute__((always_inline))
229 static inline boolean_t
__packet_get_transport_traffic_background(const uint64_t ph)230 __packet_get_transport_traffic_background(const uint64_t ph)
231 {
232 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
233 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_BACKGROUND) != 0;
234 }
235
/*
 * Flag the packet's flow as realtime traffic (PKT_F_REALTIME).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_transport_traffic_realtime(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_REALTIME;
	return 0;
}
244
245 __attribute__((always_inline))
246 static inline boolean_t
__packet_get_transport_traffic_realtime(const uint64_t ph)247 __packet_get_transport_traffic_realtime(const uint64_t ph)
248 {
249 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
250 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_REALTIME) != 0;
251 }
252
/*
 * Mark the packet as a transport retransmission (PKT_F_REXMT).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_transport_retransmit(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_REXMT;
	return 0;
}
261
262 __attribute__((always_inline))
263 static inline boolean_t
__packet_get_transport_retransmit(const uint64_t ph)264 __packet_get_transport_retransmit(const uint64_t ph)
265 {
266 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
267 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_REXMT) != 0;
268 }
269
/*
 * Mark the packet as the last one of a batch/flow (PKT_F_LAST_PKT).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_transport_last_packet(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_LAST_PKT;
	return 0;
}
278
/*
 * Mark the packet as the first of a group (PKT_F_OPT_GROUP_START).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_group_start(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_GROUP_START;
	return 0;
}
287
288 __attribute__((always_inline))
289 static inline boolean_t
__packet_get_group_start(const uint64_t ph)290 __packet_get_group_start(const uint64_t ph)
291 {
292 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
293 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_GROUP_START) != 0;
294 }
295
/*
 * Mark the packet as the last of a group (PKT_F_OPT_GROUP_END).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_group_end(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_GROUP_END;
	return 0;
}
304
305 __attribute__((always_inline))
306 static inline boolean_t
__packet_get_group_end(const uint64_t ph)307 __packet_get_group_end(const uint64_t ph)
308 {
309 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
310 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_GROUP_END) != 0;
311 }
312
/*
 * Fetch the packet's expiration timestamp into *ts.
 *
 * Returns ENOENT when no expire time has been set (PKT_F_OPT_EXPIRE_TS
 * clear), EINVAL when ts is NULL, else 0.  In-kernel, the packet options
 * live out-of-line (pkt_com_opt is a pointer); in userland they are
 * embedded in the metadata, hence the address-of.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_expire_time(const uint64_t ph, uint64_t *ts)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXPIRE_TS) == 0) {
		return ENOENT;
	}
	if (ts == NULL) {
		return EINVAL;
	}
	*ts = po->__po_expire_ts;
	return 0;
}
332
/*
 * Set (ts != 0) or clear (ts == 0) the packet's expiration timestamp,
 * keeping PKT_F_OPT_EXPIRE_TS in sync.  Always returns 0.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_expire_time(const uint64_t ph, const uint64_t ts)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if (ts != 0) {
		po->__po_expire_ts = ts;
		PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_EXPIRE_TS;
	} else {
		po->__po_expire_ts = 0;
		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_EXPIRE_TS;
	}
	return 0;
}
352
/*
 * Copy the token stored in the packet options into the caller's buffer.
 *
 * On входя entry *len is the caller buffer capacity; on success it is
 * updated to the stored token length and *type to the stored token type.
 * Returns EINVAL on NULL output pointers, insufficient capacity, or a
 * token type that does not fit in a uint8_t.
 */
__attribute__((always_inline))
static inline errno_t
__packet_opt_get_token(const struct __packet_opt *po, void *token,
    uint16_t *len, uint8_t *type)
{
	uint16_t tlen = po->__po_token_len;
	uint8_t ttype;

	if (token == NULL || len == NULL || type == NULL || tlen > *len || po->__po_token_type > UINT8_MAX) {
		return EINVAL;
	}
	ttype = (uint8_t)po->__po_token_type;

	ASSERT(tlen <= PKT_OPT_MAX_TOKEN_SIZE);
	/* __po_token is 8-byte aligned within the options struct */
	_CASSERT((__builtin_offsetof(struct __packet_opt, __po_token) % 8) == 0);
	bcopy(po->__po_token, token, tlen);
	*len = tlen;
	*type = ttype;
	return 0;
}
373
/*
 * Fetch the packet's opaque token.  Returns ENOENT when no token is set
 * or the stored token is not of the opaque type (e.g. it holds a packet
 * ID instead); otherwise propagates __packet_opt_get_token's result.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_token(const uint64_t ph, void *token, uint16_t *len)
{
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	uint8_t type;
	errno_t err;

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) == 0) {
		return ENOENT;
	}
	err = __packet_opt_get_token(po, token, len, &type);
	if ((err == 0) && (type != PKT_OPT_TOKEN_TYPE_OPAQUE)) {
		err = ENOENT;
	}
	return err;
}
396
/*
 * Store a token (up to PKT_OPT_MAX_TOKEN_SIZE bytes) in the packet
 * options and set PKT_F_OPT_TOKEN in *pflags.  A zero len clears the
 * token area and the flag.  Returns EINVAL on NULL token, oversized
 * len, or zero type.
 */
__attribute__((always_inline))
static inline errno_t
__packet_opt_set_token(struct __packet_opt *po, const void *token,
    const uint16_t len, const uint8_t type, volatile uint64_t *pflags)
{
	_CASSERT((__builtin_offsetof(struct __packet_opt, __po_token) % 8) == 0);
	if (len != 0) {
		if (token == NULL || len > PKT_OPT_MAX_TOKEN_SIZE ||
		    type == 0) {
			return EINVAL;
		}
		if (__probable(IS_P2ALIGNED(token, 8))) {
			/*
			 * Fast path: copy the token area as two 64-bit
			 * words.  NOTE(review): this reads 16 bytes from
			 * `token` even when len < 16 — confirm callers
			 * always provide at least 16 readable bytes.
			 */
			uint64_t *token64 = __DECONST(void *, token);
			po->__po_token_data[0] = *token64;
			po->__po_token_data[1] = *(token64 + 1);
		} else {
			bcopy(token, po->__po_token, len);
		}
		po->__po_token_len = len;
		po->__po_token_type = type;
		*pflags |= PKT_F_OPT_TOKEN;
	} else {
		/* clearing: zero the whole 16-byte token area */
		_CASSERT(sizeof(po->__po_token_data[0]) == 8);
		_CASSERT(sizeof(po->__po_token_data[1]) == 8);
		_CASSERT(sizeof(po->__po_token) == 16);
		po->__po_token_data[0] = 0;
		po->__po_token_data[1] = 0;
		po->__po_token_len = 0;
		po->__po_token_type = 0;
		*pflags &= ~PKT_F_OPT_TOKEN;
	}
	return 0;
}
430
/*
 * Store an opaque token on the packet (type PKT_OPT_TOKEN_TYPE_OPAQUE).
 * Thin wrapper over __packet_opt_set_token; see it for error returns.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_token(const uint64_t ph, const void *token, const uint16_t len)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	return __packet_opt_set_token(PKT_ADDR(ph)->pkt_com_opt, token, len,
	    PKT_OPT_TOKEN_TYPE_OPAQUE, &PKT_ADDR(ph)->pkt_pflags);
#else /* !KERNEL */
	return __packet_opt_set_token(&PKT_ADDR(ph)->pkt_com_opt, token, len,
	    PKT_OPT_TOKEN_TYPE_OPAQUE, &PKT_ADDR(ph)->pkt_pflags);
#endif /* !KERNEL */
}
444
/*
 * Fetch the packet ID stored in the token area.  Returns ENOENT when no
 * token is set, or when the stored token is not a packet ID of exactly
 * sizeof(packet_id_t) bytes.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_packetid(const uint64_t ph, packet_id_t *pktid)
{
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	uint16_t len = sizeof(packet_id_t);
	uint8_t type;
	errno_t err;

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) == 0) {
		return ENOENT;
	}
	err = __packet_opt_get_token(po, pktid, &len, &type);
	if ((err == 0) && ((type != PKT_OPT_TOKEN_TYPE_PACKET_ID) ||
	    (len != sizeof(packet_id_t)))) {
		err = ENOENT;
	}
	return err;
}
469
/*
 * Store a packet ID in the token area (type PKT_OPT_TOKEN_TYPE_PACKET_ID).
 * Thin wrapper over __packet_opt_set_token; see it for error returns.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_packetid(const uint64_t ph, const packet_id_t *pktid)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	return __packet_opt_set_token(PKT_ADDR(ph)->pkt_com_opt, pktid,
	    sizeof(packet_id_t), PKT_OPT_TOKEN_TYPE_PACKET_ID,
	    &PKT_ADDR(ph)->pkt_pflags);
#else /* !KERNEL */
	return __packet_opt_set_token(&PKT_ADDR(ph)->pkt_com_opt, pktid,
	    sizeof(packet_id_t), PKT_OPT_TOKEN_TYPE_PACKET_ID,
	    &PKT_ADDR(ph)->pkt_pflags);
#endif /* !KERNEL */
}
485
/*
 * Fetch the 802.1Q tag recorded on a raw packet.  Either output pointer
 * may be NULL if the caller does not need that value.  *tag_in_pkt
 * reports whether the tag is also present in the packet bytes
 * (PKT_F_OPT_VLTAG_IN_PKT).  Returns ENOENT if no tag was recorded.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_vlan_tag(const uint64_t ph, uint16_t *vlan_tag,
    boolean_t *tag_in_pkt)
{
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	uint64_t pflags;

	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	pflags = PKT_ADDR(ph)->pkt_pflags;
	if ((pflags & PKT_F_OPT_VLTAG) == 0) {
		return ENOENT;
	}
	if (vlan_tag != NULL) {
		*vlan_tag = po->__po_vlan_tag;
	}
	if (tag_in_pkt != NULL) {
		*tag_in_pkt = ((pflags & PKT_F_OPT_VLTAG_IN_PKT) != 0);
	}
	return 0;
}
511
/*
 * Record an 802.1Q tag on a raw packet, setting PKT_F_OPT_VLTAG (and
 * PKT_F_OPT_VLTAG_IN_PKT when the tag also exists in the packet bytes).
 * Always returns 0.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_vlan_tag(const uint64_t ph, const uint16_t vlan_tag,
    const boolean_t tag_in_pkt)
{
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */

	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_VLTAG;
	po->__po_vlan_tag = vlan_tag;

	if (tag_in_pkt) {
		PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_VLTAG_IN_PKT;
	}
	return 0;
}
532
533 __attribute__((always_inline))
534 static inline uint16_t
__packet_get_vlan_id(const uint16_t vlan_tag)535 __packet_get_vlan_id(const uint16_t vlan_tag)
536 {
537 return EVL_VLANOFTAG(vlan_tag);
538 }
539
540 __attribute__((always_inline))
541 static inline uint8_t
__packet_get_vlan_priority(const uint16_t vlan_tag)542 __packet_get_vlan_priority(const uint16_t vlan_tag)
543 {
544 return EVL_PRIOFTAG(vlan_tag);
545 }
546
547 #ifdef KERNEL
/*
 * Kernel-only: mark the packet as one that woke the system
 * (PKT_F_WAKE_PKT).
 */
__attribute__((always_inline))
static inline void
__packet_set_wake_flag(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_WAKE_PKT;
}
555 #endif
556
557 __attribute__((always_inline))
558 static inline boolean_t
__packet_get_wake_flag(const uint64_t ph)559 __packet_get_wake_flag(const uint64_t ph)
560 {
561 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_WAKE_PKT) != 0;
562 }
563
564 __attribute__((always_inline))
565 static inline void
__packet_set_keep_alive(const uint64_t ph,const boolean_t is_keep_alive)566 __packet_set_keep_alive(const uint64_t ph, const boolean_t is_keep_alive)
567 {
568 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
569 if (is_keep_alive) {
570 PKT_ADDR(ph)->pkt_pflags |= PKT_F_KEEPALIVE;
571 } else {
572 PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_KEEPALIVE;
573 }
574 }
575
576 __attribute__((always_inline))
577 static inline boolean_t
__packet_get_keep_alive(const uint64_t ph)578 __packet_get_keep_alive(const uint64_t ph)
579 {
580 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_KEEPALIVE) != 0;
581 }
582
583 __attribute__((always_inline))
584 static inline boolean_t
__packet_get_truncated(const uint64_t ph)585 __packet_get_truncated(const uint64_t ph)
586 {
587 PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
588 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_TRUNCATED) != 0;
589 }
590
591 #ifdef KERNEL
592 __attribute__((always_inline))
593 static inline boolean_t
__packet_get_transport_new_flow(const uint64_t ph)594 __packet_get_transport_new_flow(const uint64_t ph)
595 {
596 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
597 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_NEW_FLOW) != 0;
598 }
599
600 __attribute__((always_inline))
601 static inline boolean_t
__packet_get_transport_last_packet(const uint64_t ph)602 __packet_get_transport_last_packet(const uint64_t ph)
603 {
604 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
605 return (PKT_ADDR(ph)->pkt_pflags & PKT_F_LAST_PKT) != 0;
606 }
607 #endif /* KERNEL */
608
609 __attribute__((always_inline))
610 static inline int
__packet_set_service_class(const uint64_t ph,const uint32_t sc)611 __packet_set_service_class(const uint64_t ph, const uint32_t sc)
612 {
613 int err = 0;
614
615 _CASSERT(sizeof(QUM_ADDR(ph)->qum_svc_class == sizeof(uint32_t)));
616
617 switch (sc) {
618 case PKT_SC_BE:
619 case PKT_SC_BK_SYS:
620 case PKT_SC_BK:
621 case PKT_SC_RD:
622 case PKT_SC_OAM:
623 case PKT_SC_AV:
624 case PKT_SC_RV:
625 case PKT_SC_VI:
626 case PKT_SC_SIG:
627 case PKT_SC_VO:
628 case PKT_SC_CTL:
629 QUM_ADDR(ph)->qum_svc_class = sc;
630 break;
631
632 default:
633 err = EINVAL;
634 break;
635 }
636
637 return err;
638 }
639
640 __attribute__((always_inline))
641 static inline uint32_t
__packet_get_service_class(const uint64_t ph)642 __packet_get_service_class(const uint64_t ph)
643 {
644 uint32_t sc;
645
646 _CASSERT(sizeof(QUM_ADDR(ph)->qum_svc_class == sizeof(uint32_t)));
647
648 switch (QUM_ADDR(ph)->qum_svc_class) {
649 case PKT_SC_BE: /* most likely best effort */
650 case PKT_SC_BK_SYS:
651 case PKT_SC_BK:
652 case PKT_SC_RD:
653 case PKT_SC_OAM:
654 case PKT_SC_AV:
655 case PKT_SC_RV:
656 case PKT_SC_VI:
657 case PKT_SC_SIG:
658 case PKT_SC_VO:
659 case PKT_SC_CTL:
660 sc = QUM_ADDR(ph)->qum_svc_class;
661 break;
662
663 default:
664 sc = PKT_SC_BE;
665 break;
666 }
667
668 return sc;
669 }
670
671 __attribute__((always_inline))
672 static inline void
__packet_set_comp_gencnt(const uint64_t ph,const uint32_t gencnt)673 __packet_set_comp_gencnt(const uint64_t ph, const uint32_t gencnt)
674 {
675 _CASSERT(sizeof(PKT_ADDR(ph)->pkt_comp_gencnt == sizeof(uint32_t)));
676 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
677
678 PKT_ADDR(ph)->pkt_comp_gencnt = gencnt;
679 }
680
681 __attribute__((always_inline))
682 static inline uint32_t
__packet_get_comp_gencnt(const uint64_t ph)683 __packet_get_comp_gencnt(const uint64_t ph)
684 {
685 _CASSERT(sizeof(PKT_ADDR(ph)->pkt_comp_gencnt == sizeof(uint32_t)));
686 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
687
688 return PKT_ADDR(ph)->pkt_comp_gencnt;
689 }
690
691
692 __attribute__((always_inline))
693 static inline int
__packet_set_traffic_class(const uint64_t ph,const uint32_t tc)694 __packet_set_traffic_class(const uint64_t ph, const uint32_t tc)
695 {
696 uint32_t val = PKT_TC2SCVAL(tc); /* just the val portion */
697 uint32_t sc;
698
699 switch (val) {
700 case PKT_SCVAL_BK_SYS:
701 sc = PKT_SC_BK_SYS;
702 break;
703 case PKT_SCVAL_BK:
704 sc = PKT_SC_BK;
705 break;
706 case PKT_SCVAL_BE:
707 sc = PKT_SC_BE;
708 break;
709 case PKT_SCVAL_RD:
710 sc = PKT_SC_RD;
711 break;
712 case PKT_SCVAL_OAM:
713 sc = PKT_SC_OAM;
714 break;
715 case PKT_SCVAL_AV:
716 sc = PKT_SC_AV;
717 break;
718 case PKT_SCVAL_RV:
719 sc = PKT_SC_RV;
720 break;
721 case PKT_SCVAL_VI:
722 sc = PKT_SC_VI;
723 break;
724 case PKT_SCVAL_SIG:
725 sc = PKT_SC_SIG;
726 break;
727 case PKT_SCVAL_VO:
728 sc = PKT_SC_VO;
729 break;
730 case PKT_SCVAL_CTL:
731 sc = PKT_SC_CTL;
732 break;
733 default:
734 sc = PKT_SC_BE;
735 break;
736 }
737
738 return __packet_set_service_class(ph, sc);
739 }
740
741 __attribute__((always_inline))
742 static inline uint32_t
__packet_get_traffic_class(const uint64_t ph)743 __packet_get_traffic_class(const uint64_t ph)
744 {
745 return PKT_SC2TC(__packet_get_service_class(ph));
746 }
747
/*
 * Record checksum-offload metadata on the packet.  flags select the
 * offload semantics; for tx, start/stuff_val are the offsets where the
 * checksum computation begins and where the result is stored.  For rx,
 * start is the offset and stuff_val is the partial checksum value.
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_inet_checksum(const uint64_t ph, const packet_csum_flags_t flags,
    const uint16_t start, const uint16_t stuff_val, boolean_t tx)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);

	PKT_ADDR(ph)->pkt_csum_flags = flags;

	if (tx) {
		PKT_ADDR(ph)->pkt_csum_tx_start_off = start;
		PKT_ADDR(ph)->pkt_csum_tx_stuff_off = stuff_val;
	} else {
		PKT_ADDR(ph)->pkt_csum_rx_start_off = start;
		PKT_ADDR(ph)->pkt_csum_rx_value = stuff_val;
	}
	return 0;
}
766
/*
 * Fetch checksum-offload metadata from the packet.  Either output
 * pointer may be NULL.  Returns the checksum flags; for tx the start
 * and stuff offsets are returned, for rx the start offset and the
 * partial checksum value.
 */
__attribute__((always_inline))
static inline packet_csum_flags_t
__packet_get_inet_checksum(const uint64_t ph, uint16_t *start,
    uint16_t *stuff_val, boolean_t tx)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);

	if (tx) {
		if (__probable(start != NULL)) {
			*start = PKT_ADDR(ph)->pkt_csum_tx_start_off;
		}
		if (__probable(stuff_val != NULL)) {
			*stuff_val = PKT_ADDR(ph)->pkt_csum_tx_stuff_off;
		}
	} else {
		if (__probable(start != NULL)) {
			*start = PKT_ADDR(ph)->pkt_csum_rx_start_off;
		}
		if (__probable(stuff_val != NULL)) {
			*stuff_val = PKT_ADDR(ph)->pkt_csum_rx_value;
		}
	}
	return PKT_ADDR(ph)->pkt_csum_flags;
}
791
/*
 * Store the flow UUID on the quantum, choosing the widest copy the
 * caller's pointer alignment allows (two 64-bit stores, four 32-bit
 * stores, or a byte copy).
 */
__attribute__((always_inline))
static inline void
__packet_set_flow_uuid(const uint64_t ph, const uuid_t flow_uuid)
{
	struct __quantum *q = &QUM_ADDR(ph)->qum_com;

	/*
	 * Anticipate a nicely (8-bytes) aligned UUID from caller;
	 * the one in qum_flow_id is always 8-byte aligned.
	 */
	if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint64_t)))) {
		uint64_t *id_64 = (uint64_t *)(uintptr_t)flow_uuid;
		q->__q_flow_id_val64[0] = id_64[0];
		q->__q_flow_id_val64[1] = id_64[1];
	} else if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint32_t)))) {
		uint32_t *id_32 = (uint32_t *)(uintptr_t)flow_uuid;
		q->__q_flow_id_val32[0] = id_32[0];
		q->__q_flow_id_val32[1] = id_32[1];
		q->__q_flow_id_val32[2] = id_32[2];
		q->__q_flow_id_val32[3] = id_32[3];
	} else {
		bcopy(flow_uuid, q->__q_flow_id, sizeof(uuid_t));
	}
}
816
/*
 * Copy the quantum's flow UUID into flow_uuid, mirroring the
 * alignment-aware strategy of __packet_set_flow_uuid.
 */
__attribute__((always_inline))
static inline void
__packet_get_flow_uuid(const uint64_t ph, uuid_t flow_uuid)
{
	struct __quantum *q = &QUM_ADDR(ph)->qum_com;

	/*
	 * Anticipate a nicely (8-bytes) aligned UUID from caller;
	 * the one in qum_flow_id is always 8-byte aligned.
	 */
	if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint64_t)))) {
		uint64_t *id_64 = (uint64_t *)(uintptr_t)flow_uuid;
		id_64[0] = q->__q_flow_id_val64[0];
		id_64[1] = q->__q_flow_id_val64[1];
	} else if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint32_t)))) {
		uint32_t *id_32 = (uint32_t *)(uintptr_t)flow_uuid;
		id_32[0] = q->__q_flow_id_val32[0];
		id_32[1] = q->__q_flow_id_val32[1];
		id_32[2] = q->__q_flow_id_val32[2];
		id_32[3] = q->__q_flow_id_val32[3];
	} else {
		bcopy(q->__q_flow_id, flow_uuid, sizeof(uuid_t));
	}
}
841
842 __attribute__((always_inline))
843 static inline void
__packet_clear_flow_uuid(const uint64_t ph)844 __packet_clear_flow_uuid(const uint64_t ph)
845 {
846 struct __quantum *q = &QUM_ADDR(ph)->qum_com;
847 q->__q_flow_id_val64[0] = 0;
848 q->__q_flow_id_val64[1] = 0;
849 }
850
851 __attribute__((always_inline))
852 static inline uint8_t
__packet_get_aggregation_type(const uint64_t ph)853 __packet_get_aggregation_type(const uint64_t ph)
854 {
855 _CASSERT(sizeof(PKT_ADDR(ph)->pkt_aggr_type == sizeof(uint8_t)));
856 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
857
858 return PKT_ADDR(ph)->pkt_aggr_type;
859 }
860
861 __attribute__((always_inline))
862 static inline uint32_t
__packet_get_data_length(const uint64_t ph)863 __packet_get_data_length(const uint64_t ph)
864 {
865 return QUM_ADDR(ph)->qum_len;
866 }
867
/*
 * Return the number of buflets attached to the handle: pkt_bufs_cnt for
 * a packet, always 1 for a quantum.  Unknown handle types return 0 in
 * userland and panic in the kernel.
 */
__attribute__((always_inline))
static inline uint16_t
__packet_get_buflet_count(const uint64_t ph)
{
	uint16_t bcnt = 0;

	switch (SK_PTR_TYPE(ph)) {
	case NEXUS_META_TYPE_PACKET:
		bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;
#ifdef KERNEL
		/* zero is only legal for pools with on-demand buffers */
		VERIFY(bcnt != 0 ||
		    PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
#else /* !KERNEL */
		/*
		 * Handle the case where the metadata region gets
		 * redirected to anonymous zero-filled pages at
		 * defunct time. There's always 1 buflet in the
		 * packet metadata, so pretend that's the count.
		 */
		if (__improbable(bcnt == 0)) {
			bcnt = 1;
		}
#endif /* !KERNEL */
		break;
	case NEXUS_META_TYPE_QUANTUM:
		bcnt = 1;
		break;
	default:
#ifdef KERNEL
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
#endif /* KERNEL */
		break;
	}
	return bcnt;
}
905
/*
 * Append buflet bnew0 after bprev0 (which must be the current tail) and
 * bump pkt_bufs_cnt.  bprev0 == NULL is only legal when the packet has
 * no buflets yet; the new buflet is then linked off the embedded
 * pkt_qum_buf.  Kernel builds enforce preconditions with VERIFY;
 * userland returns EINVAL instead.  Returns 0 on success.
 */
__attribute__((always_inline))
static inline int
__packet_add_buflet(const uint64_t ph, const void *bprev0, const void *bnew0)
{
	uint16_t bcnt;

#ifdef KERNEL
	kern_buflet_t bprev = __DECONST(kern_buflet_t, bprev0);
	kern_buflet_t bnew = __DECONST(kern_buflet_t, bnew0);

	VERIFY(PKT_ADDR(ph) && bnew && (bnew != bprev));
	VERIFY(PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
#else /* !KERNEL */
	buflet_t bprev = __DECONST(buflet_t, bprev0);
	buflet_t bnew = __DECONST(buflet_t, bnew0);

	if (__improbable(!PKT_ADDR(ph) || !bnew || (bnew == bprev))) {
		return EINVAL;
	}
#endif /* !KERNEL */

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;

	/* bprev may be NULL only for an empty chain; enforce the max */
#ifdef KERNEL
	VERIFY((bprev != NULL || bcnt == 0) &&
	    (bcnt < PKT_ADDR(ph)->pkt_bufs_max));
#else /* !KERNEL */
	if (__improbable(bcnt >= PKT_ADDR(ph)->pkt_bufs_max) ||
	    (bprev == NULL && bcnt != 0)) {
		return EINVAL;
	}
#endif /* !KERNEL */

#ifdef KERNEL
#if DEVELOPMENT || DEBUG
	/* check if bprev is the last buflet in the chain */
	struct __kern_buflet *pbft, *kbft;
	int n = bcnt;

	PKT_GET_FIRST_BUFLET(PKT_ADDR(ph), bcnt, pbft);
	kbft = pbft;

	while ((kbft != NULL) && n--) {
		pbft = kbft;
		kbft = __DECONST(struct __kern_buflet *, kbft->buf_nbft_addr);
	}
	ASSERT(n == 0);
	ASSERT(bprev == pbft);
#endif /* DEVELOPMENT || DEBUG */
#endif /* KERNEL */

	/* empty chain: link the new buflet off the embedded buflet */
	if (bprev == NULL) {
		bprev = &PKT_ADDR(ph)->pkt_qum_buf;
	}
#ifdef KERNEL
	KBUF_LINK(bprev, bnew);
#else /* !KERNEL */
	UBUF_LINK(bprev, bnew);
#endif /* !KERNEL */

	/* deconst cast: pkt_bufs_cnt is read-only to normal accessors */
	*(uint16_t *)(uintptr_t)&PKT_ADDR(ph)->pkt_bufs_cnt = ++bcnt;
	return 0;
}
970
/*
 * Return the buflet following bprev0 on the handle's chain, or the
 * first buflet when bprev0 is NULL; NULL marks the end of the chain.
 * Works for both packets (possibly multi-buflet) and quanta (single
 * buflet).
 */
__attribute__((always_inline))
static inline void *
__packet_get_next_buflet(const uint64_t ph, const void *bprev0)
{
#ifdef KERNEL
	kern_buflet_t bprev = __DECONST(kern_buflet_t, bprev0);
#else /* !KERNEL */
	buflet_t bprev = __DECONST(buflet_t, bprev0);
#endif /* !KERNEL */
	void *bcur = NULL;

	switch (SK_PTR_TYPE(ph)) {
	case NEXUS_META_TYPE_PACKET: {
		uint32_t bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;
#ifdef KERNEL
		ASSERT(bcnt != 0 ||
		    PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
#else /* !KERNEL */
		/*
		 * Handle the case where the metadata region gets
		 * redirected to anonymous zero-filled pages at
		 * defunct time. There's always 1 buflet in the
		 * packet metadata, so pretend that's the count.
		 */
		if (__improbable(bcnt == 0)) {
			bcnt = 1;
			bprev = NULL;
		}
#endif /* !KERNEL */
		PKT_GET_NEXT_BUFLET(PKT_ADDR(ph), bcnt, BLT_ADDR(bprev), bcur);
		break;
	}
	case NEXUS_META_TYPE_QUANTUM:
		QUM_GET_NEXT_BUFLET(QUM_ADDR(ph), BLT_ADDR(bprev), bcur);
		break;
	default:
#ifdef KERNEL
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
#endif /* KERNEL */
		break;
	}
	return bcur;
}
1016
1017 __attribute__((always_inline))
1018 static inline uint8_t
__packet_get_segment_count(const uint64_t ph)1019 __packet_get_segment_count(const uint64_t ph)
1020 {
1021 _CASSERT(sizeof(PKT_ADDR(ph)->pkt_seg_cnt == sizeof(uint8_t)));
1022 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1023
1024 return PKT_ADDR(ph)->pkt_seg_cnt;
1025 }
1026
/*
 * Return the buflet's data limit (buf_dlim): the number of bytes of the
 * underlying buffer usable for data.
 */
__attribute__((always_inline))
static inline uint16_t
__buflet_get_data_limit(const void *buf)
{
	return BLT_ADDR(buf)->buf_dlim;
}
1033
#ifdef KERNEL
/*
 * Set the buflet's data limit; rejects values beyond the backing buffer
 * object's limit with ERANGE.  Kernel-only: userland buflets are immutable
 * in this respect.
 */
__attribute__((always_inline))
static inline errno_t
__buflet_set_data_limit(const void *buf, const uint16_t dlim)
{
	/* buffer region is always marked as shareable */
	ASSERT(BLT_ADDR(buf)->buf_ctl->bc_flags & SKMEM_BUFCTL_SHAREOK);

	/* full bounds checking will be performed during finalize */
	if (__improbable((uint32_t)dlim > BLT_ADDR(buf)->buf_objlim)) {
		return ERANGE;
	}

	_CASSERT(sizeof(BLT_ADDR(buf)->buf_dlim) == sizeof(uint16_t));
	/* deconst */
	*(uint16_t *)(uintptr_t)&BLT_ADDR(buf)->buf_dlim = dlim;
	return 0;
}
#endif /* KERNEL */
1052
/*
 * Return the buflet's data offset (buf_doff): the offset of the first
 * data byte from the start of the buffer.
 */
__attribute__((always_inline))
static inline uint16_t
__buflet_get_data_offset(const void *buf)
{
	return BLT_ADDR(buf)->buf_doff;
}
1059
1060 /*
1061 * ******************************************************************
1062 * Checks in __packet_finalize for packet finalized from userland
1063 * ******************************************************************
1064 * +-------+---------------------------+---------------------------+
1065 * | NEXUS_META_SUBTYPE_RAW | NEXUS_META_SUBTYPE_PAYLOAD|
1066 * |-------+---------------------------+---------------------------+
1067 * |buflet | (bdoff + len) <= dlim | (bdoff + len) <= dlim |
1068 * |l2_off | l2 == bdoff && l2 < bdlim | l2 = l3 = 0 && doff == 0 |
1069 * |l3_off | l3 = l2 | l3 == 0 |
1070 * |l4_off | l4 = l3 = l2 | l4 = l3 = 0 |
1071 * +-------+---------------------------+---------------------------+
1072 *
1073 * ******************************************************************
1074 * Checks in __packet_finalize for packet finalized from kernel
1075 * ******************************************************************
1076 * +-------+---------------------------+---------------------------+
1077 * | NEXUS_META_SUBTYPE_RAW | NEXUS_META_SUBTYPE_PAYLOAD|
1078 * |-------+---------------------------+---------------------------+
1079 * |buflet | (bdoff + len) <= dlim | (bdoff + len) <= dlim |
1080 * |l2_off | l2 == bdoff && l2 < bdlim | l2 = l3 = 0 && doff == 0 |
 * |l3_off | l3 >= l2 && l3 < bdlim | l3 == 0                   |
1082 * |l4_off | l4 = l3 | l4 = l3 = 0 |
1083 * +-------+---------------------------+---------------------------+
1084 *
1085 */
/*
 * Validate a packet/quantum and mark it finalized (QUM_F_FINALIZED), or
 * mark it dropped (QUM_F_DROPPED) and return an error.  Walks the buflet
 * chain accumulating qum_len, verifies every buflet is within bounds, and
 * checks the header-offset invariants tabulated above.
 *
 * Returns 0 on success; ERANGE/EINVAL on validation failure.
 */
__attribute__((always_inline))
static inline int
__packet_finalize(const uint64_t ph)
{
	void *bcur = NULL, *bprev = NULL;
	uint32_t len, bcnt, bdoff0, bdlim0;
	int err = 0;

#ifdef KERNEL
	ASSERT(QUM_ADDR(ph)->qum_qflags & QUM_F_INTERNALIZED);
#endif /* KERNEL */
	QUM_ADDR(ph)->qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);

	bcnt = __packet_get_buflet_count(ph);
	len = QUM_ADDR(ph)->qum_len = 0;

	/* walk the buflet chain, bounds-checking and summing data lengths */
	while (bcnt--) {
		bcur = __packet_get_next_buflet(ph, bprev);

#ifdef KERNEL
		ASSERT(bcur != NULL);
		ASSERT(BLT_ADDR(bcur)->buf_addr != 0);
#else /* !KERNEL */
		if (__improbable(bcur == NULL)) {
			err = ERANGE;
			break;
		}
#endif /* !KERNEL */

		/*
		 * save data offset from the first buflet
		 * NOTE(review): assumes the chain has at least one buflet;
		 * with bcnt == 0 bdoff0/bdlim0 would be read uninitialized
		 * in the RAW checks below — confirm callers guarantee this.
		 */
		if (bprev == NULL) {
			bdoff0 = __buflet_get_data_offset(bcur);
			bdlim0 = __buflet_get_data_limit(bcur);
		}

#ifndef KERNEL
		if (__improbable(!BUF_IN_RANGE(BLT_ADDR(bcur)))) {
			err = ERANGE;
			break;
		}
#else /* KERNEL */
		/* a packet with an attached mbuf may have an empty buffer */
		if (__improbable(!BUF_IN_RANGE(BLT_ADDR(bcur)) &&
		    !PKT_HAS_ATTACHED_MBUF(ph))) {
			err = ERANGE;
			break;
		}
#endif /* KERNEL */
		len += BLT_ADDR(bcur)->buf_dlen;
		bprev = bcur;
	}

	if (__improbable(err != 0)) {
		goto done;
	}

	switch (SK_PTR_TYPE(ph)) {
	case NEXUS_META_TYPE_PACKET:
		/* validate header offsets in packet */
		switch (SK_PTR_SUBTYPE(ph)) {
		case NEXUS_META_SUBTYPE_RAW:
			/* ensure that L2 == bdoff && L2 < bdlim */
			if (__improbable((PKT_ADDR(ph)->pkt_headroom !=
			    bdoff0) || (PKT_ADDR(ph)->pkt_headroom >=
			    bdlim0))) {
				err = ERANGE;
				goto done;
			}
#ifndef KERNEL
			/* Overwrite L2 len for raw packets from user space */
			PKT_ADDR(ph)->pkt_l2_len = 0;
#else /* KERNEL */
			/* ensure that L3 >= L2 && L3 < bdlim */
			if (__improbable((PKT_ADDR(ph)->pkt_headroom +
			    PKT_ADDR(ph)->pkt_l2_len) >= bdlim0)) {
				err = ERANGE;
				goto done;
			}
#endif /* KERNEL */
			break;
		case NEXUS_META_SUBTYPE_PAYLOAD:
			/*
			 * For payload packet there is no concept of headroom
			 * and L3 offset should always be 0
			 */
			if (__improbable((PKT_ADDR(ph)->pkt_headroom != 0) ||
			    (bdoff0 != 0) ||
			    (PKT_ADDR(ph)->pkt_l2_len != 0))) {
				err = ERANGE;
				goto done;
			}
			break;
		default:
#ifdef KERNEL
			VERIFY(0);
			/* NOTREACHED */
			__builtin_unreachable();
#endif /* KERNEL */
			break;
		}

		/* validate any options the packet claims to carry */
		if (__improbable(PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_DATA)) {
#ifdef KERNEL
			struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
			struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
			if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXPIRE_TS) &&
			    po->__po_expire_ts == 0) {
				err = EINVAL;
				goto done;
			}
			if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) &&
			    po->__po_token_len == 0) {
				err = EINVAL;
				goto done;
			}
			ASSERT(err == 0);
		}

		/*
		 * NOTE: we don't need the validation for total packet length
		 * as checking if each buflet is in range and that
		 * (pkt_headroom == bdoff0), should cover this check.
		 */
		break;

	default:
		/* nothing to do currently for quantum */
		break;
	}

done:
	if (__probable(err == 0)) {
		QUM_ADDR(ph)->qum_len = len;
		QUM_ADDR(ph)->qum_qflags |= QUM_F_FINALIZED;
	} else {
		QUM_ADDR(ph)->qum_len = 0;
		QUM_ADDR(ph)->qum_qflags |= QUM_F_DROPPED;
	}

	return err;
}
1228
/*
 * Return non-zero iff the packet/quantum has been finalized.  Note that
 * the raw flag value (not a normalized TRUE) is returned.
 */
__attribute__((always_inline))
static inline boolean_t
__packet_is_finalized(const uint64_t ph)
{
	return QUM_ADDR(ph)->qum_qflags & QUM_F_FINALIZED;
}
1235
1236 #ifdef KERNEL
1237 /*
1238 * function to initialize a packet with mbuf chain.
1239 * Apart from the attached mbuf, the packet can also be used to convey
1240 * additional metadata like the headroom and L2 header length.
1241 * For a packet with attached mbuf, the pkt_length conveys the length of
1242 * the attached mbuf. If the data copied is partial then PKT_F_TRUNCATED is
1243 * also set.
1244 */
1245 __attribute__((always_inline))
1246 static inline int
__packet_initialize_with_mbufchain(struct __kern_packet * pkt,struct mbuf * mbuf,uint8_t headroom,uint8_t l2len)1247 __packet_initialize_with_mbufchain(struct __kern_packet *pkt, struct mbuf *mbuf,
1248 uint8_t headroom, uint8_t l2len)
1249 {
1250 VERIFY(METADATA_TYPE(pkt) == NEXUS_META_TYPE_PACKET);
1251 VERIFY(pkt->pkt_qum.qum_qflags & QUM_F_INTERNALIZED);
1252 VERIFY((pkt->pkt_pflags & PKT_F_MBUF_MASK) == 0);
1253 VERIFY((pkt->pkt_pflags & PKT_F_PKT_DATA) == 0);
1254 VERIFY(pkt->pkt_mbuf == NULL);
1255
1256 pkt->pkt_qum.qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);
1257 pkt->pkt_mbuf = mbuf;
1258 pkt->pkt_pflags |= (PKT_F_MBUF_DATA | PKT_F_TRUNCATED);
1259 pkt->pkt_headroom = headroom;
1260 pkt->pkt_l2_len = l2len;
1261 pkt->pkt_length = m_pktlen(mbuf);
1262 pkt->pkt_qum_buf.buf_dlen = 0;
1263 pkt->pkt_qum_buf.buf_doff = 0;
1264 pkt->pkt_qum.qum_qflags |= QUM_F_FINALIZED;
1265 return 0;
1266 }
1267
1268 __attribute__((always_inline))
1269 static inline int
__packet_initialize_with_mbuf(struct __kern_packet * pkt,struct mbuf * mbuf,uint8_t headroom,uint8_t l2len)1270 __packet_initialize_with_mbuf(struct __kern_packet *pkt, struct mbuf *mbuf,
1271 uint8_t headroom, uint8_t l2len)
1272 {
1273 __packet_initialize_with_mbufchain(pkt, mbuf, headroom, l2len);
1274 VERIFY(mbuf->m_nextpkt == NULL);
1275 return 0;
1276 }
1277
1278 /*
1279 * function to finalize a packet with attached mbuf.
1280 */
1281 __attribute__((always_inline))
1282 static inline int
__packet_finalize_with_mbuf(struct __kern_packet * pkt)1283 __packet_finalize_with_mbuf(struct __kern_packet *pkt)
1284 {
1285 uint16_t bdoff, bdlim, bdlen;
1286 struct __kern_buflet *buf;
1287 int err = 0;
1288
1289 VERIFY(METADATA_TYPE(pkt) == NEXUS_META_TYPE_PACKET);
1290 VERIFY((pkt->pkt_pflags & (PKT_F_MBUF_DATA | PKT_F_PKT_DATA)) ==
1291 PKT_F_MBUF_DATA);
1292 VERIFY(pkt->pkt_mbuf != NULL);
1293 ASSERT(pkt->pkt_qum.qum_qflags & QUM_F_INTERNALIZED);
1294 VERIFY(pkt->pkt_bufs_cnt == 1);
1295 PKT_GET_FIRST_BUFLET(pkt, pkt->pkt_bufs_cnt, buf);
1296 ASSERT(buf->buf_addr != 0);
1297
1298 pkt->pkt_qum.qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);
1299 pkt->pkt_pflags &= ~PKT_F_TRUNCATED;
1300 bdlen = buf->buf_dlen;
1301 bdlim = buf->buf_dlim;
1302 bdoff = buf->buf_doff;
1303 if (__improbable(!BUF_IN_RANGE(buf))) {
1304 err = ERANGE;
1305 goto done;
1306 }
1307
1308 /* validate header offsets in packet */
1309 switch (METADATA_SUBTYPE(pkt)) {
1310 case NEXUS_META_SUBTYPE_RAW:
1311 if (__improbable((pkt->pkt_headroom != bdoff) ||
1312 (pkt->pkt_headroom >= bdlim))) {
1313 err = ERANGE;
1314 goto done;
1315 }
1316 if (__improbable((pkt->pkt_headroom +
1317 pkt->pkt_l2_len) >= bdlim)) {
1318 err = ERANGE;
1319 goto done;
1320 }
1321 break;
1322
1323 case NEXUS_META_SUBTYPE_PAYLOAD:
1324 /*
1325 * For payload packet there is no concept of headroom.
1326 */
1327 if (__improbable((pkt->pkt_headroom != 0) || (bdoff != 0) ||
1328 (pkt->pkt_l2_len != 0))) {
1329 err = ERANGE;
1330 goto done;
1331 }
1332 break;
1333
1334 default:
1335 VERIFY(0);
1336 /* NOTREACHED */
1337 __builtin_unreachable();
1338 break;
1339 }
1340
1341
1342 if (__improbable(pkt->pkt_pflags & PKT_F_OPT_DATA)) {
1343 struct __packet_opt *po = pkt->pkt_com_opt;
1344
1345 if ((pkt->pkt_pflags & PKT_F_OPT_EXPIRE_TS) &&
1346 po->__po_expire_ts == 0) {
1347 err = EINVAL;
1348 goto done;
1349 }
1350 if ((pkt->pkt_pflags & PKT_F_OPT_TOKEN) &&
1351 po->__po_token_len == 0) {
1352 err = EINVAL;
1353 goto done;
1354 }
1355 }
1356 ASSERT(err == 0);
1357
1358 done:
1359 if (__probable(err == 0)) {
1360 pkt->pkt_length = (uint32_t)m_pktlen(pkt->pkt_mbuf);
1361 if (bdlen < pkt->pkt_length) {
1362 pkt->pkt_pflags |= PKT_F_TRUNCATED;
1363 }
1364 pkt->pkt_qum.qum_qflags |= QUM_F_FINALIZED;
1365 } else {
1366 pkt->pkt_length = 0;
1367 pkt->pkt_qum.qum_qflags |= QUM_F_DROPPED;
1368 }
1369
1370 return err;
1371 }
1372
/*
 * Return the metadata object index of the packet/quantum within its pool.
 */
__attribute__((always_inline))
static inline uint32_t
__packet_get_object_index(const uint64_t ph)
{
	return METADATA_IDX(QUM_ADDR(ph));
}
1379
1380 __attribute__((always_inline))
1381 static inline errno_t
__packet_get_timestamp(const uint64_t ph,uint64_t * ts,boolean_t * valid)1382 __packet_get_timestamp(const uint64_t ph, uint64_t *ts, boolean_t *valid)
1383 {
1384 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1385
1386 if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TS_VALID) != 0) {
1387 if (valid != NULL) {
1388 *valid = TRUE;
1389 }
1390 *ts = PKT_ADDR(ph)->pkt_timestamp;
1391 } else {
1392 if (valid != NULL) {
1393 *valid = FALSE;
1394 }
1395 *ts = 0;
1396 }
1397
1398 return 0;
1399 }
1400
1401 __attribute__((always_inline))
1402 static inline errno_t
__packet_set_timestamp(const uint64_t ph,uint64_t ts,boolean_t valid)1403 __packet_set_timestamp(const uint64_t ph, uint64_t ts, boolean_t valid)
1404 {
1405 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1406
1407 if (valid) {
1408 PKT_ADDR(ph)->pkt_timestamp = ts;
1409 PKT_ADDR(ph)->pkt_pflags |= PKT_F_TS_VALID;
1410 } else {
1411 PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_TS_VALID;
1412 PKT_ADDR(ph)->pkt_timestamp = 0;
1413 }
1414
1415 return 0;
1416 }
1417
1418 __attribute__((always_inline))
1419 static inline errno_t
__packet_get_tx_completion_data(const uint64_t ph,uintptr_t * cb_arg,uintptr_t * cb_data)1420 __packet_get_tx_completion_data(const uint64_t ph, uintptr_t *cb_arg,
1421 uintptr_t *cb_data)
1422 {
1423 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1424 if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_DATA) != 0) {
1425 ASSERT((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_ALLOC));
1426 *cb_arg = PKT_ADDR(ph)->pkt_tx_compl_cb_arg;
1427 *cb_data = PKT_ADDR(ph)->pkt_tx_compl_cb_data;
1428 } else {
1429 *cb_arg = 0;
1430 *cb_data = 0;
1431 }
1432 return 0;
1433 }
1434
/*
 * Attach a TX completion callback argument/data pair to the packet.
 * _KPKT_INIT_TX_COMPL_DATA must run first — it presumably sets up the
 * completion-data state/flags the fields depend on (NOTE(review):
 * confirm macro semantics in os_packet_private.h).  Always returns 0.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_tx_completion_data(const uint64_t ph, uintptr_t cb_arg,
    uintptr_t cb_data)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	_KPKT_INIT_TX_COMPL_DATA(PKT_ADDR(ph));
	PKT_ADDR(ph)->pkt_tx_compl_cb_arg = cb_arg;
	PKT_ADDR(ph)->pkt_tx_compl_cb_data = cb_data;
	return 0;
}
1446
1447 __attribute__((always_inline))
1448 static inline errno_t
__packet_get_timestamp_requested(const uint64_t ph,boolean_t * requested)1449 __packet_get_timestamp_requested(const uint64_t ph, boolean_t *requested)
1450 {
1451 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1452 if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0) {
1453 *requested = TRUE;
1454 } else {
1455 *requested = FALSE;
1456 }
1457 return 0;
1458 }
1459
1460 __attribute__((always_inline))
1461 static inline errno_t
__packet_get_tx_completion_status(const uint64_t ph,kern_return_t * status)1462 __packet_get_tx_completion_status(const uint64_t ph, kern_return_t *status)
1463 {
1464 PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1465 if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_DATA) != 0) {
1466 ASSERT((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_ALLOC));
1467 *status = (kern_return_t)PKT_ADDR(ph)->pkt_tx_compl_status;
1468 } else {
1469 *status = 0;
1470 }
1471 return 0;
1472 }
1473
/*
 * Record the TX completion status on the packet.  As with
 * __packet_set_tx_completion_data(), _KPKT_INIT_TX_COMPL_DATA must run
 * before the field is written.  Always returns 0.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_tx_completion_status(const uint64_t ph, kern_return_t status)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	_KPKT_INIT_TX_COMPL_DATA(PKT_ADDR(ph));
	PKT_ADDR(ph)->pkt_tx_compl_status = (uint32_t)status;
	return 0;
}
1483 #endif /* KERNEL */
1484
1485 extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t);
1486
1487 __attribute__((always_inline))
1488 static inline uint16_t
__packet_fold_sum(uint32_t sum)1489 __packet_fold_sum(uint32_t sum)
1490 {
1491 sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */
1492 sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */
1493 sum = (sum >> 16) + (sum & 0xffff); /* final carry */
1494 return sum & 0xffff;
1495 }
1496
1497 __attribute__((always_inline))
1498 static inline uint16_t
__packet_fold_sum_final(uint32_t sum)1499 __packet_fold_sum_final(uint32_t sum)
1500 {
1501 sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */
1502 sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */
1503 sum = (sum >> 16) + (sum & 0xffff); /* final carry */
1504 return ~sum & 0xffff;
1505 }
1506
/*
 * Compute the Internet checksum partial sum over len bytes at data,
 * seeded with sum0; thin wrapper over the optimized os_cpu_in_cksum().
 */
__attribute__((always_inline))
static inline uint32_t
__packet_cksum(const void *data, uint32_t len, uint32_t sum0)
{
	return os_cpu_in_cksum(data, len, sum0);
}
1513
1514 extern uint32_t os_cpu_copy_in_cksum(const void *, void *, uint32_t, uint32_t);
1515
/*
 * Copy len bytes from src to dst while accumulating the Internet
 * checksum partial sum (seeded with sum0) in a single pass; thin
 * wrapper over os_cpu_copy_in_cksum().
 */
__attribute__((always_inline))
static inline uint32_t
__packet_copy_and_sum(const void *src, void *dst, uint32_t len, uint32_t sum0)
{
	return os_cpu_copy_in_cksum(src, dst, len, sum0);
}
1522
1523 __attribute__((always_inline))
1524 static inline uint16_t
__packet_fix_sum(uint16_t csum,uint16_t old,uint16_t new)1525 __packet_fix_sum(uint16_t csum, uint16_t old, uint16_t new)
1526 {
1527 uint32_t c = csum + old - new;
1528 c = (c >> 16) + (c & 0xffff); /* Only add carry once */
1529
1530 return c & 0xffff;
1531 }
1532
1533 /* MUST be used for uint32_t fields */
1534 __attribute__((always_inline))
1535 static inline void
__packet_fix_hdr_sum(uint8_t * field,uint16_t * csum,uint32_t new)1536 __packet_fix_hdr_sum(uint8_t *field, uint16_t *csum, uint32_t new)
1537 {
1538 uint32_t old;
1539 memcpy(&old, field, sizeof(old));
1540 memcpy(field, &new, sizeof(uint32_t));
1541 *csum = __packet_fix_sum(__packet_fix_sum(*csum, (uint16_t)(old >> 16),
1542 (uint16_t)(new >> 16)), (uint16_t)(old & 0xffff),
1543 (uint16_t)(new & 0xffff));
1544 }
1545
/*
 * Return the buflet's current data address (buf_addr).
 */
__attribute__((always_inline))
static inline void *
__buflet_get_data_address(const void *buf)
{
	return (void *)(BLT_ADDR(buf)->buf_addr);
}
1552
#ifdef KERNEL
/*
 * Set the buflet's data address; rejects addresses below the start of
 * the backing buffer object with ERANGE.  Kernel-only.
 */
__attribute__((always_inline))
static inline errno_t
__buflet_set_data_address(const void *buf, const void *addr)
{
	/* buffer region is always marked as shareable */
	ASSERT(BLT_ADDR(buf)->buf_ctl->bc_flags & SKMEM_BUFCTL_SHAREOK);

	/* full bounds checking will be performed during finalize */
	if (__improbable((uintptr_t)addr <
	    (uintptr_t)BLT_ADDR(buf)->buf_objaddr)) {
		return ERANGE;
	}

	_CASSERT(sizeof(BLT_ADDR(buf)->buf_addr) ==
	    sizeof(mach_vm_address_t));
	/* deconst */
	*(mach_vm_address_t *)(uintptr_t)&BLT_ADDR(buf)->buf_addr =
	    (mach_vm_address_t)addr;
	return 0;
}
#endif /* KERNEL */
1574
1575 __attribute__((always_inline))
1576 static inline int
__buflet_set_data_offset(const void * buf,const uint16_t doff)1577 __buflet_set_data_offset(const void *buf, const uint16_t doff)
1578 {
1579 #ifdef KERNEL
1580 /*
1581 * Kernel-specific assertion. For user space, the metadata
1582 * region gets redirected to anonymous zero-filled pages at
1583 * defunct time, so ignore it there.
1584 */
1585 ASSERT(BLT_ADDR(buf)->buf_dlim != 0);
1586
1587 if (__probable((uint32_t)doff <= BLT_ADDR(buf)->buf_objlim)) {
1588 BLT_ADDR(buf)->buf_doff = doff;
1589 return 0;
1590 }
1591 return ERANGE;
1592 #else /* !KERNEL */
1593 BLT_ADDR(buf)->buf_doff = doff;
1594 return 0;
1595 #endif /* KERNEL */
1596 }
1597
1598 __attribute__((always_inline))
1599 static inline int
__buflet_set_data_length(const void * buf,const uint16_t dlen)1600 __buflet_set_data_length(const void *buf, const uint16_t dlen)
1601 {
1602 #ifdef KERNEL
1603 /*
1604 * Kernel-specific assertion. For user space, the metadata
1605 * region gets redirected to anonymous zero-filled pages at
1606 * defunct time, so ignore it there.
1607 */
1608 ASSERT(BLT_ADDR(buf)->buf_dlim != 0);
1609
1610 if (__probable((uint32_t)dlen <= BLT_ADDR(buf)->buf_objlim)) {
1611 BLT_ADDR(buf)->buf_dlen = dlen;
1612 return 0;
1613 }
1614 return ERANGE;
1615 #else /* !KERNEL */
1616 BLT_ADDR(buf)->buf_dlen = dlen;
1617 return 0;
1618 #endif /* KERNEL */
1619 }
1620
/*
 * Return the buflet's current data length (buf_dlen).
 */
__attribute__((always_inline))
static inline uint16_t
__buflet_get_data_length(const void *buf)
{
	return BLT_ADDR(buf)->buf_dlen;
}
1627
#ifdef KERNEL
/*
 * Return the memory segment backing the buflet's buffer object; when
 * idx is non-NULL it also receives the object's index within that
 * segment.  Kernel-only.
 */
__attribute__((always_inline))
static inline struct sksegment *
__buflet_get_object_segment(const void *buf, kern_obj_idx_seg_t *idx)
{
	/* the public index type must stay in sync with the internal one */
	_CASSERT(sizeof(obj_idx_t) == sizeof(kern_obj_idx_seg_t));

	if (idx != NULL) {
		*idx = BLT_ADDR(buf)->buf_ctl->bc_idx;
	}

	return BLT_ADDR(buf)->buf_ctl->bc_slab->sl_seg;
}
#endif /* KERNEL */
1642
/*
 * Return the address of the buffer object underlying the buflet.
 */
__attribute__((always_inline))
static inline void *
__buflet_get_object_address(const void *buf)
{
#ifdef KERNEL
	return (void *)(BLT_ADDR(buf)->buf_objaddr);
#else /* !KERNEL */
	/*
	 * For user space, shared buffer is not available and hence the data
	 * address is immutable and is always the same as the underlying
	 * buffer object address itself.
	 */
	return __buflet_get_data_address(buf);
#endif /* !KERNEL */
}
1658
/*
 * Return the size limit of the buffer object underlying the buflet.
 */
__attribute__((always_inline))
static inline uint32_t
__buflet_get_object_limit(const void *buf)
{
#ifdef KERNEL
	return BLT_ADDR(buf)->buf_objlim;
#else /* !KERNEL */
	/*
	 * For user space, shared buffer is not available and hence the data
	 * limit is immutable and is always the same as the underlying buffer
	 * object limit itself.
	 */
	return (uint32_t)__buflet_get_data_limit(buf);
#endif /* !KERNEL */
}
1674
1675 __attribute__((always_inline))
1676 static inline packet_trace_id_t
__packet_get_trace_id(const uint64_t ph)1677 __packet_get_trace_id(const uint64_t ph)
1678 {
1679 switch (SK_PTR_TYPE(ph)) {
1680 case NEXUS_META_TYPE_PACKET:
1681 return PKT_ADDR(ph)->pkt_trace_id;
1682 break;
1683 default:
1684 return 0;
1685 }
1686 }
1687
/*
 * Set the packet's trace id (used to correlate kdebug trace events).
 */
__attribute__((always_inline))
static inline void
__packet_set_trace_id(const uint64_t ph, packet_trace_id_t id)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_trace_id = id;
}
1695
/*
 * Emit a kdebug trace event tagged with the packet's trace id.
 */
__attribute__((always_inline))
static inline void
__packet_trace_event(const uint64_t ph, uint32_t event)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	/*
	 * The pragma silences unused-parameter warnings when KDBG
	 * compiles away (presumably in configs with tracing disabled
	 * — confirm against kdebug.h).
	 */
#pragma unused(event, ph)
	KDBG(event, PKT_ADDR(ph)->pkt_trace_id);
#else /* !KERNEL */
	kdebug_trace(event, PKT_ADDR(ph)->pkt_trace_id, 0, 0, 0);
#endif /* !KERNEL */
}
1708
#ifdef KERNEL
/*
 * Invoke every registered TX completion callback for the packet, lowest
 * bit index first, passing the packet's completion status, callback
 * argument/data pair and timestamp; clears PKT_F_TX_COMPL_TS_REQ when
 * done.
 */
__attribute__((always_inline))
static inline void
__packet_perform_tx_completion_callbacks(const kern_packet_t ph, ifnet_t ifp)
{
	/*
	 * NOTE: this function can be called with ifp as NULL.
	 */
	uint64_t ts;
	kern_return_t tx_status;
	uintptr_t cb_arg, cb_data;
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph);

	ASSERT((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0);
	(void) __packet_get_tx_completion_status(ph, &tx_status);
	__packet_get_tx_completion_data(ph, &cb_arg, &cb_data);
	__packet_get_timestamp(ph, &ts, NULL);
	while (kpkt->pkt_tx_compl_callbacks != 0) {
		mbuf_tx_compl_func cb;
		uint32_t i;

		i = ffs(kpkt->pkt_tx_compl_callbacks) - 1;
		/*
		 * Shift an unsigned constant: (1 << 31) on a signed int
		 * is undefined behavior when the highest callback bit
		 * is set.
		 */
		kpkt->pkt_tx_compl_callbacks &= ~(1U << i);
		cb = m_get_tx_compl_callback(i);
		if (__probable(cb != NULL)) {
			cb(kpkt->pkt_tx_compl_context, ifp, ts, cb_arg, cb_data,
			    tx_status);
		}
	}
	kpkt->pkt_pflags &= ~PKT_F_TX_COMPL_TS_REQ;
}
#endif /* KERNEL */
1741
1742 #endif /* PRIVATE || BSD_KERNEL_PRIVATE */
1743 #endif /* !_SKYWALK_PACKET_COMMON_H_ */
1744