xref: /xnu-8796.101.5/bsd/skywalk/packet/packet_common.h (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_PACKET_COMMON_H_
30 #define _SKYWALK_PACKET_COMMON_H_
31 
32 #if defined(PRIVATE) || defined(BSD_KERNEL_PRIVATE)
33 /*
34  * Routines common to kernel and userland.  This file is intended to
35  * be included by code implementing the packet APIs, in particular,
36  * the Skywalk kernel and libsyscall code.
37  */
38 
39 #include <skywalk/os_packet_private.h>
40 #include <net/if_vlan_var.h>
41 #include <sys/errno.h>
42 #include <sys/kdebug.h>
43 
44 #ifndef KERNEL
45 /*
46  * User.
47  */
48 #if !defined(LIBSYSCALL_INTERFACE)
49 #error "LIBSYSCALL_INTERFACE not defined"
50 #endif /* !LIBSYSCALL_INTERFACE */
51 #define QUM_ADDR(_ph)   SK_PTR_ADDR_UQUM(_ph)
52 #define PKT_ADDR(_ph)   SK_PTR_ADDR_UPKT(_ph)
53 #define BLT_ADDR(_bp)   ((struct __user_buflet *)(uintptr_t)_bp)
54 #else /* KERNEL */
55 /*
56  * Kernel.
57  */
58 #include <skywalk/packet/packet_var.h>
59 #include <skywalk/packet/pbufpool_var.h>
60 #define QUM_ADDR(_ph)   SK_PTR_ADDR_KQUM(_ph)
61 #define PKT_ADDR(_ph)   SK_PTR_ADDR_KPKT(_ph)
62 #define BLT_ADDR(_bp)   ((struct __kern_buflet *)(uintptr_t)_bp)
63 #define PKT_HAS_ATTACHED_MBUF(_ph)              \
64 	((PKT_ADDR(_ph)->pkt_pflags & PKT_F_MBUF_DATA) != 0)
65 #endif /* KERNEL */
66 
67 /*
68  * Common.
69  */
#if (DEBUG || DEVELOPMENT)
/*
 * Debug/development builds: verify the metadata type (and subtype)
 * encoded in the packet handle; on mismatch, report and never return.
 */
#define PKT_SUBTYPE_ASSERT(_ph, _type, _subtype) do {                   \
	if (__improbable(SK_PTR_TYPE(_ph) != (uint64_t)(_type) ||       \
	    SK_PTR_SUBTYPE(_ph) != (uint64_t)(_subtype))) {             \
	        pkt_subtype_assert_fail(_ph, _type, _subtype);          \
	        /* NOTREACHED */                                        \
	        __builtin_unreachable();                                \
	}                                                               \
} while (0)

#define PKT_TYPE_ASSERT(_ph, _type) do {                                \
	if (__improbable(SK_PTR_TYPE(_ph) != (uint64_t)(_type))) {      \
	        pkt_type_assert_fail(_ph, _type);                       \
	        /* NOTREACHED */                                        \
	        __builtin_unreachable();                                \
	}                                                               \
} while (0)
#else /* !DEBUG && !DEVELOPMENT */
/* Release builds: the type checks compile away entirely. */
#define PKT_SUBTYPE_ASSERT(_ph, _type, _subtype)        ((void)0)
#define PKT_TYPE_ASSERT(_ph, _type)                     ((void)0)
#endif /* !DEBUG && !DEVELOPMENT */
91 
/*
 * Single-buflet iteration over a quantum: with _pbuf == NULL the sole
 * buflet is returned; on the next call (passing that buflet back as
 * _pbuf) the result is NULL, terminating the walk.
 */
#define QUM_GET_NEXT_BUFLET(_qum, _pbuf, _buf) do {                     \
	ASSERT((_pbuf) == NULL || (_pbuf) == (_qum)->qum_buf);          \
	(_buf) = (((_pbuf) == NULL) ? (_qum)->qum_buf : NULL);          \
} while (0)
96 
/*
 * Fetch the packet's first buflet into _buf.  NULL when the packet has
 * no buflets (_bcnt == 0).  The embedded qum_buf is preferred; when its
 * buffer address is empty, the chained buflet pointer is used instead.
 */
#define PKT_GET_FIRST_BUFLET(_pkt, _bcnt, _buf) do {                    \
	if (__improbable((_bcnt) == 0)) {                               \
	        (_buf) = NULL;                                          \
	        break;                                                  \
	}                                                               \
	if (__probable((_pkt)->pkt_qum_buf.buf_addr != 0)) {            \
	        (_buf) = &(_pkt)->pkt_qum_buf;                          \
	} else {                                                        \
	        (_buf) = __DECONST(void *, (_pkt)->pkt_qum_buf.buf_nbft_addr);\
	}                                                               \
} while (0)
108 
/*
 * Advance the buflet walk: start at the first buflet when _pbuf is
 * NULL, otherwise follow _pbuf's next-buflet link.
 */
#define _PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do {             \
	if ((_pbuf) == NULL) {                                          \
	        PKT_GET_FIRST_BUFLET(_pkt, _bcnt, _buf);                \
	} else {                                                        \
	        (_buf) = __DECONST(void *, (_pbuf)->buf_nbft_addr);     \
	}                                                               \
} while (0)
116 
#ifndef KERNEL
/* Userland: plain wrapper around the common walker. */
#define PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do {              \
	_PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf);                 \
} while (0)
#else /* KERNEL */
/* Kernel: additionally assert that a non-NULL _pbuf implies _bcnt >= 1. */
#define PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do {              \
	ASSERT(((_bcnt) >= 1) || ((_pbuf) == NULL));                    \
	_PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf);                 \
} while (0)
#endif /* KERNEL */
127 
#ifdef KERNEL
/*
 * Compose a 32-bit nexus port ID: generation count in the upper 16
 * bits, port number in the lower 16 bits.
 *
 * Fix: macro parameters are now fully parenthesized (an argument such
 * as `a | b` previously bound incorrectly against `& 0xffff`), and the
 * uint32_t cast now covers the whole composed value rather than only
 * the shifted gencnt half.
 */
#define PKT_COMPOSE_NX_PORT_ID(_nx_port, _gencnt)    \
	((uint32_t)(((((_gencnt) & 0xffff)) << 16) | ((_nx_port) & 0xffff)))

/* Split a 32-bit nexus port ID back into port and generation count. */
#define PKT_DECOMPOSE_NX_PORT_ID(_nx_port_id, _nx_port, _gencnt) do {   \
	(_nx_port) = (_nx_port_id) & 0xffff;                            \
	(_gencnt) = ((_nx_port_id) >> 16) & 0xffff;                     \
} while (0)
#endif /* KERNEL */
137 
138 __attribute__((always_inline))
139 static inline int
__packet_set_headroom(const uint64_t ph,const uint8_t headroom)140 __packet_set_headroom(const uint64_t ph, const uint8_t headroom)
141 {
142 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
143 	if (__probable(headroom < PKT_ADDR(ph)->pkt_qum_buf.buf_dlim)) {
144 		PKT_ADDR(ph)->pkt_headroom = headroom;
145 		return 0;
146 	}
147 	return ERANGE;
148 }
149 
150 __attribute__((always_inline))
151 static inline uint8_t
__packet_get_headroom(const uint64_t ph)152 __packet_get_headroom(const uint64_t ph)
153 {
154 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
155 	return PKT_ADDR(ph)->pkt_headroom;
156 }
157 
158 __attribute__((always_inline))
159 static inline int
__packet_set_link_header_length(const uint64_t ph,const uint8_t len)160 __packet_set_link_header_length(const uint64_t ph, const uint8_t len)
161 {
162 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
163 	if (__probable(len <= PKT_ADDR(ph)->pkt_qum_buf.buf_dlim)) {
164 		PKT_ADDR(ph)->pkt_l2_len = len;
165 		return 0;
166 	}
167 	return ERANGE;
168 }
169 
170 __attribute__((always_inline))
171 static inline uint8_t
__packet_get_link_header_length(const uint64_t ph)172 __packet_get_link_header_length(const uint64_t ph)
173 {
174 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
175 	return PKT_ADDR(ph)->pkt_l2_len;
176 }
177 
178 __attribute__((always_inline))
179 static inline int
__packet_set_link_broadcast(const uint64_t ph)180 __packet_set_link_broadcast(const uint64_t ph)
181 {
182 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
183 	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_BCAST;
184 	return 0;
185 }
186 
187 __attribute__((always_inline))
188 static inline boolean_t
__packet_get_link_broadcast(const uint64_t ph)189 __packet_get_link_broadcast(const uint64_t ph)
190 {
191 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
192 	return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_BCAST) != 0;
193 }
194 
195 __attribute__((always_inline))
196 static inline int
__packet_set_link_multicast(const uint64_t ph)197 __packet_set_link_multicast(const uint64_t ph)
198 {
199 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
200 	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_MCAST;
201 	return 0;
202 }
203 
204 __attribute__((always_inline))
205 static inline boolean_t
__packet_get_link_multicast(const uint64_t ph)206 __packet_get_link_multicast(const uint64_t ph)
207 {
208 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
209 	return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_MCAST) != 0;
210 }
211 
212 __attribute__((always_inline))
213 static inline int
__packet_set_link_ethfcs(const uint64_t ph)214 __packet_set_link_ethfcs(const uint64_t ph)
215 {
216 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
217 	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_ETHFCS;
218 	return 0;
219 }
220 
221 __attribute__((always_inline))
222 static inline boolean_t
__packet_get_link_ethfcs(const uint64_t ph)223 __packet_get_link_ethfcs(const uint64_t ph)
224 {
225 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
226 	return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_ETHFCS) != 0;
227 }
228 
229 __attribute__((always_inline))
230 static inline int
__packet_set_transport_traffic_background(const uint64_t ph)231 __packet_set_transport_traffic_background(const uint64_t ph)
232 {
233 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
234 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_BACKGROUND;
235 	return 0;
236 }
237 
238 __attribute__((always_inline))
239 static inline boolean_t
__packet_get_transport_traffic_background(const uint64_t ph)240 __packet_get_transport_traffic_background(const uint64_t ph)
241 {
242 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
243 	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_BACKGROUND) != 0;
244 }
245 
246 __attribute__((always_inline))
247 static inline int
__packet_set_transport_traffic_realtime(const uint64_t ph)248 __packet_set_transport_traffic_realtime(const uint64_t ph)
249 {
250 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
251 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_REALTIME;
252 	return 0;
253 }
254 
255 __attribute__((always_inline))
256 static inline boolean_t
__packet_get_transport_traffic_realtime(const uint64_t ph)257 __packet_get_transport_traffic_realtime(const uint64_t ph)
258 {
259 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
260 	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_REALTIME) != 0;
261 }
262 
263 __attribute__((always_inline))
264 static inline int
__packet_set_transport_retransmit(const uint64_t ph)265 __packet_set_transport_retransmit(const uint64_t ph)
266 {
267 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
268 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_REXMT;
269 	return 0;
270 }
271 
272 __attribute__((always_inline))
273 static inline boolean_t
__packet_get_transport_retransmit(const uint64_t ph)274 __packet_get_transport_retransmit(const uint64_t ph)
275 {
276 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
277 	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_REXMT) != 0;
278 }
279 
280 __attribute__((always_inline))
281 static inline int
__packet_set_transport_last_packet(const uint64_t ph)282 __packet_set_transport_last_packet(const uint64_t ph)
283 {
284 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
285 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_LAST_PKT;
286 	return 0;
287 }
288 
289 __attribute__((always_inline))
290 static inline int
__packet_set_group_start(const uint64_t ph)291 __packet_set_group_start(const uint64_t ph)
292 {
293 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
294 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_GROUP_START;
295 	return 0;
296 }
297 
298 __attribute__((always_inline))
299 static inline boolean_t
__packet_get_group_start(const uint64_t ph)300 __packet_get_group_start(const uint64_t ph)
301 {
302 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
303 	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_GROUP_START) != 0;
304 }
305 
306 __attribute__((always_inline))
307 static inline int
__packet_set_group_end(const uint64_t ph)308 __packet_set_group_end(const uint64_t ph)
309 {
310 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
311 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_GROUP_END;
312 	return 0;
313 }
314 
315 __attribute__((always_inline))
316 static inline boolean_t
__packet_get_group_end(const uint64_t ph)317 __packet_get_group_end(const uint64_t ph)
318 {
319 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
320 	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_GROUP_END) != 0;
321 }
322 
323 __attribute__((always_inline))
324 static inline errno_t
__packet_get_expire_time(const uint64_t ph,uint64_t * ts)325 __packet_get_expire_time(const uint64_t ph, uint64_t *ts)
326 {
327 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
328 #ifdef KERNEL
329 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
330 #else /* !KERNEL */
331 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
332 #endif /* !KERNEL */
333 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXPIRE_TS) == 0) {
334 		return ENOENT;
335 	}
336 	if (ts == NULL) {
337 		return EINVAL;
338 	}
339 	*ts = po->__po_expire_ts;
340 	return 0;
341 }
342 
343 __attribute__((always_inline))
344 static inline errno_t
__packet_set_expire_time(const uint64_t ph,const uint64_t ts)345 __packet_set_expire_time(const uint64_t ph, const uint64_t ts)
346 {
347 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
348 #ifdef KERNEL
349 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
350 #else /* !KERNEL */
351 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
352 #endif /* !KERNEL */
353 	if (ts != 0) {
354 		po->__po_expire_ts = ts;
355 		PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_EXPIRE_TS;
356 	} else {
357 		po->__po_expire_ts = 0;
358 		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_EXPIRE_TS;
359 	}
360 	return 0;
361 }
362 
363 __attribute__((always_inline))
364 static inline errno_t
__packet_get_expiry_action(const uint64_t ph,packet_expiry_action_t * pea)365 __packet_get_expiry_action(const uint64_t ph, packet_expiry_action_t *pea)
366 {
367 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
368 #ifdef KERNEL
369 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
370 #else /* !KERNEL */
371 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
372 #endif /* !KERNEL */
373 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXP_ACTION) == 0) {
374 		return ENOENT;
375 	}
376 	if (pea == NULL) {
377 		return EINVAL;
378 	}
379 	*pea = po->__po_expiry_action;
380 	return 0;
381 }
382 
383 __attribute__((always_inline))
384 static inline errno_t
__packet_set_expiry_action(const uint64_t ph,packet_expiry_action_t pea)385 __packet_set_expiry_action(const uint64_t ph, packet_expiry_action_t pea)
386 {
387 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
388 #ifdef KERNEL
389 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
390 #else /* !KERNEL */
391 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
392 #endif /* !KERNEL */
393 	if (pea != PACKET_EXPIRY_ACTION_NONE) {
394 		po->__po_expiry_action = (uint8_t)pea;
395 		PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_EXP_ACTION;
396 	} else {
397 		po->__po_expiry_action = 0;
398 		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_EXP_ACTION;
399 	}
400 	return 0;
401 }
402 
403 __attribute__((always_inline))
404 static inline errno_t
__packet_opt_get_token(const struct __packet_opt * po,void * token,uint16_t * len,uint8_t * type)405 __packet_opt_get_token(const struct __packet_opt *po, void *token,
406     uint16_t *len, uint8_t *type)
407 {
408 	uint16_t tlen = po->__po_token_len;
409 	uint8_t ttype;
410 
411 	if (token == NULL || len == NULL || type == NULL || tlen > *len) {
412 		return EINVAL;
413 	}
414 	ttype = (uint8_t)po->__po_token_type;
415 
416 	ASSERT(tlen <= PKT_OPT_MAX_TOKEN_SIZE);
417 	_CASSERT((__builtin_offsetof(struct __packet_opt, __po_token) % 8) == 0);
418 	bcopy(po->__po_token, token, tlen);
419 	*len = tlen;
420 	*type = ttype;
421 	return 0;
422 }
423 
424 __attribute__((always_inline))
425 static inline errno_t
__packet_get_token(const uint64_t ph,void * token,uint16_t * len)426 __packet_get_token(const uint64_t ph, void *token, uint16_t *len)
427 {
428 #ifdef KERNEL
429 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
430 #else /* !KERNEL */
431 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
432 #endif /* !KERNEL */
433 	uint8_t type;
434 	errno_t err;
435 
436 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
437 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) == 0) {
438 		return ENOENT;
439 	}
440 	err = __packet_opt_get_token(po, token, len, &type);
441 	if ((err == 0) && (type != PKT_OPT_TOKEN_TYPE_OPAQUE)) {
442 		err = ENOENT;
443 	}
444 	return err;
445 }
446 
447 __attribute__((always_inline))
448 static inline errno_t
__packet_opt_set_token(struct __packet_opt * po,const void * token,const uint16_t len,const uint8_t type,volatile uint64_t * pflags)449 __packet_opt_set_token(struct __packet_opt *po, const void *token,
450     const uint16_t len, const uint8_t type, volatile uint64_t *pflags)
451 {
452 	_CASSERT((__builtin_offsetof(struct __packet_opt, __po_token) % 8) == 0);
453 	if (len != 0) {
454 		if (token == NULL || len > PKT_OPT_MAX_TOKEN_SIZE ||
455 		    type == 0) {
456 			return EINVAL;
457 		}
458 		if (__probable(IS_P2ALIGNED(token, 8))) {
459 			uint64_t *token64 = __DECONST(void *, token);
460 			po->__po_token_data[0] = *token64;
461 			po->__po_token_data[1] = *(token64 + 1);
462 		} else {
463 			bcopy(token, po->__po_token, len);
464 		}
465 		po->__po_token_len = len;
466 		po->__po_token_type = type;
467 		*pflags |= PKT_F_OPT_TOKEN;
468 	} else {
469 		_CASSERT(sizeof(po->__po_token_data[0]) == 8);
470 		_CASSERT(sizeof(po->__po_token_data[1]) == 8);
471 		_CASSERT(sizeof(po->__po_token) == 16);
472 		po->__po_token_data[0] = 0;
473 		po->__po_token_data[1] = 0;
474 		po->__po_token_len = 0;
475 		po->__po_token_type = 0;
476 		*pflags &= ~PKT_F_OPT_TOKEN;
477 	}
478 	return 0;
479 }
480 
481 __attribute__((always_inline))
482 static inline errno_t
__packet_set_token(const uint64_t ph,const void * token,const uint16_t len)483 __packet_set_token(const uint64_t ph, const void *token, const uint16_t len)
484 {
485 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
486 #ifdef KERNEL
487 	return __packet_opt_set_token(PKT_ADDR(ph)->pkt_com_opt, token, len,
488 	           PKT_OPT_TOKEN_TYPE_OPAQUE, &PKT_ADDR(ph)->pkt_pflags);
489 #else /* !KERNEL */
490 	return __packet_opt_set_token(&PKT_ADDR(ph)->pkt_com_opt, token, len,
491 	           PKT_OPT_TOKEN_TYPE_OPAQUE, &PKT_ADDR(ph)->pkt_pflags);
492 #endif /* !KERNEL */
493 }
494 
495 __attribute__((always_inline))
496 static inline errno_t
__packet_get_packetid(const uint64_t ph,packet_id_t * pktid)497 __packet_get_packetid(const uint64_t ph, packet_id_t *pktid)
498 {
499 #ifdef KERNEL
500 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
501 #else /* !KERNEL */
502 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
503 #endif /* !KERNEL */
504 	uint16_t len = sizeof(packet_id_t);
505 	uint8_t type;
506 	errno_t err;
507 
508 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
509 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) == 0) {
510 		return ENOENT;
511 	}
512 	err = __packet_opt_get_token(po, pktid, &len, &type);
513 	if ((err == 0) && ((type != PKT_OPT_TOKEN_TYPE_PACKET_ID) ||
514 	    (len != sizeof(packet_id_t)))) {
515 		err = ENOENT;
516 	}
517 	return err;
518 }
519 
520 __attribute__((always_inline))
521 static inline errno_t
__packet_set_packetid(const uint64_t ph,const packet_id_t * pktid)522 __packet_set_packetid(const uint64_t ph, const packet_id_t *pktid)
523 {
524 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
525 #ifdef KERNEL
526 	return __packet_opt_set_token(PKT_ADDR(ph)->pkt_com_opt, pktid,
527 	           sizeof(packet_id_t), PKT_OPT_TOKEN_TYPE_PACKET_ID,
528 	           &PKT_ADDR(ph)->pkt_pflags);
529 #else /* !KERNEL */
530 	return __packet_opt_set_token(&PKT_ADDR(ph)->pkt_com_opt, pktid,
531 	           sizeof(packet_id_t), PKT_OPT_TOKEN_TYPE_PACKET_ID,
532 	           &PKT_ADDR(ph)->pkt_pflags);
533 #endif /* !KERNEL */
534 }
535 
536 __attribute__((always_inline))
537 static inline errno_t
__packet_get_vlan_tag(const uint64_t ph,uint16_t * vlan_tag,boolean_t * tag_in_pkt)538 __packet_get_vlan_tag(const uint64_t ph, uint16_t *vlan_tag,
539     boolean_t *tag_in_pkt)
540 {
541 #ifdef KERNEL
542 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
543 #else /* !KERNEL */
544 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
545 #endif /* !KERNEL */
546 	uint64_t pflags;
547 
548 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
549 	pflags = PKT_ADDR(ph)->pkt_pflags;
550 	if ((pflags & PKT_F_OPT_VLTAG) == 0) {
551 		return ENOENT;
552 	}
553 	if (vlan_tag != NULL) {
554 		*vlan_tag = po->__po_vlan_tag;
555 	}
556 	if (tag_in_pkt != NULL) {
557 		*tag_in_pkt = ((pflags & PKT_F_OPT_VLTAG_IN_PKT) != 0);
558 	}
559 	return 0;
560 }
561 
562 __attribute__((always_inline))
563 static inline errno_t
__packet_set_vlan_tag(const uint64_t ph,const uint16_t vlan_tag,const boolean_t tag_in_pkt)564 __packet_set_vlan_tag(const uint64_t ph, const uint16_t vlan_tag,
565     const boolean_t tag_in_pkt)
566 {
567 #ifdef KERNEL
568 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
569 #else /* !KERNEL */
570 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
571 #endif /* !KERNEL */
572 
573 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
574 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_VLTAG;
575 	po->__po_vlan_tag = vlan_tag;
576 
577 	if (tag_in_pkt) {
578 		PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_VLTAG_IN_PKT;
579 	}
580 	return 0;
581 }
582 
583 __attribute__((always_inline))
584 static inline uint16_t
__packet_get_vlan_id(const uint16_t vlan_tag)585 __packet_get_vlan_id(const uint16_t vlan_tag)
586 {
587 	return EVL_VLANOFTAG(vlan_tag);
588 }
589 
590 __attribute__((always_inline))
591 static inline uint8_t
__packet_get_vlan_priority(const uint16_t vlan_tag)592 __packet_get_vlan_priority(const uint16_t vlan_tag)
593 {
594 	return EVL_PRIOFTAG(vlan_tag);
595 }
596 
597 __attribute__((always_inline))
598 static inline errno_t
__packet_get_app_metadata(const uint64_t ph,packet_app_metadata_type_t * app_type,uint8_t * app_metadata)599 __packet_get_app_metadata(const uint64_t ph,
600     packet_app_metadata_type_t *app_type, uint8_t *app_metadata)
601 {
602 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
603 	if (app_type == NULL || app_metadata == NULL) {
604 		return EINVAL;
605 	}
606 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_APP_METADATA) == 0) {
607 		return ENOENT;
608 	}
609 #ifdef KERNEL
610 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
611 #else /* !KERNEL */
612 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
613 #endif /* !KERNEL */
614 	if (po->__po_app_type == PACKET_APP_METADATA_TYPE_UNSPECIFIED) {
615 		return ENOENT;
616 	}
617 	*app_type = po->__po_app_type;
618 	*app_metadata = po->__po_app_metadata;
619 	return 0;
620 }
621 
622 __attribute__((always_inline))
623 static inline errno_t
__packet_set_app_metadata(const uint64_t ph,const packet_app_metadata_type_t app_type,const uint8_t app_metadata)624 __packet_set_app_metadata(const uint64_t ph,
625     const packet_app_metadata_type_t app_type, const uint8_t app_metadata)
626 {
627 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
628 #ifdef KERNEL
629 	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
630 #else /* !KERNEL */
631 	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
632 #endif /* !KERNEL */
633 	if (app_type < PACKET_APP_METADATA_TYPE_MIN ||
634 	    app_type > PACKET_APP_METADATA_TYPE_MAX) {
635 		po->__po_app_type = PACKET_APP_METADATA_TYPE_UNSPECIFIED;
636 		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_APP_METADATA;
637 		return EINVAL;
638 	}
639 	po->__po_app_type = app_type;
640 	po->__po_app_metadata = app_metadata;
641 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_APP_METADATA;
642 	return 0;
643 }
644 
#ifdef KERNEL
/* Kernel-only: mark the packet as a wake packet (PKT_F_WAKE_PKT). */
__attribute__((always_inline))
static inline void
__packet_set_wake_flag(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_WAKE_PKT;
}
#endif
654 
655 __attribute__((always_inline))
656 static inline boolean_t
__packet_get_wake_flag(const uint64_t ph)657 __packet_get_wake_flag(const uint64_t ph)
658 {
659 	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_WAKE_PKT) != 0;
660 }
661 
662 __attribute__((always_inline))
663 static inline void
__packet_set_keep_alive(const uint64_t ph,const boolean_t is_keep_alive)664 __packet_set_keep_alive(const uint64_t ph, const boolean_t is_keep_alive)
665 {
666 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
667 	if (is_keep_alive) {
668 		PKT_ADDR(ph)->pkt_pflags |= PKT_F_KEEPALIVE;
669 	} else {
670 		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_KEEPALIVE;
671 	}
672 }
673 
674 __attribute__((always_inline))
675 static inline boolean_t
__packet_get_keep_alive(const uint64_t ph)676 __packet_get_keep_alive(const uint64_t ph)
677 {
678 	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_KEEPALIVE) != 0;
679 }
680 
681 __attribute__((always_inline))
682 static inline boolean_t
__packet_get_truncated(const uint64_t ph)683 __packet_get_truncated(const uint64_t ph)
684 {
685 	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
686 	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_TRUNCATED) != 0;
687 }
688 
#ifdef KERNEL
/* Kernel-only: TRUE when the packet belongs to a new transport flow. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_transport_new_flow(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_NEW_FLOW) != 0;
}

/* Kernel-only: TRUE when the packet is the last of a transport burst. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_transport_last_packet(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_LAST_PKT) != 0;
}

/* Kernel-only: TRUE when the packet carries the L4S flag. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_l4s_flag(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_L4S) != 0;
}
#endif /* KERNEL */
714 
715 __attribute__((always_inline))
716 static inline void
__packet_set_l4s_flag(const uint64_t ph,const boolean_t is_l4s)717 __packet_set_l4s_flag(const uint64_t ph, const boolean_t is_l4s)
718 {
719 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
720 	if (is_l4s) {
721 		PKT_ADDR(ph)->pkt_pflags |= PKT_F_L4S;
722 	} else {
723 		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_L4S;
724 	}
725 }
726 
727 __attribute__((always_inline))
728 static inline int
__packet_set_service_class(const uint64_t ph,const uint32_t sc)729 __packet_set_service_class(const uint64_t ph, const uint32_t sc)
730 {
731 	int err = 0;
732 
733 	_CASSERT(sizeof(QUM_ADDR(ph)->qum_svc_class == sizeof(uint32_t)));
734 
735 	switch (sc) {
736 	case PKT_SC_BE:
737 	case PKT_SC_BK_SYS:
738 	case PKT_SC_BK:
739 	case PKT_SC_RD:
740 	case PKT_SC_OAM:
741 	case PKT_SC_AV:
742 	case PKT_SC_RV:
743 	case PKT_SC_VI:
744 	case PKT_SC_SIG:
745 	case PKT_SC_VO:
746 	case PKT_SC_CTL:
747 		QUM_ADDR(ph)->qum_svc_class = sc;
748 		break;
749 
750 	default:
751 		err = EINVAL;
752 		break;
753 	}
754 
755 	return err;
756 }
757 
758 __attribute__((always_inline))
759 static inline uint32_t
__packet_get_service_class(const uint64_t ph)760 __packet_get_service_class(const uint64_t ph)
761 {
762 	uint32_t sc;
763 
764 	_CASSERT(sizeof(QUM_ADDR(ph)->qum_svc_class == sizeof(uint32_t)));
765 
766 	switch (QUM_ADDR(ph)->qum_svc_class) {
767 	case PKT_SC_BE:         /* most likely best effort */
768 	case PKT_SC_BK_SYS:
769 	case PKT_SC_BK:
770 	case PKT_SC_RD:
771 	case PKT_SC_OAM:
772 	case PKT_SC_AV:
773 	case PKT_SC_RV:
774 	case PKT_SC_VI:
775 	case PKT_SC_SIG:
776 	case PKT_SC_VO:
777 	case PKT_SC_CTL:
778 		sc = QUM_ADDR(ph)->qum_svc_class;
779 		break;
780 
781 	default:
782 		sc = PKT_SC_BE;
783 		break;
784 	}
785 
786 	return sc;
787 }
788 
789 __attribute__((always_inline))
790 static inline errno_t
__packet_set_comp_gencnt(const uint64_t ph,const uint32_t gencnt)791 __packet_set_comp_gencnt(const uint64_t ph, const uint32_t gencnt)
792 {
793 	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_comp_gencnt == sizeof(uint32_t)));
794 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
795 
796 	PKT_ADDR(ph)->pkt_comp_gencnt = gencnt;
797 
798 	return 0;
799 }
800 
801 __attribute__((always_inline))
802 static inline errno_t
__packet_get_comp_gencnt(const uint64_t ph,uint32_t * pgencnt)803 __packet_get_comp_gencnt(const uint64_t ph, uint32_t *pgencnt)
804 {
805 	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_comp_gencnt == sizeof(uint32_t)));
806 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
807 
808 	if (pgencnt == NULL) {
809 		return EINVAL;
810 	}
811 
812 	if (PKT_ADDR(ph)->pkt_comp_gencnt == 0) {
813 		return ENOENT;
814 	}
815 
816 	*pgencnt = PKT_ADDR(ph)->pkt_comp_gencnt;
817 	return 0;
818 }
819 
820 __attribute__((always_inline))
821 static inline int
__packet_set_traffic_class(const uint64_t ph,const uint32_t tc)822 __packet_set_traffic_class(const uint64_t ph, const uint32_t tc)
823 {
824 	uint32_t val = PKT_TC2SCVAL(tc);        /* just the val portion */
825 	uint32_t sc;
826 
827 	switch (val) {
828 	case PKT_SCVAL_BK_SYS:
829 		sc = PKT_SC_BK_SYS;
830 		break;
831 	case PKT_SCVAL_BK:
832 		sc = PKT_SC_BK;
833 		break;
834 	case PKT_SCVAL_BE:
835 		sc = PKT_SC_BE;
836 		break;
837 	case PKT_SCVAL_RD:
838 		sc = PKT_SC_RD;
839 		break;
840 	case PKT_SCVAL_OAM:
841 		sc = PKT_SC_OAM;
842 		break;
843 	case PKT_SCVAL_AV:
844 		sc = PKT_SC_AV;
845 		break;
846 	case PKT_SCVAL_RV:
847 		sc = PKT_SC_RV;
848 		break;
849 	case PKT_SCVAL_VI:
850 		sc = PKT_SC_VI;
851 		break;
852 	case PKT_SCVAL_SIG:
853 		sc = PKT_SC_SIG;
854 		break;
855 	case PKT_SCVAL_VO:
856 		sc = PKT_SC_VO;
857 		break;
858 	case PKT_SCVAL_CTL:
859 		sc = PKT_SC_CTL;
860 		break;
861 	default:
862 		sc = PKT_SC_BE;
863 		break;
864 	}
865 
866 	return __packet_set_service_class(ph, sc);
867 }
868 
869 __attribute__((always_inline))
870 static inline uint32_t
__packet_get_traffic_class(const uint64_t ph)871 __packet_get_traffic_class(const uint64_t ph)
872 {
873 	return PKT_SC2TC(__packet_get_service_class(ph));
874 }
875 
876 __attribute__((always_inline))
877 static inline int
__packet_set_inet_checksum(const uint64_t ph,const packet_csum_flags_t flags,const uint16_t start,const uint16_t stuff_val,boolean_t tx)878 __packet_set_inet_checksum(const uint64_t ph, const packet_csum_flags_t flags,
879     const uint16_t start, const uint16_t stuff_val, boolean_t tx)
880 {
881 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
882 
883 	PKT_ADDR(ph)->pkt_csum_flags = flags & (~PACKET_CSUM_TSO_FLAGS);
884 
885 	if (tx) {
886 		PKT_ADDR(ph)->pkt_csum_tx_start_off = start;
887 		PKT_ADDR(ph)->pkt_csum_tx_stuff_off = stuff_val;
888 	} else {
889 		PKT_ADDR(ph)->pkt_csum_rx_start_off = start;
890 		PKT_ADDR(ph)->pkt_csum_rx_value = stuff_val;
891 	}
892 	return 0;
893 }
894 
895 __attribute__((always_inline))
896 static inline packet_csum_flags_t
__packet_get_inet_checksum(const uint64_t ph,uint16_t * start,uint16_t * stuff_val,boolean_t tx)897 __packet_get_inet_checksum(const uint64_t ph, uint16_t *start,
898     uint16_t *stuff_val, boolean_t tx)
899 {
900 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
901 
902 	if (tx) {
903 		if (__probable(start != NULL)) {
904 			*start = PKT_ADDR(ph)->pkt_csum_tx_start_off;
905 		}
906 		if (__probable(stuff_val != NULL)) {
907 			*stuff_val = PKT_ADDR(ph)->pkt_csum_tx_stuff_off;
908 		}
909 	} else {
910 		if (__probable(start != NULL)) {
911 			*start = PKT_ADDR(ph)->pkt_csum_rx_start_off;
912 		}
913 		if (__probable(stuff_val != NULL)) {
914 			*stuff_val = PKT_ADDR(ph)->pkt_csum_rx_value;
915 		}
916 	}
917 	return PKT_ADDR(ph)->pkt_csum_flags & (~PACKET_CSUM_TSO_FLAGS);
918 }
919 
920 __attribute__((always_inline))
921 static inline void
__packet_set_flow_uuid(const uint64_t ph,const uuid_t flow_uuid)922 __packet_set_flow_uuid(const uint64_t ph, const uuid_t flow_uuid)
923 {
924 	struct __quantum *q = &QUM_ADDR(ph)->qum_com;
925 
926 	/*
927 	 * Anticipate a nicely (8-bytes) aligned UUID from caller;
928 	 * the one in qum_flow_id is always 8-byte aligned.
929 	 */
930 	if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint64_t)))) {
931 		uint64_t *id_64 = (uint64_t *)(uintptr_t)flow_uuid;
932 		q->__q_flow_id_val64[0] = id_64[0];
933 		q->__q_flow_id_val64[1] = id_64[1];
934 	} else if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint32_t)))) {
935 		uint32_t *id_32 = (uint32_t *)(uintptr_t)flow_uuid;
936 		q->__q_flow_id_val32[0] = id_32[0];
937 		q->__q_flow_id_val32[1] = id_32[1];
938 		q->__q_flow_id_val32[2] = id_32[2];
939 		q->__q_flow_id_val32[3] = id_32[3];
940 	} else {
941 		bcopy(flow_uuid, q->__q_flow_id, sizeof(uuid_t));
942 	}
943 }
944 
945 __attribute__((always_inline))
946 static inline void
__packet_get_flow_uuid(const uint64_t ph,uuid_t flow_uuid)947 __packet_get_flow_uuid(const uint64_t ph, uuid_t flow_uuid)
948 {
949 	struct __quantum *q = &QUM_ADDR(ph)->qum_com;
950 
951 	/*
952 	 * Anticipate a nicely (8-bytes) aligned UUID from caller;
953 	 * the one in qum_flow_id is always 8-byte aligned.
954 	 */
955 	if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint64_t)))) {
956 		uint64_t *id_64 = (uint64_t *)(uintptr_t)flow_uuid;
957 		id_64[0] = q->__q_flow_id_val64[0];
958 		id_64[1] = q->__q_flow_id_val64[1];
959 	} else if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint32_t)))) {
960 		uint32_t *id_32 = (uint32_t *)(uintptr_t)flow_uuid;
961 		id_32[0] = q->__q_flow_id_val32[0];
962 		id_32[1] = q->__q_flow_id_val32[1];
963 		id_32[2] = q->__q_flow_id_val32[2];
964 		id_32[3] = q->__q_flow_id_val32[3];
965 	} else {
966 		bcopy(q->__q_flow_id, flow_uuid, sizeof(uuid_t));
967 	}
968 }
969 
970 __attribute__((always_inline))
971 static inline void
__packet_clear_flow_uuid(const uint64_t ph)972 __packet_clear_flow_uuid(const uint64_t ph)
973 {
974 	struct __quantum *q = &QUM_ADDR(ph)->qum_com;
975 	q->__q_flow_id_val64[0] = 0;
976 	q->__q_flow_id_val64[1] = 0;
977 }
978 
979 __attribute__((always_inline))
980 static inline uint8_t
__packet_get_aggregation_type(const uint64_t ph)981 __packet_get_aggregation_type(const uint64_t ph)
982 {
983 	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_aggr_type == sizeof(uint8_t)));
984 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
985 
986 	return PKT_ADDR(ph)->pkt_aggr_type;
987 }
988 
989 __attribute__((always_inline))
990 static inline uint32_t
__packet_get_data_length(const uint64_t ph)991 __packet_get_data_length(const uint64_t ph)
992 {
993 	return QUM_ADDR(ph)->qum_len;
994 }
995 
/*
 * Return the number of buflets attached to a packet (pkt_bufs_cnt),
 * or 1 for a quantum.  Userland never reports 0 (see below).
 */
__attribute__((always_inline))
static inline uint16_t
__packet_get_buflet_count(const uint64_t ph)
{
	uint16_t bcnt = 0;

	switch (SK_PTR_TYPE(ph)) {
	case NEXUS_META_TYPE_PACKET:
		bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;
#ifdef KERNEL
		/* zero is legal only for buffer-on-demand pools */
		VERIFY(bcnt != 0 ||
		    PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
#else /* !KERNEL */
		/*
		 * Handle the case where the metadata region gets
		 * redirected to anonymous zero-filled pages at
		 * defunct time.  There's always 1 buflet in the
		 * packet metadata, so pretend that's the count.
		 */
		if (__improbable(bcnt == 0)) {
			bcnt = 1;
		}
#endif /* !KERNEL */
		break;
	case NEXUS_META_TYPE_QUANTUM:
		/* a quantum always has exactly one buflet */
		bcnt = 1;
		break;
	default:
#ifdef KERNEL
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
#endif /* KERNEL */
		break;
	}
	return bcnt;
}
1033 
/*
 * Append buflet 'bnew0' to the packet's buflet chain after 'bprev0'.
 * A NULL 'bprev0' is allowed only when the chain is empty, in which
 * case the new buflet is linked off the packet's embedded buflet.
 * Bumps pkt_bufs_cnt on success.  Returns 0, or EINVAL (userland
 * builds only; the kernel VERIFYs instead).
 */
__attribute__((always_inline))
static inline int
__packet_add_buflet(const uint64_t ph, const void *bprev0, const void *bnew0)
{
	uint16_t bcnt;

#ifdef KERNEL
	kern_buflet_t bprev = __DECONST(kern_buflet_t, bprev0);
	kern_buflet_t bnew = __DECONST(kern_buflet_t, bnew0);

	VERIFY(PKT_ADDR(ph) && bnew && (bnew != bprev));
	VERIFY(PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
	VERIFY(bnew->buf_ctl != NULL);
#else /* !KERNEL */
	buflet_t bprev = __DECONST(buflet_t, bprev0);
	buflet_t bnew = __DECONST(buflet_t, bnew0);

	if (__improbable(!PKT_ADDR(ph) || !bnew || (bnew == bprev))) {
		return EINVAL;
	}
#endif /* !KERNEL */

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;

	/* NULL bprev is valid only for an empty chain; enforce capacity */
#ifdef KERNEL
	VERIFY((bprev != NULL || bcnt == 0) &&
	    (bcnt < PKT_ADDR(ph)->pkt_bufs_max));
#else /* !KERNEL */
	if (__improbable(bcnt >= PKT_ADDR(ph)->pkt_bufs_max) ||
	    (bprev == NULL && bcnt != 0)) {
		return EINVAL;
	}
#endif /* !KERNEL */

#ifdef KERNEL
#if DEVELOPMENT || DEBUG
	/* check if bprev is the last buflet in the chain */
	struct __kern_buflet *pbft, *kbft;
	int n = bcnt;

	PKT_GET_FIRST_BUFLET(PKT_ADDR(ph), bcnt, pbft);
	kbft = pbft;

	while ((kbft != NULL) && n--) {
		pbft = kbft;
		kbft = __DECONST(struct __kern_buflet *, kbft->buf_nbft_addr);
	}
	ASSERT(n == 0);
	ASSERT(bprev == pbft);
#endif /* DEVELOPMENT || DEBUG */
#endif /* KERNEL */

	/* empty chain: link off the packet's embedded buflet */
	if (bprev == NULL) {
		bprev = &PKT_ADDR(ph)->pkt_qum_buf;
	}
#ifdef KERNEL
	KBUF_LINK(bprev, bnew);
#else /* !KERNEL */
	UBUF_LINK(bprev, bnew);
#endif /* !KERNEL */

	/* pkt_bufs_cnt is declared const; write through a deconst pointer */
	*(uint16_t *)(uintptr_t)&PKT_ADDR(ph)->pkt_bufs_cnt = ++bcnt;
	return 0;
}
1099 
/*
 * Return the buflet that follows 'bprev0' in the packet/quantum's
 * chain; a NULL 'bprev0' yields the first buflet.  Returns NULL at
 * the end of the chain.
 */
__attribute__((always_inline))
static inline void *
__packet_get_next_buflet(const uint64_t ph, const void *bprev0)
{
#ifdef KERNEL
	kern_buflet_t bprev = __DECONST(kern_buflet_t, bprev0);
#else /* !KERNEL */
	buflet_t bprev = __DECONST(buflet_t, bprev0);
#endif /* !KERNEL */
	void *bcur = NULL;

	switch (SK_PTR_TYPE(ph)) {
	case NEXUS_META_TYPE_PACKET: {
		uint32_t bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;
#ifdef KERNEL
		/* zero is legal only for buffer-on-demand pools */
		ASSERT(bcnt != 0 ||
		    PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
#else /* !KERNEL */
		/*
		 * Handle the case where the metadata region gets
		 * redirected to anonymous zero-filled pages at
		 * defunct time.  There's always 1 buflet in the
		 * packet metadata, so pretend that's the count.
		 */
		if (__improbable(bcnt == 0)) {
			bcnt = 1;
			bprev = NULL;
		}
#endif /* !KERNEL */
		PKT_GET_NEXT_BUFLET(PKT_ADDR(ph), bcnt, BLT_ADDR(bprev), bcur);
		break;
	}
	case NEXUS_META_TYPE_QUANTUM:
		QUM_GET_NEXT_BUFLET(QUM_ADDR(ph), BLT_ADDR(bprev), bcur);
		break;
	default:
#ifdef KERNEL
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
#endif /* KERNEL */
		break;
	}
	return bcur;
}
1145 
1146 __attribute__((always_inline))
1147 static inline uint8_t
__packet_get_segment_count(const uint64_t ph)1148 __packet_get_segment_count(const uint64_t ph)
1149 {
1150 	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_seg_cnt == sizeof(uint8_t)));
1151 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1152 
1153 	return PKT_ADDR(ph)->pkt_seg_cnt;
1154 }
1155 
1156 __attribute__((always_inline))
1157 static inline void
__packet_set_segment_count(const uint64_t ph,uint8_t segcount)1158 __packet_set_segment_count(const uint64_t ph, uint8_t segcount)
1159 {
1160 	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_seg_cnt == sizeof(uint8_t)));
1161 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1162 
1163 	PKT_ADDR(ph)->pkt_seg_cnt = segcount;
1164 }
1165 
1166 __attribute__((always_inline))
1167 static inline errno_t
__packet_get_protocol_segment_size(const uint64_t ph,uint16_t * proto_seg_sz)1168 __packet_get_protocol_segment_size(const uint64_t ph, uint16_t *proto_seg_sz)
1169 {
1170 	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_proto_seg_sz == sizeof(uint16_t)));
1171 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1172 	*proto_seg_sz =  PKT_ADDR(ph)->pkt_proto_seg_sz;
1173 	return 0;
1174 }
1175 
1176 __attribute__((always_inline))
1177 static inline errno_t
__packet_set_protocol_segment_size(const uint64_t ph,uint16_t proto_seg_sz)1178 __packet_set_protocol_segment_size(const uint64_t ph, uint16_t proto_seg_sz)
1179 {
1180 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1181 	PKT_ADDR(ph)->pkt_proto_seg_sz = proto_seg_sz;
1182 	return 0;
1183 }
1184 
1185 __attribute__((always_inline))
1186 static inline void
__packet_get_tso_flags(const uint64_t ph,packet_tso_flags_t * flags)1187 __packet_get_tso_flags(const uint64_t ph, packet_tso_flags_t *flags)
1188 {
1189 	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_proto_seg_sz == sizeof(uint16_t)));
1190 
1191 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1192 	*flags = PKT_ADDR(ph)->pkt_csum_flags & (PACKET_CSUM_TSO_FLAGS);
1193 }
1194 
1195 __attribute__((always_inline))
1196 static inline void
__packet_set_tso_flags(const uint64_t ph,packet_tso_flags_t flags)1197 __packet_set_tso_flags(const uint64_t ph, packet_tso_flags_t flags)
1198 {
1199 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1200 
1201 	PKT_ADDR(ph)->pkt_csum_flags |= flags & (PACKET_CSUM_TSO_FLAGS);
1202 }
1203 
1204 __attribute__((always_inline))
1205 static inline uint16_t
__buflet_get_data_limit(const void * buf)1206 __buflet_get_data_limit(const void *buf)
1207 {
1208 	return BLT_ADDR(buf)->buf_dlim;
1209 }
1210 
1211 #ifdef KERNEL
1212 __attribute__((always_inline))
1213 static inline errno_t
__buflet_set_data_limit(const void * buf,const uint16_t dlim)1214 __buflet_set_data_limit(const void *buf, const uint16_t dlim)
1215 {
1216 	/* buffer region is always marked as shareable */
1217 	ASSERT(BLT_ADDR(buf)->buf_ctl->bc_flags & SKMEM_BUFCTL_SHAREOK);
1218 
1219 	/* full bounds checking will be performed during finalize */
1220 	if (__probable((uint32_t)dlim + BLT_ADDR(buf)->buf_boff <=
1221 	    BLT_ADDR(buf)->buf_objlim)) {
1222 		_CASSERT(sizeof(BLT_ADDR(buf)->buf_dlim) == sizeof(uint16_t));
1223 		/* deconst */
1224 		*(uint16_t *)(uintptr_t)&BLT_ADDR(buf)->buf_dlim = dlim;
1225 		return 0;
1226 	}
1227 	return ERANGE;
1228 }
1229 #endif /* KERNEL */
1230 
1231 __attribute__((always_inline))
1232 static inline uint16_t
__buflet_get_data_offset(const void * buf)1233 __buflet_get_data_offset(const void *buf)
1234 {
1235 	return BLT_ADDR(buf)->buf_doff;
1236 }
1237 
1238 /*
1239  * ******************************************************************
1240  * Checks in __packet_finalize for packet finalized from userland
1241  * ******************************************************************
1242  *  +-------+---------------------------+---------------------------+
1243  *  |         NEXUS_META_SUBTYPE_RAW    | NEXUS_META_SUBTYPE_PAYLOAD|
1244  *  |-------+---------------------------+---------------------------+
1245  *  |buflet | (bdoff + len) <= dlim     | (bdoff + len) <= dlim     |
1246  *  |l2_off | l2 == bdoff && l2 < bdlim | l2 = l3 = 0 && doff == 0  |
1247  *  |l3_off | l3 = l2                   | l3 == 0                   |
1248  *  |l4_off | l4 = l3 = l2              | l4 = l3 = 0               |
1249  *  +-------+---------------------------+---------------------------+
1250  *
1251  * ******************************************************************
1252  * Checks in __packet_finalize for packet finalized from kernel
1253  * ******************************************************************
1254  *  +-------+---------------------------+---------------------------+
1255  *  |         NEXUS_META_SUBTYPE_RAW    | NEXUS_META_SUBTYPE_PAYLOAD|
1256  *  |-------+---------------------------+---------------------------+
1257  *  |buflet | (bdoff + len) <= dlim     | (bdoff + len) <= dlim     |
1258  *  |buflet | (boff + objaddr) == addr  | (boff + objaddr) <= addr  |
1259  *  |l2_off | l2 == bdoff && l2 < bdlim | l2 = l3 = 0 && doff == 0  |
1260  *  |l3_off | l3 >= l2 && l3 <bdlim     | l3 == 0                   |
1261  *  |l4_off | l4 = l3                   | l4 = l3 = 0               |
1262  *  +-------+---------------------------+---------------------------+
1263  *
1264  */
/*
 * Validate and finalize a packet/quantum: walk every buflet,
 * bounds-check each one, accumulate the total data length into
 * qum_len, then validate the header offsets per the tables in the
 * comment block above.  On success sets QUM_F_FINALIZED; on failure
 * sets QUM_F_DROPPED and zeroes qum_len.  Returns 0 or an errno
 * (ERANGE/EINVAL).
 */
__attribute__((always_inline))
static inline int
__packet_finalize(const uint64_t ph)
{
	void *bcur = NULL, *bprev = NULL;
	uint32_t len, bcnt, bdoff0, bdlim0;
	int err = 0;

#ifdef KERNEL
	ASSERT(QUM_ADDR(ph)->qum_qflags & QUM_F_INTERNALIZED);
#endif /* KERNEL */
	/* start from a clean slate; clear any previous outcome */
	QUM_ADDR(ph)->qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);

	bcnt = __packet_get_buflet_count(ph);
	len = QUM_ADDR(ph)->qum_len = 0;

	/*
	 * Pass 1: walk the buflet chain, bounds-checking each buflet
	 * and summing the data lengths.
	 *
	 * NOTE(review): if bcnt is 0 (kernel, buffer-on-demand pool),
	 * the loop never runs and bdoff0/bdlim0 are read uninitialized
	 * in the packet case below — confirm bcnt > 0 is guaranteed
	 * on this path.
	 */
	while (bcnt--) {
		bcur = __packet_get_next_buflet(ph, bprev);

#ifdef KERNEL
		ASSERT(bcur != NULL);
		ASSERT(BLT_ADDR(bcur)->buf_addr != 0);
#else  /* !KERNEL */
		/*
		 * NOTE(review): userland also rejects a nonzero
		 * buf_grolen here — presumably GRO-staged buflets may
		 * not be finalized from user space; confirm.
		 */
		if (__improbable(bcur == NULL || BLT_ADDR(bcur)->buf_grolen != 0)) {
			err = ERANGE;
			break;
		}
#endif /* KERNEL */

		/* save data offset from the first buflet */
		if (bprev == NULL) {
			bdoff0 = __buflet_get_data_offset(bcur);
			bdlim0 = __buflet_get_data_limit(bcur);
		}

#ifndef KERNEL
		if (__improbable(!BUF_IN_RANGE(BLT_ADDR(bcur)))) {
			err = ERANGE;
			break;
		}
#else /* !KERNEL */
		/* kernel: an attached mbuf legitimizes an empty buflet */
		if (__improbable(!BUF_IN_RANGE(BLT_ADDR(bcur)) &&
		    !PKT_HAS_ATTACHED_MBUF(ph))) {
			err = ERANGE;
			break;
		}
#endif /* KERNEL */
		len += BLT_ADDR(bcur)->buf_dlen;
		bprev = bcur;
	}

	if (__improbable(err != 0)) {
		goto done;
	}

	/* Pass 2: per-metadata-type validation of header offsets */
	switch (SK_PTR_TYPE(ph)) {
	case NEXUS_META_TYPE_PACKET:
		/* pkt_headroom is a uint8_t; reject larger offsets */
		if (__improbable(bdoff0 > UINT8_MAX)) {
			err = ERANGE;
			goto done;
		}
		/* internalize headroom value from offset */
		PKT_ADDR(ph)->pkt_headroom = (uint8_t)bdoff0;
		/* validate header offsets in packet */
		switch (SK_PTR_SUBTYPE(ph)) {
		case NEXUS_META_SUBTYPE_RAW:
#ifndef KERNEL
			/* Overwrite L2 len for raw packets from user space */
			PKT_ADDR(ph)->pkt_l2_len = 0;
#else /* !KERNEL */
			/* ensure that L3 >= L2 && L3 < bdlim */
			if (__improbable((PKT_ADDR(ph)->pkt_headroom +
			    PKT_ADDR(ph)->pkt_l2_len) >= bdlim0)) {
				err = ERANGE;
				goto done;
			}
#endif /* KERNEL */
			break;
		case NEXUS_META_SUBTYPE_PAYLOAD:
			/*
			 * For payload packet there is no concept of headroom
			 * and L3 offset should always be 0
			 */
			if (__improbable((PKT_ADDR(ph)->pkt_headroom != 0) ||
			    (PKT_ADDR(ph)->pkt_l2_len != 0))) {
				err = ERANGE;
				goto done;
			}
			break;
		default:
#ifdef KERNEL
			VERIFY(0);
			/* NOTREACHED */
			__builtin_unreachable();
#endif /* KERNEL */
			break;
		}

		/* optional metadata must be self-consistent when flagged */
		if (__improbable(PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_DATA)) {
#ifdef KERNEL
			struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
			struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
			if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXPIRE_TS) &&
			    po->__po_expire_ts == 0) {
				err = EINVAL;
				goto done;
			}
			if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) &&
			    po->__po_token_len == 0) {
				err =  EINVAL;
				goto done;
			}
			ASSERT(err == 0);
		}

		/*
		 * NOTE: we don't need the validation for total packet length
		 * as checking if each buflet is in range and that
		 * (pkt_headroom == bdoff0), should cover this check.
		 */
		break;

	default:
		/* nothing to do currently for quantum */
		break;
	}

done:
	/* commit the outcome: FINALIZED with the summed length, or DROPPED */
	if (__probable(err == 0)) {
		QUM_ADDR(ph)->qum_len = len;
		QUM_ADDR(ph)->qum_qflags |= QUM_F_FINALIZED;
	} else {
		QUM_ADDR(ph)->qum_len = 0;
		QUM_ADDR(ph)->qum_qflags |= QUM_F_DROPPED;
	}

	return err;
}
1405 
1406 __attribute__((always_inline))
1407 static inline boolean_t
__packet_is_finalized(const uint64_t ph)1408 __packet_is_finalized(const uint64_t ph)
1409 {
1410 	return QUM_ADDR(ph)->qum_qflags & QUM_F_FINALIZED;
1411 }
1412 
1413 #ifdef KERNEL
1414 /*
1415  * function to initialize a packet with mbuf chain.
1416  * Apart from the attached mbuf, the packet can also be used to convey
1417  * additional metadata like the headroom and L2 header length.
1418  * For a packet with attached mbuf, the pkt_length conveys the length of
1419  * the attached mbuf. If the data copied is partial then PKT_F_TRUNCATED is
1420  * also set.
1421  */
__attribute__((always_inline))
static inline int
__packet_initialize_with_mbufchain(struct __kern_packet *pkt, struct mbuf *mbuf,
    uint8_t headroom, uint8_t l2len)
{
	/* must be an internalized packet with nothing attached yet */
	VERIFY(METADATA_TYPE(pkt) == NEXUS_META_TYPE_PACKET);
	VERIFY(pkt->pkt_qum.qum_qflags & QUM_F_INTERNALIZED);
	VERIFY((pkt->pkt_pflags & PKT_F_MBUF_MASK) == 0);
	VERIFY((pkt->pkt_pflags & PKT_F_PKT_DATA) == 0);
	VERIFY(pkt->pkt_mbuf == NULL);

	pkt->pkt_qum.qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);
	pkt->pkt_mbuf = mbuf;
	/* data resides in the mbuf; mark it as such and as truncated */
	pkt->pkt_pflags |= (PKT_F_MBUF_DATA | PKT_F_TRUNCATED);
	pkt->pkt_headroom = headroom;
	pkt->pkt_l2_len = l2len;
	/* pkt_length conveys the attached mbuf's total length */
	pkt->pkt_length = m_pktlen(mbuf);
	/* the embedded buflet carries no data of its own */
	pkt->pkt_qum_buf.buf_dlen = 0;
	pkt->pkt_qum_buf.buf_doff = 0;
	pkt->pkt_qum.qum_qflags |= QUM_F_FINALIZED;
	return 0;
}
1444 
/*
 * Single-mbuf variant of __packet_initialize_with_mbufchain().
 * Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_initialize_with_mbuf(struct __kern_packet *pkt, struct mbuf *mbuf,
    uint8_t headroom, uint8_t l2len)
{
	/*
	 * NOTE(review): the standalone-mbuf check (m_nextpkt == NULL)
	 * runs after initialization has already taken effect.
	 */
	__packet_initialize_with_mbufchain(pkt, mbuf, headroom, l2len);
	VERIFY(mbuf->m_nextpkt == NULL);
	return 0;
}
1454 
1455 /*
1456  * function to finalize a packet with attached mbuf.
1457  */
__attribute__((always_inline))
static inline int
__packet_finalize_with_mbuf(struct __kern_packet *pkt)
{
	uint16_t bdoff, bdlim, bdlen;
	struct __kern_buflet *buf;
	int err = 0;

	/* must be an internalized packet backed by exactly one buflet
	 * with mbuf-resident data (and not packet-resident data) */
	VERIFY(METADATA_TYPE(pkt) == NEXUS_META_TYPE_PACKET);
	VERIFY((pkt->pkt_pflags & (PKT_F_MBUF_DATA | PKT_F_PKT_DATA)) ==
	    PKT_F_MBUF_DATA);
	VERIFY(pkt->pkt_mbuf != NULL);
	ASSERT(pkt->pkt_qum.qum_qflags & QUM_F_INTERNALIZED);
	VERIFY(pkt->pkt_bufs_cnt == 1);
	PKT_GET_FIRST_BUFLET(pkt, pkt->pkt_bufs_cnt, buf);
	ASSERT(buf->buf_addr != 0);

	/* clear any previous outcome before re-validating */
	pkt->pkt_qum.qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);
	pkt->pkt_pflags &= ~PKT_F_TRUNCATED;
	bdlen = buf->buf_dlen;
	bdlim = buf->buf_dlim;
	bdoff = buf->buf_doff;
	if (__improbable(!BUF_IN_RANGE(buf))) {
		err = ERANGE;
		goto done;
	}

	/* validate header offsets in packet */
	switch (METADATA_SUBTYPE(pkt)) {
	case NEXUS_META_SUBTYPE_RAW:
		/* headroom must equal the data offset and fit the buflet */
		if (__improbable((pkt->pkt_headroom != bdoff) ||
		    (pkt->pkt_headroom >= bdlim))) {
			err = ERANGE;
			goto done;
		}
		/* L2 header must also end within the buflet */
		if (__improbable((pkt->pkt_headroom +
		    pkt->pkt_l2_len) >= bdlim)) {
			err = ERANGE;
			goto done;
		}
		break;

	case NEXUS_META_SUBTYPE_PAYLOAD:
		/*
		 * For payload packet there is no concept of headroom.
		 */
		if (__improbable((pkt->pkt_headroom != 0) || (bdoff != 0) ||
		    (pkt->pkt_l2_len != 0))) {
			err = ERANGE;
			goto done;
		}
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
		break;
	}


	/* optional metadata must be self-consistent when flagged */
	if (__improbable(pkt->pkt_pflags & PKT_F_OPT_DATA)) {
		struct __packet_opt *po = pkt->pkt_com_opt;

		if ((pkt->pkt_pflags & PKT_F_OPT_EXPIRE_TS) &&
		    po->__po_expire_ts == 0) {
			err = EINVAL;
			goto done;
		}
		if ((pkt->pkt_pflags & PKT_F_OPT_TOKEN) &&
		    po->__po_token_len == 0) {
			err =  EINVAL;
			goto done;
		}
	}
	ASSERT(err == 0);

done:
	if (__probable(err == 0)) {
		/* pkt_length tracks the mbuf; flag partially-copied data */
		pkt->pkt_length = (uint32_t)m_pktlen(pkt->pkt_mbuf);
		if (bdlen < pkt->pkt_length) {
			pkt->pkt_pflags |= PKT_F_TRUNCATED;
		}
		pkt->pkt_qum.qum_qflags |= QUM_F_FINALIZED;
	} else {
		pkt->pkt_length = 0;
		pkt->pkt_qum.qum_qflags |= QUM_F_DROPPED;
	}

	return err;
}
1549 
1550 __attribute__((always_inline))
1551 static inline uint32_t
__packet_get_object_index(const uint64_t ph)1552 __packet_get_object_index(const uint64_t ph)
1553 {
1554 	return METADATA_IDX(QUM_ADDR(ph));
1555 }
1556 
1557 __attribute__((always_inline))
1558 static inline errno_t
__packet_get_timestamp(const uint64_t ph,uint64_t * ts,boolean_t * valid)1559 __packet_get_timestamp(const uint64_t ph, uint64_t *ts, boolean_t *valid)
1560 {
1561 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1562 
1563 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TS_VALID) != 0) {
1564 		if (valid != NULL) {
1565 			*valid = TRUE;
1566 		}
1567 		*ts = PKT_ADDR(ph)->pkt_timestamp;
1568 	} else {
1569 		if (valid != NULL) {
1570 			*valid = FALSE;
1571 		}
1572 		*ts = 0;
1573 	}
1574 
1575 	return 0;
1576 }
1577 
1578 __attribute__((always_inline))
1579 static inline errno_t
__packet_set_timestamp(const uint64_t ph,uint64_t ts,boolean_t valid)1580 __packet_set_timestamp(const uint64_t ph, uint64_t ts, boolean_t valid)
1581 {
1582 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1583 
1584 	if (valid) {
1585 		PKT_ADDR(ph)->pkt_timestamp = ts;
1586 		PKT_ADDR(ph)->pkt_pflags |= PKT_F_TS_VALID;
1587 	} else {
1588 		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_TS_VALID;
1589 		PKT_ADDR(ph)->pkt_timestamp = 0;
1590 	}
1591 
1592 	return 0;
1593 }
1594 
1595 __attribute__((always_inline))
1596 static inline errno_t
__packet_get_tx_completion_data(const uint64_t ph,uintptr_t * cb_arg,uintptr_t * cb_data)1597 __packet_get_tx_completion_data(const uint64_t ph, uintptr_t *cb_arg,
1598     uintptr_t *cb_data)
1599 {
1600 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1601 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_DATA) != 0) {
1602 		ASSERT((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_ALLOC));
1603 		*cb_arg = PKT_ADDR(ph)->pkt_tx_compl_cb_arg;
1604 		*cb_data = PKT_ADDR(ph)->pkt_tx_compl_cb_data;
1605 	} else {
1606 		*cb_arg = 0;
1607 		*cb_data = 0;
1608 	}
1609 	return 0;
1610 }
1611 
1612 __attribute__((always_inline))
1613 static inline errno_t
__packet_set_tx_completion_data(const uint64_t ph,uintptr_t cb_arg,uintptr_t cb_data)1614 __packet_set_tx_completion_data(const uint64_t ph, uintptr_t cb_arg,
1615     uintptr_t cb_data)
1616 {
1617 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1618 	_KPKT_INIT_TX_COMPL_DATA(PKT_ADDR(ph));
1619 	PKT_ADDR(ph)->pkt_tx_compl_cb_arg = cb_arg;
1620 	PKT_ADDR(ph)->pkt_tx_compl_cb_data = cb_data;
1621 	return 0;
1622 }
1623 
1624 __attribute__((always_inline))
1625 static inline errno_t
__packet_get_timestamp_requested(const uint64_t ph,boolean_t * requested)1626 __packet_get_timestamp_requested(const uint64_t ph, boolean_t *requested)
1627 {
1628 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1629 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0) {
1630 		*requested = TRUE;
1631 	} else {
1632 		*requested = FALSE;
1633 	}
1634 	return 0;
1635 }
1636 
1637 __attribute__((always_inline))
1638 static inline errno_t
__packet_get_tx_completion_status(const uint64_t ph,kern_return_t * status)1639 __packet_get_tx_completion_status(const uint64_t ph, kern_return_t *status)
1640 {
1641 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1642 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_DATA) != 0) {
1643 		ASSERT((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_ALLOC));
1644 		*status = (kern_return_t)PKT_ADDR(ph)->pkt_tx_compl_status;
1645 	} else {
1646 		*status = 0;
1647 	}
1648 	return 0;
1649 }
1650 
1651 __attribute__((always_inline))
1652 static inline errno_t
__packet_set_tx_completion_status(const uint64_t ph,kern_return_t status)1653 __packet_set_tx_completion_status(const uint64_t ph, kern_return_t status)
1654 {
1655 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1656 	_KPKT_INIT_TX_COMPL_DATA(PKT_ADDR(ph));
1657 	PKT_ADDR(ph)->pkt_tx_compl_status = (uint32_t)status;
1658 	return 0;
1659 }
1660 
1661 __attribute__((always_inline))
1662 static inline errno_t
__packet_set_tx_nx_port(const uint64_t ph,nexus_port_t nx_port,uint16_t vpna_gencnt)1663 __packet_set_tx_nx_port(const uint64_t ph, nexus_port_t nx_port,
1664     uint16_t vpna_gencnt)
1665 {
1666 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1667 	PKT_ADDR(ph)->pkt_nx_port = nx_port;
1668 	PKT_ADDR(ph)->pkt_vpna_gencnt = vpna_gencnt;
1669 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_TX_PORT_DATA;
1670 	return 0;
1671 }
1672 
1673 __attribute__((always_inline))
1674 static inline errno_t
__packet_get_tx_nx_port(const uint64_t ph,nexus_port_t * nx_port,uint16_t * vpna_gencnt)1675 __packet_get_tx_nx_port(const uint64_t ph, nexus_port_t *nx_port,
1676     uint16_t *vpna_gencnt)
1677 {
1678 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1679 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_PORT_DATA) == 0) {
1680 		return ENOTSUP;
1681 	}
1682 
1683 	*nx_port = PKT_ADDR(ph)->pkt_nx_port;
1684 	*vpna_gencnt = PKT_ADDR(ph)->pkt_vpna_gencnt;
1685 	return 0;
1686 }
1687 
1688 __attribute__((always_inline))
1689 static inline errno_t
__packet_get_tx_nx_port_id(const uint64_t ph,uint32_t * nx_port_id)1690 __packet_get_tx_nx_port_id(const uint64_t ph, uint32_t *nx_port_id)
1691 {
1692 	errno_t err;
1693 	nexus_port_t nx_port;
1694 	uint16_t vpna_gencnt;
1695 
1696 	_CASSERT(sizeof(nx_port) == sizeof(uint16_t));
1697 
1698 	err = __packet_get_tx_nx_port(ph, &nx_port, &vpna_gencnt);
1699 	if (err == 0) {
1700 		*nx_port_id = PKT_COMPOSE_NX_PORT_ID(nx_port, vpna_gencnt);
1701 	}
1702 	return err;
1703 }
1704 
1705 
1706 __attribute__((always_inline))
1707 static inline errno_t
__packet_get_flowid(const uint64_t ph,packet_flowid_t * pflowid)1708 __packet_get_flowid(const uint64_t ph, packet_flowid_t *pflowid)
1709 {
1710 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1711 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_FLOW_ID) == 0) {
1712 		return ENOENT;
1713 	}
1714 	*pflowid = PKT_ADDR(ph)->pkt_flow_token;
1715 	return 0;
1716 }
1717 #endif /* KERNEL */
1718 
1719 extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t);
1720 
/*
 * Fold a 32-bit checksum accumulator down to 16 bits, propagating
 * the carries; three folds suffice for any 32-bit input.
 */
__attribute__((always_inline))
static inline uint16_t
__packet_fold_sum(uint32_t sum)
{
	int i;

	for (i = 0; i < 3; i++) {
		sum = (sum >> 16) + (sum & 0xffff);
	}
	return (uint16_t)(sum & 0xffff);
}
1730 
/*
 * Fold a 32-bit checksum accumulator to 16 bits and return its
 * one's complement (the final on-wire checksum form).
 */
__attribute__((always_inline))
static inline uint16_t
__packet_fold_sum_final(uint32_t sum)
{
	int i;

	for (i = 0; i < 3; i++) {
		sum = (sum >> 16) + (sum & 0xffff);
	}
	return (uint16_t)(~sum & 0xffff);
}
1740 
/*
 * Compute a checksum over 'len' bytes at 'data', seeded with 'sum0';
 * thin wrapper around the optimized os_cpu_in_cksum().
 */
__attribute__((always_inline))
static inline uint32_t
__packet_cksum(const void *data, uint32_t len, uint32_t sum0)
{
	return os_cpu_in_cksum(data, len, sum0);
}
1747 
1748 extern uint32_t os_cpu_copy_in_cksum(const void *, void *, uint32_t, uint32_t);
1749 
/*
 * Copy 'len' bytes from 'src' to 'dst' while computing a checksum
 * seeded with 'sum0'; thin wrapper around os_cpu_copy_in_cksum().
 */
__attribute__((always_inline))
static inline uint32_t
__packet_copy_and_sum(const void *src, void *dst, uint32_t len, uint32_t sum0)
{
	return os_cpu_copy_in_cksum(src, dst, len, sum0);
}
1756 
/*
 * Incrementally adjust a 16-bit checksum for a 16-bit field that
 * changed from 'old' to 'new', folding the carry/borrow once.
 */
__attribute__((always_inline))
static inline uint16_t
__packet_fix_sum(uint16_t csum, uint16_t old, uint16_t new)
{
	uint32_t adjusted = (uint32_t)csum + old - new;

	adjusted = (adjusted >> 16) + (adjusted & 0xffff);   /* Only add carry once */
	return (uint16_t)(adjusted & 0xffff);
}
1766 
1767 /* MUST be used for uint32_t fields */
/*
 * Overwrite a 32-bit header field with 'new' and incrementally fix
 * the 16-bit checksum at *csum, adjusting for the high and low
 * 16-bit halves separately.  'field' may be unaligned, hence memcpy.
 */
__attribute__((always_inline))
static inline void
__packet_fix_hdr_sum(uint8_t *field, uint16_t *csum, uint32_t new)
{
	uint32_t old;

	memcpy(&old, field, sizeof(old));
	memcpy(field, &new, sizeof(new));
	*csum = __packet_fix_sum(
	    __packet_fix_sum(*csum, (uint16_t)(old >> 16),
	    (uint16_t)(new >> 16)),
	    (uint16_t)(old & 0xffff), (uint16_t)(new & 0xffff));
}
1779 
1780 __attribute__((always_inline))
1781 static inline void *
__buflet_get_data_address(const void * buf)1782 __buflet_get_data_address(const void *buf)
1783 {
1784 #if (defined(KERNEL) && (DEBUG || DEVELOPMENT))
1785 	ASSERT(BLT_ADDR(buf)->buf_addr ==
1786 	    (mach_vm_address_t)BLT_ADDR(buf)->buf_objaddr +
1787 	    BLT_ADDR(buf)->buf_boff);
1788 #endif /* KERNEL && (DEBUG || DEVELOPMENT) */
1789 	return (void *)(BLT_ADDR(buf)->buf_addr);
1790 }
1791 
1792 #ifdef KERNEL
1793 __attribute__((always_inline))
1794 static inline errno_t
__buflet_set_data_address(const void * buf,const void * addr)1795 __buflet_set_data_address(const void *buf, const void *addr)
1796 {
1797 	/* buffer region is always marked as shareable */
1798 	ASSERT(BLT_ADDR(buf)->buf_ctl->bc_flags & SKMEM_BUFCTL_SHAREOK);
1799 
1800 	/* full bounds checking will be performed during finalize */
1801 	if (__probable((uintptr_t)addr >=
1802 	    (uintptr_t)BLT_ADDR(buf)->buf_objaddr)) {
1803 		_CASSERT(sizeof(BLT_ADDR(buf)->buf_addr) ==
1804 		    sizeof(mach_vm_address_t));
1805 		/* deconst */
1806 		*(mach_vm_address_t *)(uintptr_t)&BLT_ADDR(buf)->buf_addr =
1807 		    (mach_vm_address_t)addr;
1808 
1809 		/* compute the offset from objaddr for the case of shared buffer */
1810 		_CASSERT(sizeof(BLT_ADDR(buf)->buf_boff) == sizeof(uint16_t));
1811 		*(uint16_t *)(uintptr_t)&BLT_ADDR(buf)->buf_boff =
1812 		    (uint16_t)((mach_vm_address_t)addr -
1813 		    (mach_vm_address_t)BLT_ADDR(buf)->buf_objaddr);
1814 
1815 		return 0;
1816 	}
1817 	return ERANGE;
1818 }
1819 #endif /* KERNEL */
1820 
/*
 * Set the buflet's data offset (buf_doff).  Kernel builds reject an
 * offset that would fall outside the backing object (ERANGE);
 * userland defers all bounds checking to finalize.
 */
__attribute__((always_inline))
static inline int
__buflet_set_data_offset(const void *buf, const uint16_t doff)
{
#ifdef KERNEL
	/*
	 * Kernel-specific assertion.  For user space, the metadata
	 * region gets redirected to anonymous zero-filled pages at
	 * defunct time, so ignore it there.
	 */
	ASSERT(BLT_ADDR(buf)->buf_dlim != 0);

	if (__probable(doff + BLT_ADDR(buf)->buf_boff <=
	    BLT_ADDR(buf)->buf_objlim)) {
		BLT_ADDR(buf)->buf_doff = doff;
		return 0;
	}
	return ERANGE;
#else /* !KERNEL */
	/* userland: full validation happens at finalize time */
	BLT_ADDR(buf)->buf_doff = doff;
	return 0;
#endif /* KERNEL */
}
1844 
1845 __attribute__((always_inline))
1846 static inline int
__buflet_set_data_length(const void * buf,const uint16_t dlen)1847 __buflet_set_data_length(const void *buf, const uint16_t dlen)
1848 {
1849 #ifdef KERNEL
1850 	/*
1851 	 * Kernel-specific assertion.  For user space, the metadata
1852 	 * region gets redirected to anonymous zero-filled pages at
1853 	 * defunct time, so ignore it there.
1854 	 */
1855 	ASSERT(BLT_ADDR(buf)->buf_dlim != 0);
1856 
1857 	if (__probable((uint32_t)dlen <= BLT_ADDR(buf)->buf_objlim)) {
1858 		BLT_ADDR(buf)->buf_dlen = dlen;
1859 		return 0;
1860 	}
1861 	return ERANGE;
1862 #else /* !KERNEL */
1863 	BLT_ADDR(buf)->buf_dlen = dlen;
1864 	return 0;
1865 #endif /* KERNEL */
1866 }
1867 
1868 __attribute__((always_inline))
1869 static inline uint16_t
__buflet_get_data_length(const void * buf)1870 __buflet_get_data_length(const void *buf)
1871 {
1872 	return BLT_ADDR(buf)->buf_dlen;
1873 }
1874 
#ifdef KERNEL
/*
 * Move the buflet's buffer window to start `off' bytes into its
 * backing buffer object, updating buf_boff, buf_addr and (when the
 * window would overrun the object) buf_dlim accordingly.  Returns
 * 0 on success, ERANGE if off exceeds the object limit.
 */
__attribute__((always_inline))
static inline int
__buflet_set_buffer_offset(const void *buf, const uint16_t off)
{
	ASSERT(BLT_ADDR(buf)->buf_objlim != 0);

	if (__probable(off <= BLT_ADDR(buf)->buf_objlim)) {
		/* deconst writes below: these fields are const-qualified */
		_CASSERT(sizeof(BLT_ADDR(buf)->buf_boff) == sizeof(uint16_t));
		*(uint16_t *)(uintptr_t)&BLT_ADDR(buf)->buf_boff = off;

		/* adjust dlim and buf_addr */
		if (BLT_ADDR(buf)->buf_dlim + off >= BLT_ADDR(buf)->buf_objlim) {
			_CASSERT(sizeof(BLT_ADDR(buf)->buf_dlim) == sizeof(uint16_t));
			/*
			 * NOTE(review): the cast binds to buf_objlim only,
			 * i.e. ((uint16_t)objlim) - off.  If buf_objlim can
			 * exceed 65535 this truncates before subtracting;
			 * confirm whether (uint16_t)(objlim - off) was meant.
			 */
			*(uint16_t *)(uintptr_t)&BLT_ADDR(buf)->buf_dlim =
			    (uint16_t)BLT_ADDR(buf)->buf_objlim - off;
		}
		*(mach_vm_address_t *)(uintptr_t)&BLT_ADDR(buf)->buf_addr =
		    (mach_vm_address_t)BLT_ADDR(buf)->buf_objaddr + off;
		return 0;
	}
	return ERANGE;
}
#endif /* KERNEL */
1899 
1900 __attribute__((always_inline))
1901 static inline uint16_t
__buflet_get_buffer_offset(const void * buf)1902 __buflet_get_buffer_offset(const void *buf)
1903 {
1904 #if (defined(KERNEL) && (DEBUG || DEVELOPMENT))
1905 	ASSERT(BLT_ADDR(buf)->buf_addr ==
1906 	    (mach_vm_address_t)BLT_ADDR(buf)->buf_objaddr +
1907 	    BLT_ADDR(buf)->buf_boff);
1908 #endif /* KERNEL && (DEBUG || DEVELOPMENT) */
1909 	return BLT_ADDR(buf)->buf_boff;
1910 }
1911 
#ifdef KERNEL
/*
 * Record the per-segment (GRO) length used when walking the
 * coalesced sub-buffers of this buflet; see __buflet_get_next_buf().
 * Returns 0 on success, ERANGE if len exceeds the data limit.
 */
__attribute__((always_inline))
static inline int
__buflet_set_gro_len(const void *buf, const uint16_t len)
{
	ASSERT(BLT_ADDR(buf)->buf_dlim != 0);

	if (__improbable(len > BLT_ADDR(buf)->buf_dlim)) {
		return ERANGE;
	}
	/* deconst */
	_CASSERT(sizeof(BLT_ADDR(buf)->buf_grolen) == sizeof(uint16_t));
	*(uint16_t *)(uintptr_t)&BLT_ADDR(buf)->buf_grolen = len;
	return 0;
}
#endif /* KERNEL */
1928 
1929 __attribute__((always_inline))
1930 static inline uint16_t
__buflet_get_gro_len(const void * buf)1931 __buflet_get_gro_len(const void *buf)
1932 {
1933 	return BLT_ADDR(buf)->buf_grolen;
1934 }
1935 
1936 __attribute__((always_inline))
1937 static inline void *
__buflet_get_next_buf(const void * buflet,const void * prev_buf)1938 __buflet_get_next_buf(const void *buflet, const void *prev_buf)
1939 {
1940 	uint16_t gro_len, dlen;
1941 	mach_vm_address_t next_buf, baddr;
1942 
1943 	ASSERT(BLT_ADDR(buflet)->buf_dlen != 0);
1944 	ASSERT(BLT_ADDR(buflet)->buf_grolen != 0);
1945 
1946 	gro_len = BLT_ADDR(buflet)->buf_grolen;
1947 	dlen = BLT_ADDR(buflet)->buf_dlen;
1948 	baddr = BLT_ADDR(buflet)->buf_addr;
1949 	if (prev_buf != NULL) {
1950 		ASSERT((mach_vm_address_t)prev_buf >= BLT_ADDR(buflet)->buf_addr);
1951 		next_buf = (mach_vm_address_t)prev_buf + gro_len;
1952 	} else {
1953 		next_buf = BLT_ADDR(buflet)->buf_addr;
1954 	}
1955 
1956 	if (__probable(next_buf < baddr + dlen)) {
1957 		ASSERT(next_buf + gro_len <= baddr + dlen);
1958 		return (void *)next_buf;
1959 	}
1960 
1961 	return NULL;
1962 }
1963 
#ifdef KERNEL
/*
 * Return the segment backing the buflet's buffer object; when idx
 * is non-NULL, also return the object's index within that segment.
 */
__attribute__((always_inline))
static inline struct sksegment *
__buflet_get_object_segment(const void *buf, kern_obj_idx_seg_t *idx)
{
	if (idx != NULL) {
		/* the two index types must be interchangeable */
		_CASSERT(sizeof(obj_idx_t) == sizeof(kern_obj_idx_seg_t));
		*idx = BLT_ADDR(buf)->buf_ctl->bc_idx;
	}
	return BLT_ADDR(buf)->buf_ctl->bc_slab->sl_seg;
}
#endif /* KERNEL */
1978 
/*
 * Return the base address of the buflet's underlying buffer object
 * (as opposed to the possibly-offset data address).
 */
__attribute__((always_inline))
static inline void *
__buflet_get_object_address(const void *buf)
{
#ifdef KERNEL
	return (void *)(BLT_ADDR(buf)->buf_objaddr);
#else /* !KERNEL */
	/*
	 * For user space, shared buffer is not available and hence the data
	 * address is immutable and is always the same as the underlying
	 * buffer object address itself.
	 */
	return __buflet_get_data_address(buf);
#endif /* !KERNEL */
}
1994 
/*
 * Return the size limit of the buflet's underlying buffer object
 * (as opposed to the possibly-adjusted data limit).
 */
__attribute__((always_inline))
static inline uint32_t
__buflet_get_object_limit(const void *buf)
{
#ifdef KERNEL
	return BLT_ADDR(buf)->buf_objlim;
#else /* !KERNEL */
	/*
	 * For user space, shared buffer is not available and hence the data
	 * limit is immutable and is always the same as the underlying buffer
	 * object limit itself.
	 */
	return (uint32_t)__buflet_get_data_limit(buf);
#endif /* !KERNEL */
}
2010 
2011 __attribute__((always_inline))
2012 static inline packet_trace_id_t
__packet_get_trace_id(const uint64_t ph)2013 __packet_get_trace_id(const uint64_t ph)
2014 {
2015 	switch (SK_PTR_TYPE(ph)) {
2016 	case NEXUS_META_TYPE_PACKET:
2017 		return PKT_ADDR(ph)->pkt_trace_id;
2018 		break;
2019 	default:
2020 		return 0;
2021 	}
2022 }
2023 
/*
 * Set the packet's trace ID; this value is later emitted as the
 * kdebug argument by __packet_trace_event().  Valid only for full
 * packets (enforced by the type assertion).
 */
__attribute__((always_inline))
static inline void
__packet_set_trace_id(const uint64_t ph, packet_trace_id_t id)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_trace_id = id;
}
2031 
/*
 * Emit a kdebug trace event for this packet, passing its trace ID
 * as the event argument.  Valid only for full packets.
 */
__attribute__((always_inline))
static inline void
__packet_trace_event(const uint64_t ph, uint32_t event)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	/*
	 * The pragma silences unused-parameter warnings on kernel
	 * configurations where KDBG() expands to nothing — presumably
	 * RELEASE builds with kdebug disabled; confirm against the
	 * KDBG definition.
	 */
#pragma unused(event, ph)
	KDBG(event, PKT_ADDR(ph)->pkt_trace_id);
#else /* !KERNEL */
	kdebug_trace(event, PKT_ADDR(ph)->pkt_trace_id, 0, 0, 0);
#endif /* !KERNEL */
}
2044 
2045 #ifdef KERNEL
2046 __attribute__((always_inline))
2047 static inline packet_trace_tag_t
__packet_get_trace_tag(const uint64_t ph)2048 __packet_get_trace_tag(const uint64_t ph)
2049 {
2050 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
2051 	return PKT_ADDR(ph)->pkt_trace_tag;
2052 }
2053 
/*
 * Set the packet's trace tag.  Valid only for full packets
 * (enforced by the type assertion).
 */
__attribute__((always_inline))
static inline void
__packet_set_trace_tag(const uint64_t ph, packet_trace_tag_t tag)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_trace_tag = tag;
}
2061 
/*
 * Invoke every registered TX-completion callback for this packet,
 * passing the packet's completion status, completion data and
 * timestamp, then clear the timestamp-requested flag.  The packet
 * must have PKT_F_TX_COMPL_TS_REQ set on entry.
 */
static inline void
__packet_perform_tx_completion_callbacks(const kern_packet_t ph, ifnet_t ifp)
{
	/*
	 * NOTE: this function can be called with ifp as NULL.
	 */
	uint64_t ts;
	kern_return_t tx_status;
	uintptr_t cb_arg, cb_data;
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph);

	ASSERT((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0);
	(void) __packet_get_tx_completion_status(ph, &tx_status);
	__packet_get_tx_completion_data(ph, &cb_arg, &cb_data);
	__packet_get_timestamp(ph, &ts, NULL);
	/* walk the callback bitmask, lowest set bit first */
	while (kpkt->pkt_tx_compl_callbacks != 0) {
		mbuf_tx_compl_func cb;
		uint32_t i;

		/* index of lowest set bit; ffs() is 1-based */
		i = ffs(kpkt->pkt_tx_compl_callbacks) - 1;
		/*
		 * NOTE(review): `1 << i' is a signed shift; if bit 31 can
		 * ever be set in pkt_tx_compl_callbacks this is UB —
		 * confirm the mask width, else prefer 1U << i.
		 */
		kpkt->pkt_tx_compl_callbacks &= ~(1 << i);
		cb = m_get_tx_compl_callback(i);
		/* slots may have been deregistered; skip NULL entries */
		if (__probable(cb != NULL)) {
			cb(kpkt->pkt_tx_compl_context, ifp, ts, cb_arg, cb_data,
			    tx_status);
		}
	}
	kpkt->pkt_pflags &= ~PKT_F_TX_COMPL_TS_REQ;
}
2091 
2092 static inline void *
__packet_get_priv(const kern_packet_t ph)2093 __packet_get_priv(const kern_packet_t ph)
2094 {
2095 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
2096 	return PKT_ADDR(ph)->pkt_priv;
2097 }
2098 
/*
 * Attach an opaque private pointer to the packet; it is stored
 * verbatim and retrievable via __packet_get_priv().  Valid only
 * for full packets (enforced by the type assertion).
 */
static inline void
__packet_set_priv(const uint64_t ph, void *priv)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_priv = priv;
}
2105 #endif /* KERNEL */
2106 
2107 #endif /* PRIVATE || BSD_KERNEL_PRIVATE */
2108 #endif /* !_SKYWALK_PACKET_COMMON_H_ */
2109