xref: /xnu-12377.1.9/bsd/skywalk/packet/packet_common.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2016-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_PACKET_COMMON_H_
30 #define _SKYWALK_PACKET_COMMON_H_
31 
32 #if defined(PRIVATE) || defined(BSD_KERNEL_PRIVATE)
33 /*
34  * Routines common to kernel and userland.  This file is intended to
35  * be included by code implementing the packet APIs, in particular,
36  * the Skywalk kernel and libsyscall code.
37  */
38 
39 #include <skywalk/os_packet_private.h>
40 #include <net/if_vlan_var.h>
41 #include <sys/errno.h>
42 #include <sys/kdebug.h>
43 
#ifndef KERNEL
/*
 * User.
 *
 * When compiled into libsyscall, the opaque 64-bit packet/quantum
 * handles resolve to the user-space metadata structures.
 */
#if !defined(LIBSYSCALL_INTERFACE)
#error "LIBSYSCALL_INTERFACE not defined"
#endif /* !LIBSYSCALL_INTERFACE */
#define QUM_ADDR(_ph)   SK_PTR_ADDR_UQUM(_ph)   /* handle -> user quantum */
#define PKT_ADDR(_ph)   SK_PTR_ADDR_UPKT(_ph)   /* handle -> user packet */
#define BLT_ADDR(_bp)   ((struct __user_buflet *)(uintptr_t)_bp)
#else /* KERNEL */
/*
 * Kernel.
 *
 * In-kernel builds resolve the same handles to the kernel-side
 * metadata structures instead.
 */
#include <skywalk/packet/packet_var.h>
#include <skywalk/packet/pbufpool_var.h>
#define QUM_ADDR(_ph)   SK_PTR_ADDR_KQUM(_ph)   /* handle -> kernel quantum */
#define PKT_ADDR(_ph)   SK_PTR_ADDR_KPKT(_ph)   /* handle -> kernel packet */
#define BLT_ADDR(_bp)   ((struct __kern_buflet *)(uintptr_t)_bp)
/* True if the packet has an mbuf attached (PKT_F_MBUF_DATA set). */
#define PKT_HAS_ATTACHED_MBUF(_ph)              \
	((PKT_ADDR(_ph)->pkt_pflags & PKT_F_MBUF_DATA) != 0)
#endif /* KERNEL */
66 
/*
 * Common.
 */
/* Type assertions are compiled out in this configuration. */
#define PKT_TYPE_ASSERT(_ph, _type)                     ((void)0)
#define PKT_SUBTYPE_ASSERT(_ph, _type, _subtype)        ((void)0)

/*
 * Single-buflet iteration for a quantum: with _pbuf == NULL yields the
 * quantum's sole buflet, otherwise yields NULL (end of chain).
 */
#define QUM_GET_NEXT_BUFLET(_qum, _pbuf, _buf) do {                     \
	ASSERT((_pbuf) == NULL || (_pbuf) == (_qum)->qum_buf);          \
	(_buf) = (((_pbuf) == NULL) ? (_qum)->qum_buf : NULL);          \
} while (0)

/*
 * Yield the first buflet of a packet, or NULL when the buflet count is
 * zero.  If the embedded buflet has no buffer attached (buf_addr == 0),
 * follow its next-buflet link instead.
 */
#define PKT_GET_FIRST_BUFLET(_pkt, _bcnt, _buf) do {                    \
	if (__improbable((_bcnt) == 0)) {                               \
	        (_buf) = NULL;                                          \
	        break;                                                  \
	}                                                               \
	if (__probable((_pkt)->pkt_qum_buf.buf_addr != 0)) {            \
	        (_buf) = &(_pkt)->pkt_qum_buf;                          \
	} else {                                                        \
	        (_buf) = __unsafe_forge_single(struct __kern_buflet *,  \
	            __DECONST(void *, (_pkt)->pkt_qum_buf.buf_nbft_addr));\
	}                                                               \
} while (0)

/*
 * Advance a buflet cursor: NULL _pbuf restarts at the first buflet,
 * otherwise follow _pbuf's next-buflet link.
 */
#define _PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do {             \
	if ((_pbuf) == NULL) {                                          \
	        PKT_GET_FIRST_BUFLET(_pkt, _bcnt, _buf);                \
	} else {                                                        \
	        (_buf) = __unsafe_forge_single(struct __kern_buflet *,  \
	            __DECONST(void *, (_pbuf)->buf_nbft_addr));         \
	}                                                               \
} while (0)

#ifndef KERNEL
/* User: no extra validation. */
#define PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do {              \
	_PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf);                 \
} while (0)
#else /* KERNEL */
/* Kernel: additionally assert a non-NULL cursor implies bcnt >= 1. */
#define PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do {              \
	ASSERT(((_bcnt) >= 1) || ((_pbuf) == NULL));                    \
	_PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf);                 \
} while (0)
#endif /* KERNEL */
110 
#ifdef KERNEL
/*
 * Compose a 32-bit nexus port ID from a port number (low 16 bits) and a
 * generation count (high 16 bits).
 *
 * Fixed: operands are masked and widened to uint32_t *before* shifting;
 * previously `(_gencnt & 0xffff) << 16` shifted a promoted (signed) int
 * into the sign bit, which is undefined behavior in C.  Macro arguments
 * are now also fully parenthesized.
 */
#define PKT_COMPOSE_NX_PORT_ID(_nx_port, _gencnt)    \
	((((uint32_t)(_gencnt) & 0xffff) << 16) | ((uint32_t)(_nx_port) & 0xffff))

/* Split a 32-bit nexus port ID back into port number and generation count. */
#define PKT_DECOMPOSE_NX_PORT_ID(_nx_port_id, _nx_port, _gencnt) do {   \
	(_nx_port) = (_nx_port_id) & 0xffff;                            \
	(_gencnt) = ((_nx_port_id) >> 16) & 0xffff;                     \
} while (0)
#endif /* KERNEL */
120 
/*
 * Set the headroom (offset from buffer start to frame start) of a raw
 * packet.  Returns ERANGE unless headroom is strictly less than the
 * buffer's data limit.
 * NOTE(review): this uses a strict '<' bound while the l2-length setter
 * below uses '<=' -- presumably intentional (headroom must leave at
 * least one byte of data); confirm against callers.
 */
__attribute__((always_inline))
static inline int
__packet_set_headroom(const uint64_t ph, const uint8_t headroom)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	if (__probable(headroom < PKT_ADDR(ph)->pkt_qum_buf.buf_dlim)) {
		PKT_ADDR(ph)->pkt_headroom = headroom;
		return 0;
	}
	return ERANGE;
}

/* Return the raw packet's current headroom. */
__attribute__((always_inline))
static inline uint8_t
__packet_get_headroom(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	return PKT_ADDR(ph)->pkt_headroom;
}

/*
 * Set the link-layer (L2) header length.  Returns ERANGE if len
 * exceeds the buffer's data limit.
 */
__attribute__((always_inline))
static inline int
__packet_set_link_header_length(const uint64_t ph, const uint8_t len)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	if (__probable(len <= PKT_ADDR(ph)->pkt_qum_buf.buf_dlim)) {
		PKT_ADDR(ph)->pkt_l2_len = len;
		return 0;
	}
	return ERANGE;
}

/* Return the link-layer (L2) header length. */
__attribute__((always_inline))
static inline uint8_t
__packet_get_link_header_length(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return PKT_ADDR(ph)->pkt_l2_len;
}
160 
/* Mark a raw packet as link-layer broadcast.  Always returns 0. */
__attribute__((always_inline))
static inline int
__packet_set_link_broadcast(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_BCAST;
	return 0;
}

/* TRUE if the raw packet is marked link-layer broadcast. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_link_broadcast(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_BCAST) != 0;
}

/* Mark a raw packet as link-layer multicast.  Always returns 0. */
__attribute__((always_inline))
static inline int
__packet_set_link_multicast(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_MCAST;
	return 0;
}

/* TRUE if the raw packet is marked link-layer multicast. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_link_multicast(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_MCAST) != 0;
}

/* Mark a raw packet as carrying an Ethernet FCS.  Always returns 0. */
__attribute__((always_inline))
static inline int
__packet_set_link_ethfcs(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_ETHFCS;
	return 0;
}

/* TRUE if the raw packet is marked as carrying an Ethernet FCS. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_link_ethfcs(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_ETHFCS) != 0;
}
211 
/* Flag the packet's traffic as background class.  Always returns 0. */
__attribute__((always_inline))
static inline int
__packet_set_transport_traffic_background(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_BACKGROUND;
	return 0;
}

/* TRUE if the packet is flagged as background traffic. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_transport_traffic_background(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_BACKGROUND) != 0;
}

/* Flag the packet's traffic as realtime class.  Always returns 0. */
__attribute__((always_inline))
static inline int
__packet_set_transport_traffic_realtime(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_REALTIME;
	return 0;
}

/* TRUE if the packet is flagged as realtime traffic. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_transport_traffic_realtime(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_REALTIME) != 0;
}

/* Flag the packet as a transport retransmission.  Always returns 0. */
__attribute__((always_inline))
static inline int
__packet_set_transport_retransmit(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_REXMT;
	return 0;
}

/* TRUE if the packet is flagged as a retransmission. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_transport_retransmit(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_REXMT) != 0;
}
262 
/*
 * Flag the packet as the last one of a batch/flow burst.  The matching
 * getter is kernel-only (defined further below).  Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_transport_last_packet(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_LAST_PKT;
	return 0;
}

/* Mark the packet as the first of a group.  Always returns 0. */
__attribute__((always_inline))
static inline int
__packet_set_group_start(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_GROUP_START;
	return 0;
}

/* TRUE if the packet is marked as the first of a group. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_group_start(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_GROUP_START) != 0;
}

/* Mark the packet as the last of a group.  Always returns 0. */
__attribute__((always_inline))
static inline int
__packet_set_group_end(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_GROUP_END;
	return 0;
}

/* TRUE if the packet is marked as the last of a group. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_group_end(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_GROUP_END) != 0;
}
305 
/*
 * Copy the packet's expiration timestamp into *ts.
 *
 * Returns ENOENT if no expire time is set (PKT_F_OPT_EXPIRE_TS clear),
 * EINVAL if ts is NULL, 0 on success.
 * NOTE(review): in-kernel pkt_com_opt is a pointer that is dereferenced
 * unconditionally here; this assumes it is non-NULL whenever
 * PKT_F_OPT_EXPIRE_TS is set -- confirm with callers.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_expire_time(const uint64_t ph, uint64_t *ts)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXPIRE_TS) == 0) {
		return ENOENT;
	}
	if (ts == NULL) {
		return EINVAL;
	}
	*ts = po->__po_expire_ts;
	return 0;
}

/*
 * Set (ts != 0) or clear (ts == 0) the packet's expiration timestamp,
 * updating PKT_F_OPT_EXPIRE_TS accordingly.  Always returns 0.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_expire_time(const uint64_t ph, const uint64_t ts)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if (ts != 0) {
		po->__po_expire_ts = ts;
		PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_EXPIRE_TS;
	} else {
		po->__po_expire_ts = 0;
		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_EXPIRE_TS;
	}
	return 0;
}
345 
/*
 * Copy the packet's expiry action into *pea.
 *
 * Returns ENOENT if no action is set (PKT_F_OPT_EXP_ACTION clear),
 * EINVAL if pea is NULL, 0 on success.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_expiry_action(const uint64_t ph, packet_expiry_action_t *pea)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXP_ACTION) == 0) {
		return ENOENT;
	}
	if (pea == NULL) {
		return EINVAL;
	}
	*pea = po->__po_expiry_action;
	return 0;
}

/*
 * Set the packet's expiry action; PACKET_EXPIRY_ACTION_NONE clears it
 * and drops PKT_F_OPT_EXP_ACTION.  Always returns 0.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_expiry_action(const uint64_t ph, packet_expiry_action_t pea)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if (pea != PACKET_EXPIRY_ACTION_NONE) {
		po->__po_expiry_action = (uint8_t)pea;
		PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_EXP_ACTION;
	} else {
		po->__po_expiry_action = 0;
		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_EXP_ACTION;
	}
	return 0;
}
385 
/*
 * Copy the stored token out of a packet-option structure.
 *
 * On entry *len is the caller's buffer capacity; on success it is
 * updated to the token's actual length, and *type receives the stored
 * token type.  Returns EINVAL on NULL arguments or if the token does
 * not fit in the caller's buffer.
 */
__attribute__((always_inline))
static inline errno_t
__packet_opt_get_token(const struct __packet_opt *po,
    void *__sized_by(*len)token,
    uint16_t *len, uint8_t *type)
{
	uint16_t tlen = po->__po_token_len;
	uint8_t ttype;

	if (token == NULL || len == NULL || type == NULL || tlen > *len) {
		return EINVAL;
	}
	ttype = (uint8_t)po->__po_token_type;

	ASSERT(tlen <= PKT_OPT_MAX_TOKEN_SIZE);
	static_assert((__builtin_offsetof(struct __packet_opt, __po_token) % 8) == 0);
	bcopy(po->__po_token, token, tlen);
	/*
	 * -fbounds-safety: Updating *len should be fine because at this point
	 * we know tlen is less than or equal to *len (check the first if
	 * statement in this function)
	 */
	*len = tlen;
	*type = ttype;
	return 0;
}
412 
/*
 * Copy the packet's opaque token into the caller's buffer.
 *
 * Returns ENOENT if no token is present or if the stored token is not
 * of the opaque type (e.g. it holds a packet ID instead); otherwise
 * forwards __packet_opt_get_token()'s result.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_token(const uint64_t ph,
    void *__sized_by(*len)token, uint16_t *len)
{
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	uint8_t type;
	errno_t err;

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) == 0) {
		return ENOENT;
	}
	err = __packet_opt_get_token(po, token, len, &type);
	if ((err == 0) && (type != PKT_OPT_TOKEN_TYPE_OPAQUE)) {
		err = ENOENT;
	}
	return err;
}
436 
/*
 * Store (len != 0) or clear (len == 0) a token in a packet-option
 * structure, updating PKT_F_OPT_TOKEN in *pflags.
 *
 * Returns EINVAL if len is non-zero but token is NULL, the length
 * exceeds PKT_OPT_MAX_TOKEN_SIZE, or type is 0.
 * NOTE(review): the 8-byte-aligned fast path copies the full 16 bytes
 * via two uint64_t loads even when len < 16; this relies on token's
 * __sized_by(PKT_OPT_MAX_TOKEN_SIZE) contract making 16 bytes readable,
 * and on the union layout of __po_token_data/__po_token -- confirm.
 */
__attribute__((always_inline))
static inline errno_t
__packet_opt_set_token(struct __packet_opt *po,
    const void *__sized_by(PKT_OPT_MAX_TOKEN_SIZE)token,
    const uint16_t len, const uint8_t type, volatile uint64_t *pflags)
{
	static_assert((__builtin_offsetof(struct __packet_opt, __po_token) % 8) == 0);
	if (len != 0) {
		if (token == NULL || len > PKT_OPT_MAX_TOKEN_SIZE ||
		    type == 0) {
			return EINVAL;
		}
		if (__probable(IS_P2ALIGNED(token, 8))) {
			/* aligned: copy as two 64-bit words */
			uint64_t *token64 = __DECONST(void *, token);
			po->__po_token_data[0] = *token64;
			po->__po_token_data[1] = *(token64 + 1);
		} else {
			bcopy(token, po->__po_token, len);
		}
		po->__po_token_len = len;
		po->__po_token_type = type;
		*pflags |= PKT_F_OPT_TOKEN;
	} else {
		/* clear: zero the full 16-byte token storage */
		static_assert(sizeof(po->__po_token_data[0]) == 8);
		static_assert(sizeof(po->__po_token_data[1]) == 8);
		static_assert(sizeof(po->__po_token) == 16);
		po->__po_token_data[0] = 0;
		po->__po_token_data[1] = 0;
		po->__po_token_len = 0;
		po->__po_token_type = 0;
		*pflags &= ~PKT_F_OPT_TOKEN;
	}
	return 0;
}
471 
/*
 * Record a transmit timestamp on the packet and set
 * PKT_F_OPT_TX_TIMESTAMP.  Silently does nothing if the option
 * structure is absent (kernel pkt_com_opt == NULL).
 */
__attribute__((always_inline))
static inline void
__packet_set_tx_timestamp(const uint64_t ph, const uint64_t ts)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */

	if (po != NULL) {
		po->__po_pkt_tx_time = ts;
		PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_TX_TIMESTAMP;
	}
}

/*
 * Return the packet's transmit timestamp, or 0 when none was recorded
 * (option structure absent or PKT_F_OPT_TX_TIMESTAMP clear).
 */
__attribute__((always_inline))
static inline uint64_t
__packet_get_tx_timestamp(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if (po == NULL || (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TX_TIMESTAMP) == 0) {
		return 0;
	}

	return po->__po_pkt_tx_time;
}
505 
/*
 * Attach an opaque token of the given length to the packet; a zero
 * length clears any existing token.  Thin wrapper over
 * __packet_opt_set_token() with type PKT_OPT_TOKEN_TYPE_OPAQUE.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_token(const uint64_t ph,
    const void *__sized_by(len)token, const uint16_t len)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	return __packet_opt_set_token(PKT_ADDR(ph)->pkt_com_opt, token, len,
	           PKT_OPT_TOKEN_TYPE_OPAQUE, &PKT_ADDR(ph)->pkt_pflags);
#else /* !KERNEL */
	return __packet_opt_set_token(&PKT_ADDR(ph)->pkt_com_opt, token, len,
	           PKT_OPT_TOKEN_TYPE_OPAQUE, &PKT_ADDR(ph)->pkt_pflags);
#endif /* !KERNEL */
}
520 
/*
 * Retrieve the packet ID stored in the token slot.
 *
 * Returns ENOENT if no token is present, or if the stored token is not
 * of type PKT_OPT_TOKEN_TYPE_PACKET_ID with exactly sizeof(packet_id_t)
 * bytes; otherwise forwards __packet_opt_get_token()'s result.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_packetid(const uint64_t ph, packet_id_t *pktid)
{
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	uint16_t len = sizeof(packet_id_t);
	uint8_t type;
	errno_t err;

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) == 0) {
		return ENOENT;
	}
	err = __packet_opt_get_token(po, (packet_id_t * __header_indexable)pktid,
	    &len, &type);
	if ((err == 0) && ((type != PKT_OPT_TOKEN_TYPE_PACKET_ID) ||
	    (len != sizeof(packet_id_t)))) {
		err = ENOENT;
	}
	return err;
}

/*
 * Store a packet ID in the token slot (token type
 * PKT_OPT_TOKEN_TYPE_PACKET_ID); overwrites any opaque token.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_packetid(const uint64_t ph, const packet_id_t *pktid)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	return __packet_opt_set_token(PKT_ADDR(ph)->pkt_com_opt, pktid,
	           sizeof(packet_id_t), PKT_OPT_TOKEN_TYPE_PACKET_ID,
	           &PKT_ADDR(ph)->pkt_pflags);
#else /* !KERNEL */
	return __packet_opt_set_token(&PKT_ADDR(ph)->pkt_com_opt, pktid,
	           sizeof(packet_id_t), PKT_OPT_TOKEN_TYPE_PACKET_ID,
	           &PKT_ADDR(ph)->pkt_pflags);
#endif /* !KERNEL */
}
563 
/*
 * Copy the packet's 802.1Q VLAN tag into *vlan_tag (if non-NULL).
 * Returns ENOENT when no tag is set (PKT_F_OPT_VLTAG clear); note a
 * NULL vlan_tag still returns 0 when a tag is present.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_vlan_tag(const uint64_t ph, uint16_t *vlan_tag)
{
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	uint64_t pflags;

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	pflags = PKT_ADDR(ph)->pkt_pflags;
	if ((pflags & PKT_F_OPT_VLTAG) == 0) {
		return ENOENT;
	}
	if (vlan_tag != NULL) {
		*vlan_tag = po->__po_vlan_tag;
	}
	return 0;
}

/* Set the packet's 802.1Q VLAN tag and PKT_F_OPT_VLTAG.  Always 0. */
__attribute__((always_inline))
static inline errno_t
__packet_set_vlan_tag(const uint64_t ph, const uint16_t vlan_tag)
{
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_VLTAG;
	po->__po_vlan_tag = vlan_tag;

	return 0;
}

/* Extract the VLAN ID (low 12 bits) from an 802.1Q tag. */
__attribute__((always_inline))
static inline uint16_t
__packet_get_vlan_id(const uint16_t vlan_tag)
{
	return EVL_VLANOFTAG(vlan_tag);
}

/* Extract the priority code point (top 3 bits) from an 802.1Q tag. */
__attribute__((always_inline))
static inline uint8_t
__packet_get_vlan_priority(const uint16_t vlan_tag)
{
	return EVL_PRIOFTAG(vlan_tag);
}
616 
/*
 * Copy the packet's application metadata (type + value byte) to the
 * caller.  Returns EINVAL on NULL outputs, ENOENT if the option flag is
 * clear or the stored type is UNSPECIFIED.
 */
__attribute__((always_inline))
static inline errno_t
__packet_get_app_metadata(const uint64_t ph,
    packet_app_metadata_type_t *app_type, uint8_t *app_metadata)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	if (app_type == NULL || app_metadata == NULL) {
		return EINVAL;
	}
	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_APP_METADATA) == 0) {
		return ENOENT;
	}
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if (po->__po_app_type == PACKET_APP_METADATA_TYPE_UNSPECIFIED) {
		return ENOENT;
	}
	*app_type = po->__po_app_type;
	*app_metadata = po->__po_app_metadata;
	return 0;
}

/*
 * Set the packet's application metadata.  An out-of-range app_type
 * resets the stored type to UNSPECIFIED, clears the option flag, and
 * returns EINVAL.
 */
__attribute__((always_inline))
static inline errno_t
__packet_set_app_metadata(const uint64_t ph,
    const packet_app_metadata_type_t app_type, const uint8_t app_metadata)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
	struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
	struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
	if (app_type < PACKET_APP_METADATA_TYPE_MIN ||
	    app_type > PACKET_APP_METADATA_TYPE_MAX) {
		po->__po_app_type = PACKET_APP_METADATA_TYPE_UNSPECIFIED;
		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_APP_METADATA;
		return EINVAL;
	}
	po->__po_app_type = app_type;
	po->__po_app_metadata = app_metadata;
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_APP_METADATA;
	return 0;
}
664 
/* Mark the packet as one that woke the system (PKT_F_WAKE_PKT). */
__attribute__((always_inline))
static inline void
__packet_set_wake_flag(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_WAKE_PKT;
}

/* TRUE if the packet carries the wake flag. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_wake_flag(const uint64_t ph)
{
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_WAKE_PKT) != 0;
}

#ifdef KERNEL
/* Kernel-only: set the ULPN flag on the packet. */
__attribute__((always_inline))
static inline void
__packet_set_ulpn_flag(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_ULPN;
}
#endif

/* TRUE if the packet carries the ULPN flag. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_ulpn_flag(const uint64_t ph)
{
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_ULPN) != 0;
}

/* TRUE if the packet carries the (internal) LPW flag. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_lpw_flag(const uint64_t ph)
{
	return (PKT_ADDR(ph)->pkt_pflags & __PKT_F_LPW) != 0;
}
703 
/* Set or clear the keep-alive flag (PKT_F_KEEPALIVE) on the packet. */
__attribute__((always_inline))
static inline void
__packet_set_keep_alive(const uint64_t ph, const boolean_t is_keep_alive)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	if (is_keep_alive) {
		PKT_ADDR(ph)->pkt_pflags |= PKT_F_KEEPALIVE;
	} else {
		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_KEEPALIVE;
	}
}

/* TRUE if the packet is flagged as a keep-alive. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_keep_alive(const uint64_t ph)
{
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_KEEPALIVE) != 0;
}

/* TRUE if the raw packet was truncated (PKT_F_TRUNCATED). */
__attribute__((always_inline))
static inline boolean_t
__packet_get_truncated(const uint64_t ph)
{
	PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_TRUNCATED) != 0;
}
730 
#ifdef KERNEL
/* Kernel-only: TRUE if the packet starts a new flow (PKT_F_NEW_FLOW). */
__attribute__((always_inline))
static inline boolean_t
__packet_get_transport_new_flow(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_NEW_FLOW) != 0;
}

/* Kernel-only: TRUE if the packet is flagged as the last of a batch. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_transport_last_packet(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_LAST_PKT) != 0;
}

/* Kernel-only: TRUE if the packet carries the L4S flag. */
__attribute__((always_inline))
static inline boolean_t
__packet_get_l4s_flag(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return (PKT_ADDR(ph)->pkt_pflags & PKT_F_L4S) != 0;
}
#endif /* KERNEL */

/* Flag the packet as L4S traffic (PKT_F_L4S). */
__attribute__((always_inline))
static inline void
__packet_set_l4s_flag(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_pflags |= PKT_F_L4S;
}
764 
765 __attribute__((always_inline))
766 static inline int
__packet_set_service_class(const uint64_t ph,const uint32_t sc)767 __packet_set_service_class(const uint64_t ph, const uint32_t sc)
768 {
769 	int err = 0;
770 
771 	static_assert(sizeof(QUM_ADDR(ph)->qum_svc_class == sizeof(uint32_t)));
772 
773 	switch (sc) {
774 	case PKT_SC_BE:
775 	case PKT_SC_BK_SYS:
776 	case PKT_SC_BK:
777 	case PKT_SC_RD:
778 	case PKT_SC_OAM:
779 	case PKT_SC_AV:
780 	case PKT_SC_RV:
781 	case PKT_SC_VI:
782 	case PKT_SC_SIG:
783 	case PKT_SC_VO:
784 	case PKT_SC_CTL:
785 		QUM_ADDR(ph)->qum_svc_class = sc;
786 		break;
787 
788 	default:
789 		err = EINVAL;
790 		break;
791 	}
792 
793 	return err;
794 }
795 
796 __attribute__((always_inline))
797 static inline uint32_t
__packet_get_service_class(const uint64_t ph)798 __packet_get_service_class(const uint64_t ph)
799 {
800 	uint32_t sc;
801 
802 	static_assert(sizeof(QUM_ADDR(ph)->qum_svc_class == sizeof(uint32_t)));
803 
804 	switch (QUM_ADDR(ph)->qum_svc_class) {
805 	case PKT_SC_BE:         /* most likely best effort */
806 	case PKT_SC_BK_SYS:
807 	case PKT_SC_BK:
808 	case PKT_SC_RD:
809 	case PKT_SC_OAM:
810 	case PKT_SC_AV:
811 	case PKT_SC_RV:
812 	case PKT_SC_VI:
813 	case PKT_SC_SIG:
814 	case PKT_SC_VO:
815 	case PKT_SC_CTL:
816 		sc = QUM_ADDR(ph)->qum_svc_class;
817 		break;
818 
819 	default:
820 		sc = PKT_SC_BE;
821 		break;
822 	}
823 
824 	return sc;
825 }
826 
827 __attribute__((always_inline))
828 static inline errno_t
__packet_set_comp_gencnt(const uint64_t ph,const uint32_t gencnt)829 __packet_set_comp_gencnt(const uint64_t ph, const uint32_t gencnt)
830 {
831 	static_assert(sizeof(PKT_ADDR(ph)->pkt_comp_gencnt == sizeof(uint32_t)));
832 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
833 
834 	PKT_ADDR(ph)->pkt_comp_gencnt = gencnt;
835 
836 	return 0;
837 }
838 
839 __attribute__((always_inline))
840 static inline errno_t
__packet_get_comp_gencnt(const uint64_t ph,uint32_t * pgencnt)841 __packet_get_comp_gencnt(const uint64_t ph, uint32_t *pgencnt)
842 {
843 	static_assert(sizeof(PKT_ADDR(ph)->pkt_comp_gencnt == sizeof(uint32_t)));
844 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
845 
846 	if (pgencnt == NULL) {
847 		return EINVAL;
848 	}
849 
850 	if (PKT_ADDR(ph)->pkt_comp_gencnt == 0) {
851 		return ENOENT;
852 	}
853 
854 	*pgencnt = PKT_ADDR(ph)->pkt_comp_gencnt;
855 	return 0;
856 }
857 
/*
 * Set the packet's service class from a traffic-class value: the
 * traffic class is reduced to its service-class value portion
 * (PKT_TC2SCVAL), mapped to the corresponding PKT_SC_* constant
 * (unknown values fall back to best effort), and stored via
 * __packet_set_service_class().
 */
__attribute__((always_inline))
static inline int
__packet_set_traffic_class(const uint64_t ph, const uint32_t tc)
{
	uint32_t val = PKT_TC2SCVAL(tc);        /* just the val portion */
	uint32_t sc;

	switch (val) {
	case PKT_SCVAL_BK_SYS:
		sc = PKT_SC_BK_SYS;
		break;
	case PKT_SCVAL_BK:
		sc = PKT_SC_BK;
		break;
	case PKT_SCVAL_BE:
		sc = PKT_SC_BE;
		break;
	case PKT_SCVAL_RD:
		sc = PKT_SC_RD;
		break;
	case PKT_SCVAL_OAM:
		sc = PKT_SC_OAM;
		break;
	case PKT_SCVAL_AV:
		sc = PKT_SC_AV;
		break;
	case PKT_SCVAL_RV:
		sc = PKT_SC_RV;
		break;
	case PKT_SCVAL_VI:
		sc = PKT_SC_VI;
		break;
	case PKT_SCVAL_SIG:
		sc = PKT_SC_SIG;
		break;
	case PKT_SCVAL_VO:
		sc = PKT_SC_VO;
		break;
	case PKT_SCVAL_CTL:
		sc = PKT_SC_CTL;
		break;
	default:
		sc = PKT_SC_BE;
		break;
	}

	return __packet_set_service_class(ph, sc);
}

/* Return the packet's traffic class derived from its service class. */
__attribute__((always_inline))
static inline uint32_t
__packet_get_traffic_class(const uint64_t ph)
{
	return PKT_SC2TC(__packet_get_service_class(ph));
}
913 
/*
 * Record inet checksum metadata on the packet: flags (masked to
 * PACKET_CSUM_FLAGS) plus, for TX, the start/stuff offsets, or, for RX,
 * the start offset and partial checksum value.  Always returns 0.
 */
__attribute__((always_inline))
static inline int
__packet_set_inet_checksum(const uint64_t ph, const packet_csum_flags_t flags,
    const uint16_t start, const uint16_t stuff_val, boolean_t tx)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);

	PKT_ADDR(ph)->pkt_csum_flags = flags & PACKET_CSUM_FLAGS;

	if (tx) {
		PKT_ADDR(ph)->pkt_csum_tx_start_off = start;
		PKT_ADDR(ph)->pkt_csum_tx_stuff_off = stuff_val;
	} else {
		PKT_ADDR(ph)->pkt_csum_rx_start_off = start;
		PKT_ADDR(ph)->pkt_csum_rx_value = stuff_val;
	}
	return 0;
}

/* OR additional checksum flags (masked to PACKET_CSUM_FLAGS) in. */
__attribute__((always_inline))
static inline void
__packet_add_inet_csum_flags(const uint64_t ph, const packet_csum_flags_t flags)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);

	PKT_ADDR(ph)->pkt_csum_flags |= flags & PACKET_CSUM_FLAGS;
}

/*
 * Read back inet checksum metadata: fills the non-NULL out-parameters
 * from the TX or RX offset fields and returns the masked checksum
 * flags.  Mirror of __packet_set_inet_checksum().
 */
__attribute__((always_inline))
static inline packet_csum_flags_t
__packet_get_inet_checksum(const uint64_t ph, uint16_t *start,
    uint16_t *stuff_val, boolean_t tx)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);

	if (tx) {
		if (__probable(start != NULL)) {
			*start = PKT_ADDR(ph)->pkt_csum_tx_start_off;
		}
		if (__probable(stuff_val != NULL)) {
			*stuff_val = PKT_ADDR(ph)->pkt_csum_tx_stuff_off;
		}
	} else {
		if (__probable(start != NULL)) {
			*start = PKT_ADDR(ph)->pkt_csum_rx_start_off;
		}
		if (__probable(stuff_val != NULL)) {
			*stuff_val = PKT_ADDR(ph)->pkt_csum_rx_value;
		}
	}
	return PKT_ADDR(ph)->pkt_csum_flags & PACKET_CSUM_FLAGS;
}
966 
967 __attribute__((always_inline))
968 static inline void
__packet_set_flow_uuid(const uint64_t ph,const uuid_t flow_uuid)969 __packet_set_flow_uuid(const uint64_t ph, const uuid_t flow_uuid)
970 {
971 	struct __quantum *q = &QUM_ADDR(ph)->qum_com;
972 
973 	/*
974 	 * Anticipate a nicely (8-bytes) aligned UUID from caller;
975 	 * the one in qum_flow_id is always 8-byte aligned.
976 	 */
977 	if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint64_t)))) {
978 		const uint64_t *id_64 = (const uint64_t *)(const void *)flow_uuid;
979 		q->__q_flow_id_val64[0] = id_64[0];
980 		q->__q_flow_id_val64[1] = id_64[1];
981 	} else if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint32_t)))) {
982 		const uint32_t *id_32 = (const uint32_t *)(const void *)flow_uuid;
983 		q->__q_flow_id_val32[0] = id_32[0];
984 		q->__q_flow_id_val32[1] = id_32[1];
985 		q->__q_flow_id_val32[2] = id_32[2];
986 		q->__q_flow_id_val32[3] = id_32[3];
987 	} else {
988 		bcopy(flow_uuid, q->__q_flow_id, sizeof(uuid_t));
989 	}
990 }
991 
992 __attribute__((always_inline))
993 static inline void
__packet_get_flow_uuid(const uint64_t ph,uuid_t flow_uuid)994 __packet_get_flow_uuid(const uint64_t ph, uuid_t flow_uuid)
995 {
996 	struct __quantum *q = &QUM_ADDR(ph)->qum_com;
997 
998 	/*
999 	 * Anticipate a nicely (8-bytes) aligned UUID from caller;
1000 	 * the one in qum_flow_id is always 8-byte aligned.
1001 	 */
1002 	if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint64_t)))) {
1003 		uint64_t *id_64 = (uint64_t *)(void *)flow_uuid;
1004 		id_64[0] = q->__q_flow_id_val64[0];
1005 		id_64[1] = q->__q_flow_id_val64[1];
1006 	} else if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint32_t)))) {
1007 		uint32_t *id_32 = (uint32_t *)(void *)flow_uuid;
1008 		id_32[0] = q->__q_flow_id_val32[0];
1009 		id_32[1] = q->__q_flow_id_val32[1];
1010 		id_32[2] = q->__q_flow_id_val32[2];
1011 		id_32[3] = q->__q_flow_id_val32[3];
1012 	} else {
1013 		bcopy(q->__q_flow_id, flow_uuid, sizeof(uuid_t));
1014 	}
1015 }
1016 
1017 __attribute__((always_inline))
1018 static inline void
__packet_clear_flow_uuid(const uint64_t ph)1019 __packet_clear_flow_uuid(const uint64_t ph)
1020 {
1021 	struct __quantum *q = &QUM_ADDR(ph)->qum_com;
1022 	q->__q_flow_id_val64[0] = 0;
1023 	q->__q_flow_id_val64[1] = 0;
1024 }
1025 
1026 __attribute__((always_inline))
1027 static inline uint8_t
__packet_get_aggregation_type(const uint64_t ph)1028 __packet_get_aggregation_type(const uint64_t ph)
1029 {
1030 	static_assert(sizeof(PKT_ADDR(ph)->pkt_aggr_type == sizeof(uint8_t)));
1031 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1032 
1033 	return PKT_ADDR(ph)->pkt_aggr_type;
1034 }
1035 
1036 __attribute__((always_inline))
1037 static inline uint32_t
__packet_get_data_length(const uint64_t ph)1038 __packet_get_data_length(const uint64_t ph)
1039 {
1040 	return QUM_ADDR(ph)->qum_len;
1041 }
1042 
1043 #ifdef KERNEL
1044 /*
1045  * This handles truncated packets used in compat Tx and Rx classification.
1046  */
1047 __attribute__((always_inline))
1048 static inline uint32_t
__packet_get_real_data_length(const struct __kern_packet * pkt)1049 __packet_get_real_data_length(const struct __kern_packet *pkt)
1050 {
1051 	uint32_t pkt_len;
1052 
1053 	if (pkt->pkt_pflags & PKT_F_TRUNCATED) {
1054 		struct __kern_buflet *bft;
1055 
1056 		bft = kern_packet_get_next_buflet(SK_PKT2PH(pkt), NULL);
1057 		pkt_len = kern_buflet_get_data_length(bft);
1058 	} else {
1059 		pkt_len = pkt->pkt_length;
1060 	}
1061 	return pkt_len;
1062 }
1063 #endif /* KERNEL */
1064 
1065 __attribute__((always_inline))
1066 static inline uint16_t
__packet_get_buflet_count(const uint64_t ph)1067 __packet_get_buflet_count(const uint64_t ph)
1068 {
1069 	uint16_t bcnt = 0;
1070 
1071 	bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;
1072 #ifdef KERNEL
1073 	VERIFY(bcnt != 0 ||
1074 	    PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
1075 #else /* !KERNEL */
1076 	/*
1077 	 * Handle the case where the metadata region gets
1078 	 * redirected to anonymous zero-filled pages at
1079 	 * defunct time.  There's always 1 buflet in the
1080 	 * packet metadata, so pretend that's the count.
1081 	 */
1082 	if (__improbable(bcnt == 0)) {
1083 		bcnt = 1;
1084 	}
1085 #endif /* !KERNEL */
1086 	return bcnt;
1087 }
1088 
/*
 * Append buflet bnew after bprev0 (the current tail; NULL means the
 * packet's built-in buflet) and bump pkt_bufs_cnt.  Kernel callers
 * enforce preconditions via VERIFY; userland returns EINVAL instead.
 */
__attribute__((always_inline))
static inline int
__packet_add_buflet(const uint64_t ph, const void *bprev0, const void *bnew0)
{
	uint16_t bcnt;

#ifdef KERNEL
	kern_buflet_t bprev = __DECONST(kern_buflet_t, bprev0);
	kern_buflet_t bnew = __DECONST(kern_buflet_t, bnew0);

	/* chaining extra buflets requires a buffer-on-demand pool */
	VERIFY(PKT_ADDR(ph) && bnew && (bnew != bprev));
	VERIFY(PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
#else /* !KERNEL */
	buflet_t bprev = __DECONST(buflet_t, bprev0);
	buflet_t bnew = __DECONST(buflet_t, bnew0);

	if (__improbable(!PKT_ADDR(ph) || !bnew || (bnew == bprev))) {
		return EINVAL;
	}
#endif /* !KERNEL */

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;

#ifdef KERNEL
	/* bprev may be NULL only when the packet has no buflets yet */
	VERIFY((bprev != NULL || bcnt == 0) &&
	    (bcnt < PKT_ADDR(ph)->pkt_bufs_max));
#else /* !KERNEL */
	if (__improbable(bcnt >= PKT_ADDR(ph)->pkt_bufs_max) ||
	    (bprev == NULL && bcnt != 0)) {
		return EINVAL;
	}
#endif /* !KERNEL */

#ifdef KERNEL
#if DEVELOPMENT || DEBUG
	/* check if bprev is the last buflet in the chain */
	struct __kern_buflet *__single pbft, *__single kbft;
	int n = bcnt;

	PKT_GET_FIRST_BUFLET(PKT_ADDR(ph), bcnt, pbft);
	kbft = pbft;

	/* walk forward bcnt links; pbft trails as the last non-NULL node */
	while ((kbft != NULL) && n--) {
		pbft = kbft;
		kbft = __unsafe_forge_single(struct __kern_buflet *,
		    __DECONST(struct __kern_buflet *, kbft->buf_nbft_addr));
	}
	ASSERT(n == 0);
	ASSERT(bprev == pbft);
#endif /* DEVELOPMENT || DEBUG */
#endif /* KERNEL */

	/* empty chain: link the new buflet after the built-in one */
	if (bprev == NULL) {
		bprev = &PKT_ADDR(ph)->pkt_qum_buf;
	}
#ifdef KERNEL
	KBUF_LINK(bprev, bnew);
#else /* !KERNEL */
	UBUF_LINK(bprev, bnew);
#endif /* !KERNEL */

	/* pkt_bufs_cnt is declared const in the metadata; cast to update */
	*(uint16_t *)(uintptr_t)&PKT_ADDR(ph)->pkt_bufs_cnt = ++bcnt;
	return 0;
}
1154 
/*
 * Return the buflet following bprev0 in the packet's chain, or the
 * first buflet when bprev0 is NULL; NULL when the chain is exhausted.
 */
__attribute__((always_inline))
#ifdef KERNEL
static inline struct __kern_buflet *
#else
static inline struct __user_buflet *
#endif
__packet_get_next_buflet(const uint64_t ph, const void *bprev0)
{
#ifdef KERNEL
	kern_buflet_t bprev = __DECONST(kern_buflet_t, bprev0);
	struct __kern_buflet *__single bcur = NULL;
#else /* !KERNEL */
	buflet_t bprev = __DECONST(buflet_t, bprev0);
	void *bcur = NULL;
#endif /* !KERNEL */

	uint32_t bcnt = PKT_ADDR(ph)->pkt_bufs_cnt;
#ifdef KERNEL
	/* a zero count is only legal for buffer-on-demand pools */
	ASSERT(bcnt != 0 ||
	    PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp));
#else /* !KERNEL */
	/*
	 * Handle the case where the metadata region gets
	 * redirected to anonymous zero-filled pages at
	 * defunct time.  There's always 1 buflet in the
	 * packet metadata, so pretend that's the count.
	 */
	if (__improbable(bcnt == 0)) {
		bcnt = 1;
		bprev = NULL;
	}
#endif /* !KERNEL */
	PKT_GET_NEXT_BUFLET(PKT_ADDR(ph), bcnt, BLT_ADDR(bprev), bcur);
	return bcur;
}
1190 
1191 __attribute__((always_inline))
1192 static inline uint8_t
__packet_get_segment_count(const uint64_t ph)1193 __packet_get_segment_count(const uint64_t ph)
1194 {
1195 	static_assert(sizeof(PKT_ADDR(ph)->pkt_seg_cnt == sizeof(uint8_t)));
1196 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1197 
1198 	return PKT_ADDR(ph)->pkt_seg_cnt;
1199 }
1200 
1201 __attribute__((always_inline))
1202 static inline void
__packet_set_segment_count(const uint64_t ph,uint8_t segcount)1203 __packet_set_segment_count(const uint64_t ph, uint8_t segcount)
1204 {
1205 	static_assert(sizeof(PKT_ADDR(ph)->pkt_seg_cnt == sizeof(uint8_t)));
1206 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1207 
1208 	PKT_ADDR(ph)->pkt_seg_cnt = segcount;
1209 }
1210 
1211 __attribute__((always_inline))
1212 static inline uint16_t
__packet_get_protocol_segment_size(const uint64_t ph)1213 __packet_get_protocol_segment_size(const uint64_t ph)
1214 {
1215 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1216 	return PKT_ADDR(ph)->pkt_proto_seg_sz;
1217 }
1218 
1219 __attribute__((always_inline))
1220 static inline errno_t
__packet_set_protocol_segment_size(const uint64_t ph,uint16_t proto_seg_sz)1221 __packet_set_protocol_segment_size(const uint64_t ph, uint16_t proto_seg_sz)
1222 {
1223 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1224 	PKT_ADDR(ph)->pkt_proto_seg_sz = proto_seg_sz;
1225 	return 0;
1226 }
1227 
1228 __attribute__((always_inline))
1229 static inline void
__packet_get_tso_flags(const uint64_t ph,packet_tso_flags_t * flags)1230 __packet_get_tso_flags(const uint64_t ph, packet_tso_flags_t *flags)
1231 {
1232 	static_assert(sizeof(PKT_ADDR(ph)->pkt_proto_seg_sz == sizeof(uint16_t)));
1233 
1234 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1235 	*flags = PKT_ADDR(ph)->pkt_csum_flags & (PACKET_CSUM_TSO_FLAGS);
1236 }
1237 
1238 __attribute__((always_inline))
1239 static inline void
__packet_set_tso_flags(const uint64_t ph,packet_tso_flags_t flags)1240 __packet_set_tso_flags(const uint64_t ph, packet_tso_flags_t flags)
1241 {
1242 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1243 
1244 	PKT_ADDR(ph)->pkt_csum_flags |= flags & (PACKET_CSUM_TSO_FLAGS);
1245 }
1246 
1247 __attribute__((always_inline))
1248 static inline uint32_t
__buflet_get_data_limit(const void * buf)1249 __buflet_get_data_limit(const void *buf)
1250 {
1251 	return BLT_ADDR(buf)->buf_dlim;
1252 }
1253 
1254 #ifdef KERNEL
1255 __attribute__((always_inline))
1256 static inline errno_t
__buflet_set_data_limit(const void * buf,const uint32_t dlim)1257 __buflet_set_data_limit(const void *buf, const uint32_t dlim)
1258 {
1259 	/* buffer region is always marked as shareable */
1260 	ASSERT(BLT_ADDR(buf)->buf_ctl->bc_flags & SKMEM_BUFCTL_SHAREOK);
1261 
1262 	/* full bounds checking will be performed during finalize */
1263 	if (__probable((uint32_t)dlim <= BLT_ADDR(buf)->buf_objlim)) {
1264 		static_assert(sizeof(BLT_ADDR(buf)->buf_dlim) == sizeof(uint32_t));
1265 		/* deconst */
1266 		*(uint32_t *)(uintptr_t)&BLT_ADDR(buf)->buf_dlim = dlim;
1267 		return 0;
1268 	}
1269 	return ERANGE;
1270 }
1271 #endif /* KERNEL */
1272 
1273 __attribute__((always_inline))
1274 static inline uint32_t
__buflet_get_data_offset(const void * buf)1275 __buflet_get_data_offset(const void *buf)
1276 {
1277 	return BLT_ADDR(buf)->buf_doff;
1278 }
1279 
1280 /*
1281  * ******************************************************************
1282  * Checks in __packet_finalize for packet finalized from userland
1283  * ******************************************************************
1284  *  +-------+---------------------------+---------------------------+
1285  *  |         NEXUS_META_SUBTYPE_RAW    | NEXUS_META_SUBTYPE_PAYLOAD|
1286  *  |-------+---------------------------+---------------------------+
1287  *  |buflet | (bdoff + len) <= dlim     | (bdoff + len) <= dlim     |
1288  *  |l2_off | l2 == bdoff && l2 < bdlim | l2 = l3 = 0 && doff == 0  |
1289  *  |l3_off | l3 = l2                   | l3 == 0                   |
1290  *  |l4_off | l4 = l3 = l2              | l4 = l3 = 0               |
1291  *  +-------+---------------------------+---------------------------+
1292  *
1293  * ******************************************************************
1294  * Checks in __packet_finalize for packet finalized from kernel
1295  * ******************************************************************
1296  *  +-------+---------------------------+---------------------------+
1297  *  |         NEXUS_META_SUBTYPE_RAW    | NEXUS_META_SUBTYPE_PAYLOAD|
1298  *  |-------+---------------------------+---------------------------+
1299  *  |buflet | (bdoff + len) <= dlim     | (bdoff + len) <= dlim     |
1300  *  |l2_off | l2 == bdoff && l2 < bdlim | l2 = l3 = 0 && doff == 0  |
 *  |l3_off | l3 >= l2 && l3 < bdlim    | l3 == 0                   |
1302  *  |l4_off | l4 = l3                   | l4 = l3 = 0               |
1303  *  +-------+---------------------------+---------------------------+
1304  *
1305  */
/*
 * Validate the buflet chain and header offsets, then mark the packet
 * finalized (QUM_F_FINALIZED) with qum_len set to the summed buflet
 * data lengths; on any validation failure, mark it dropped
 * (QUM_F_DROPPED) with qum_len of 0 and return the error.
 */
__attribute__((always_inline))
static inline int
__packet_finalize(const uint64_t ph)
{
	void *__single bcur = NULL, *__single bprev = NULL;
	uint32_t len, bcnt, bdoff0, bdlim0;
	int err = 0;

#ifdef KERNEL
	ASSERT(QUM_ADDR(ph)->qum_qflags & QUM_F_INTERNALIZED);
#endif /* KERNEL */
	QUM_ADDR(ph)->qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);

	/*
	 * NOTE(review): if bcnt is 0 here (possible in-kernel with a
	 * buffer-on-demand pool), bdoff0/bdlim0 below are never
	 * assigned before being read — confirm callers guarantee at
	 * least one buflet on this path.
	 */
	bcnt = __packet_get_buflet_count(ph);
	len = QUM_ADDR(ph)->qum_len = 0;

	/* walk the chain: range-check each buflet and sum data lengths */
	while (bcnt--) {
		bcur = __packet_get_next_buflet(ph, bprev);

#ifdef KERNEL
		ASSERT(bcur != NULL);
		ASSERT(BLT_ADDR(bcur)->buf_addr != 0);
#else  /* !KERNEL */
		if (__improbable(bcur == NULL)) {
			err = ERANGE;
			break;
		}
#endif /* !KERNEL */

		/* save data offset from the first buflet */
		if (bprev == NULL) {
			bdoff0 = __buflet_get_data_offset(bcur);
			bdlim0 = __buflet_get_data_limit(bcur);
		}

#ifndef KERNEL
		if (__improbable(!BUF_IN_RANGE(BLT_ADDR(bcur)))) {
			err = ERANGE;
			break;
		}
#else /* KERNEL */
		/* an attached mbuf exempts the buflet from the range check */
		if (__improbable(!BUF_IN_RANGE(BLT_ADDR(bcur)) &&
		    !PKT_HAS_ATTACHED_MBUF(ph))) {
			err = ERANGE;
			break;
		}
#endif /* KERNEL */
		len += BLT_ADDR(bcur)->buf_dlen;
		bprev = bcur;
	}

	if (__improbable(err != 0)) {
		goto done;
	}

	/* headroom is stored in a uint8_t field */
	if (__improbable(bdoff0 > UINT8_MAX)) {
		err = ERANGE;
		goto done;
	}
	/* internalize headroom value from offset */
	PKT_ADDR(ph)->pkt_headroom = (uint8_t)bdoff0;
	/* validate header offsets in packet */
#ifndef KERNEL
	/* Overwrite L2 len for raw packets from user space */
	PKT_ADDR(ph)->pkt_l2_len = 0;
#else /* KERNEL */
	/* ensure that L3 >= L2 && L3 < bdlim */
	if (__improbable((PKT_ADDR(ph)->pkt_headroom +
	    PKT_ADDR(ph)->pkt_l2_len) >= bdlim0)) {
		err = ERANGE;
		goto done;
	}
#endif /* KERNEL */

	/* validate optional metadata when present */
	if (__improbable(PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_DATA)) {
#ifdef KERNEL
		struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt;
#else /* !KERNEL */
		struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt;
#endif /* !KERNEL */
		/* an expiry-time option must carry a non-zero timestamp */
		if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXPIRE_TS) &&
		    po->__po_expire_ts == 0) {
			err = EINVAL;
			goto done;
		}
		/* a token option must carry a non-zero length */
		if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) &&
		    po->__po_token_len == 0) {
			err =  EINVAL;
			goto done;
		}
		ASSERT(err == 0);
	}

	/*
	 * NOTE: we don't need the validation for total packet length
	 * as checking if each buflet is in range and that
	 * (pkt_headroom == bdoff0), should cover this check.
	 */

done:
	if (__probable(err == 0)) {
		QUM_ADDR(ph)->qum_len = len;
		QUM_ADDR(ph)->qum_qflags |= QUM_F_FINALIZED;
	} else {
		QUM_ADDR(ph)->qum_len = 0;
		QUM_ADDR(ph)->qum_qflags |= QUM_F_DROPPED;
	}

	return err;
}
1416 
1417 __attribute__((always_inline))
1418 static inline boolean_t
__packet_is_finalized(const uint64_t ph)1419 __packet_is_finalized(const uint64_t ph)
1420 {
1421 	return QUM_ADDR(ph)->qum_qflags & QUM_F_FINALIZED;
1422 }
1423 
1424 #ifdef KERNEL
1425 /*
1426  * function to initialize a packet with mbuf chain.
1427  * Apart from the attached mbuf, the packet can also be used to convey
1428  * additional metadata like the headroom and L2 header length.
1429  * For a packet with attached mbuf, the pkt_length conveys the length of
1430  * the attached mbuf. If the data copied is partial then PKT_F_TRUNCATED is
1431  * also set.
1432  */
1433 __attribute__((always_inline))
1434 static inline int
__packet_initialize_with_mbufchain(struct __kern_packet * pkt,struct mbuf * mbuf,uint8_t headroom,uint8_t l2len)1435 __packet_initialize_with_mbufchain(struct __kern_packet *pkt, struct mbuf *mbuf,
1436     uint8_t headroom, uint8_t l2len)
1437 {
1438 	VERIFY(METADATA_TYPE(pkt) == NEXUS_META_TYPE_PACKET);
1439 	VERIFY(pkt->pkt_qum.qum_qflags & QUM_F_INTERNALIZED);
1440 	VERIFY((pkt->pkt_pflags & PKT_F_MBUF_MASK) == 0);
1441 	VERIFY((pkt->pkt_pflags & PKT_F_PKT_DATA) == 0);
1442 	VERIFY(pkt->pkt_mbuf == NULL);
1443 
1444 	pkt->pkt_qum.qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);
1445 	pkt->pkt_mbuf = mbuf;
1446 	pkt->pkt_pflags |= (PKT_F_MBUF_DATA | PKT_F_TRUNCATED);
1447 	pkt->pkt_headroom = headroom;
1448 	pkt->pkt_l2_len = l2len;
1449 	pkt->pkt_length = m_pktlen(mbuf);
1450 	pkt->pkt_qum_buf.buf_dlen = 0;
1451 	pkt->pkt_qum_buf.buf_doff = 0;
1452 	pkt->pkt_qum.qum_qflags |= QUM_F_FINALIZED;
1453 	return 0;
1454 }
1455 
/*
 * Single-mbuf variant of __packet_initialize_with_mbufchain(); the
 * mbuf must not be part of a packet chain (m_nextpkt == NULL).
 */
__attribute__((always_inline))
static inline int
__packet_initialize_with_mbuf(struct __kern_packet *pkt, struct mbuf *mbuf,
    uint8_t headroom, uint8_t l2len)
{
	__packet_initialize_with_mbufchain(pkt, mbuf, headroom, l2len);
	/* checked after the fact; initializer never fails on this input */
	VERIFY(mbuf->m_nextpkt == NULL);
	return 0;
}
1465 
1466 /*
1467  * function to finalize a packet with attached mbuf.
1468  */
__attribute__((always_inline))
static inline int
__packet_finalize_with_mbuf(struct __kern_packet *pkt)
{
	uint32_t bdlen, bdoff, bdlim;
	struct __kern_buflet *buf;
	int err = 0;

	/* only an internalized packet with a data mbuf (and no pkt data) */
	VERIFY(METADATA_TYPE(pkt) == NEXUS_META_TYPE_PACKET);
	VERIFY((pkt->pkt_pflags & (PKT_F_MBUF_DATA | PKT_F_PKT_DATA)) ==
	    PKT_F_MBUF_DATA);
	VERIFY(pkt->pkt_mbuf != NULL);
	ASSERT(pkt->pkt_qum.qum_qflags & QUM_F_INTERNALIZED);
	VERIFY(pkt->pkt_bufs_cnt == 1);
	PKT_GET_FIRST_BUFLET(pkt, pkt->pkt_bufs_cnt, buf);
	ASSERT(buf->buf_addr != 0);

	/* start from a clean state; truncation is re-derived below */
	pkt->pkt_qum.qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED);
	pkt->pkt_pflags &= ~PKT_F_TRUNCATED;
	bdlen = buf->buf_dlen;
	bdlim = buf->buf_dlim;
	bdoff = buf->buf_doff;
	if (__improbable(!BUF_IN_RANGE(buf))) {
		err = ERANGE;
		goto done;
	}

	/* validate header offsets in packet */
	if (__improbable((pkt->pkt_headroom != bdoff) ||
	    (pkt->pkt_headroom >= bdlim))) {
		err = ERANGE;
		goto done;
	}
	if (__improbable((pkt->pkt_headroom +
	    pkt->pkt_l2_len) >= bdlim)) {
		err = ERANGE;
		goto done;
	}

	/* validate optional metadata when present */
	if (__improbable(pkt->pkt_pflags & PKT_F_OPT_DATA)) {
		struct __packet_opt *po = pkt->pkt_com_opt;

		/* an expiry-time option must carry a non-zero timestamp */
		if ((pkt->pkt_pflags & PKT_F_OPT_EXPIRE_TS) &&
		    po->__po_expire_ts == 0) {
			err = EINVAL;
			goto done;
		}
		/* a token option must carry a non-zero length */
		if ((pkt->pkt_pflags & PKT_F_OPT_TOKEN) &&
		    po->__po_token_len == 0) {
			err =  EINVAL;
			goto done;
		}
	}
	ASSERT(err == 0);

done:
	if (__probable(err == 0)) {
		pkt->pkt_length = (uint32_t)m_pktlen(pkt->pkt_mbuf);
		/* buflet holds fewer bytes than the mbuf: mark truncated */
		if (bdlen < pkt->pkt_length) {
			pkt->pkt_pflags |= PKT_F_TRUNCATED;
		}
		pkt->pkt_qum.qum_qflags |= QUM_F_FINALIZED;
	} else {
		pkt->pkt_length = 0;
		pkt->pkt_qum.qum_qflags |= QUM_F_DROPPED;
	}

	return err;
}
1538 
__attribute__((always_inline))
static inline uint32_t
__packet_get_object_index(const uint64_t ph)
{
	/* metadata object index as reported by METADATA_IDX() */
	uint32_t md_idx = METADATA_IDX(QUM_ADDR(ph));

	return md_idx;
}
1545 
1546 __attribute__((always_inline))
1547 static inline errno_t
__packet_get_timestamp(const uint64_t ph,uint64_t * ts,boolean_t * valid)1548 __packet_get_timestamp(const uint64_t ph, uint64_t *ts, boolean_t *valid)
1549 {
1550 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1551 
1552 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TS_VALID) != 0) {
1553 		if (valid != NULL) {
1554 			*valid = TRUE;
1555 		}
1556 		*ts = PKT_ADDR(ph)->pkt_timestamp;
1557 	} else {
1558 		if (valid != NULL) {
1559 			*valid = FALSE;
1560 		}
1561 		*ts = 0;
1562 	}
1563 
1564 	return 0;
1565 }
1566 
1567 __attribute__((always_inline))
1568 static inline errno_t
__packet_set_timestamp(const uint64_t ph,uint64_t ts,boolean_t valid)1569 __packet_set_timestamp(const uint64_t ph, uint64_t ts, boolean_t valid)
1570 {
1571 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1572 
1573 	if (valid) {
1574 		PKT_ADDR(ph)->pkt_timestamp = ts;
1575 		PKT_ADDR(ph)->pkt_pflags |= PKT_F_TS_VALID;
1576 	} else {
1577 		PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_TS_VALID;
1578 		PKT_ADDR(ph)->pkt_timestamp = 0;
1579 	}
1580 
1581 	return 0;
1582 }
1583 
1584 __attribute__((always_inline))
1585 static inline errno_t
__packet_get_tx_completion_data(const uint64_t ph,uintptr_t * cb_arg,uintptr_t * cb_data)1586 __packet_get_tx_completion_data(const uint64_t ph, uintptr_t *cb_arg,
1587     uintptr_t *cb_data)
1588 {
1589 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1590 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_DATA) != 0) {
1591 		ASSERT((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_ALLOC));
1592 		*cb_arg = PKT_ADDR(ph)->pkt_tx_compl_cb_arg;
1593 		*cb_data = PKT_ADDR(ph)->pkt_tx_compl_cb_data;
1594 	} else {
1595 		*cb_arg = 0;
1596 		*cb_data = 0;
1597 	}
1598 	return 0;
1599 }
1600 
1601 __attribute__((always_inline))
1602 static inline errno_t
__packet_set_tx_completion_data(const uint64_t ph,uintptr_t cb_arg,uintptr_t cb_data)1603 __packet_set_tx_completion_data(const uint64_t ph, uintptr_t cb_arg,
1604     uintptr_t cb_data)
1605 {
1606 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1607 	_KPKT_INIT_TX_COMPL_DATA(PKT_ADDR(ph));
1608 	PKT_ADDR(ph)->pkt_tx_compl_cb_arg = cb_arg;
1609 	PKT_ADDR(ph)->pkt_tx_compl_cb_data = cb_data;
1610 	return 0;
1611 }
1612 
1613 __attribute__((always_inline))
1614 static inline errno_t
__packet_get_timestamp_requested(const uint64_t ph,boolean_t * requested)1615 __packet_get_timestamp_requested(const uint64_t ph, boolean_t *requested)
1616 {
1617 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1618 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0) {
1619 		*requested = TRUE;
1620 	} else {
1621 		*requested = FALSE;
1622 	}
1623 	return 0;
1624 }
1625 
1626 __attribute__((always_inline))
1627 static inline errno_t
__packet_get_tx_completion_status(const uint64_t ph,kern_return_t * status)1628 __packet_get_tx_completion_status(const uint64_t ph, kern_return_t *status)
1629 {
1630 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1631 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_DATA) != 0) {
1632 		ASSERT((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_ALLOC));
1633 		*status = (kern_return_t)PKT_ADDR(ph)->pkt_tx_compl_status;
1634 	} else {
1635 		*status = 0;
1636 	}
1637 	return 0;
1638 }
1639 
1640 __attribute__((always_inline))
1641 static inline errno_t
__packet_set_tx_completion_status(const uint64_t ph,kern_return_t status)1642 __packet_set_tx_completion_status(const uint64_t ph, kern_return_t status)
1643 {
1644 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1645 	_KPKT_INIT_TX_COMPL_DATA(PKT_ADDR(ph));
1646 	PKT_ADDR(ph)->pkt_tx_compl_status = (uint32_t)status;
1647 	return 0;
1648 }
1649 
1650 __attribute__((always_inline))
1651 static inline errno_t
__packet_set_tx_nx_port(const uint64_t ph,nexus_port_t nx_port,uint16_t vpna_gencnt)1652 __packet_set_tx_nx_port(const uint64_t ph, nexus_port_t nx_port,
1653     uint16_t vpna_gencnt)
1654 {
1655 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1656 	PKT_ADDR(ph)->pkt_nx_port = nx_port;
1657 	PKT_ADDR(ph)->pkt_vpna_gencnt = vpna_gencnt;
1658 	PKT_ADDR(ph)->pkt_pflags |= PKT_F_TX_PORT_DATA;
1659 	return 0;
1660 }
1661 
1662 __attribute__((always_inline))
1663 static inline errno_t
__packet_get_tx_nx_port(const uint64_t ph,nexus_port_t * nx_port,uint16_t * vpna_gencnt)1664 __packet_get_tx_nx_port(const uint64_t ph, nexus_port_t *nx_port,
1665     uint16_t *vpna_gencnt)
1666 {
1667 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1668 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_PORT_DATA) == 0) {
1669 		return ENOTSUP;
1670 	}
1671 
1672 	*nx_port = PKT_ADDR(ph)->pkt_nx_port;
1673 	*vpna_gencnt = PKT_ADDR(ph)->pkt_vpna_gencnt;
1674 	return 0;
1675 }
1676 
1677 __attribute__((always_inline))
1678 static inline errno_t
__packet_get_tx_nx_port_id(const uint64_t ph,uint32_t * nx_port_id)1679 __packet_get_tx_nx_port_id(const uint64_t ph, uint32_t *nx_port_id)
1680 {
1681 	errno_t err;
1682 	nexus_port_t nx_port;
1683 	uint16_t vpna_gencnt;
1684 
1685 	static_assert(sizeof(nx_port) == sizeof(uint16_t));
1686 
1687 	err = __packet_get_tx_nx_port(ph, &nx_port, &vpna_gencnt);
1688 	if (err == 0) {
1689 		*nx_port_id = PKT_COMPOSE_NX_PORT_ID(nx_port, vpna_gencnt);
1690 	}
1691 	return err;
1692 }
1693 
1694 
1695 __attribute__((always_inline))
1696 static inline errno_t
__packet_get_flowid(const uint64_t ph,packet_flowid_t * pflowid)1697 __packet_get_flowid(const uint64_t ph, packet_flowid_t *pflowid)
1698 {
1699 	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
1700 	if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_FLOW_ID) == 0) {
1701 		return ENOENT;
1702 	}
1703 	*pflowid = PKT_ADDR(ph)->pkt_flow_token;
1704 	return 0;
1705 }
1706 #endif /* KERNEL */
1707 
1708 extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t);
1709 
__attribute__((always_inline))
static inline uint16_t
__packet_fold_sum(uint32_t sum)
{
	/*
	 * Fold the 32-bit accumulator down to 16 bits by repeatedly
	 * adding the carries back in (three folds suffice for any
	 * 32-bit input).
	 */
	for (int i = 0; i < 3; i++) {
		sum = (sum >> 16) + (sum & 0xffff);
	}
	return (uint16_t)(sum & 0xffff);
}
1719 
__attribute__((always_inline))
static inline uint16_t
__packet_fold_sum_final(uint32_t sum)
{
	/*
	 * Fold the 32-bit accumulator down to 16 bits, then return its
	 * one's complement (the final wire-format checksum value).
	 */
	for (int i = 0; i < 3; i++) {
		sum = (sum >> 16) + (sum & 0xffff);
	}
	return (uint16_t)(~sum & 0xffff);
}
1729 
__attribute__((always_inline))
static inline uint32_t
__packet_cksum(const void *data, uint32_t len, uint32_t sum0)
{
	/* delegate to the optimized per-CPU checksum routine */
	uint32_t partial = os_cpu_in_cksum(data, len, sum0);

	return partial;
}
1736 
1737 extern uint32_t os_cpu_copy_in_cksum(const void *__sized_by(len), void *__sized_by(len),
1738     uint32_t len, uint32_t);
1739 
/*
 * Copy len bytes from src to dst while computing a checksum over the
 * data (seeded with sum0), as implemented by os_cpu_copy_in_cksum().
 */
__attribute__((always_inline))
static inline uint32_t
__packet_copy_and_sum(const void *__sized_by(len) src, void *__sized_by(len) dst,
    uint32_t len, uint32_t sum0)
{
	return os_cpu_copy_in_cksum(src, dst, len, sum0);
}
1747 
__attribute__((always_inline))
static inline uint16_t
__packet_fix_sum(uint16_t csum, uint16_t old, uint16_t new)
{
	/*
	 * Incrementally adjust a 16-bit checksum after one 16-bit
	 * word changes from `old' to `new'.
	 */
	uint32_t acc = (uint32_t)csum + old - new;

	acc = (acc >> 16) + (acc & 0xffff);     /* Only add carry once */
	return (uint16_t)(acc & 0xffff);
}
1757 
/* MUST be used for uint32_t fields */
__attribute__((always_inline))
static inline void
__packet_fix_hdr_sum(uint8_t *__sized_by(4)field, uint16_t *csum, uint32_t new)
{
	uint32_t old;
	/* memcpy: the field may not be 4-byte aligned */
	memcpy(&old, field, sizeof(old));
	memcpy(field, &new, sizeof(uint32_t));
	/* adjust the checksum incrementally, one 16-bit half at a time */
	*csum = __packet_fix_sum(__packet_fix_sum(*csum, (uint16_t)(old >> 16),
	    (uint16_t)(new >> 16)), (uint16_t)(old & 0xffff),
	    (uint16_t)(new & 0xffff));
}
1770 
/*
 * Return a pointer to the buflet's buffer, with bounds (for
 * -fbounds-safety builds) set to the buflet's data limit.
 */
__attribute__((always_inline))
static inline void *__header_indexable
__buflet_get_data_address(const void *buf)
{
	return __unsafe_forge_bidi_indexable(void *, (void *)(BLT_ADDR(buf)->buf_addr),
	           BLT_ADDR(buf)->buf_dlim);
}
1778 
1779 #ifdef KERNEL
1780 __attribute__((always_inline))
1781 static inline errno_t
__buflet_set_data_address(const void * buf,const void * addr)1782 __buflet_set_data_address(const void *buf, const void *addr)
1783 {
1784 	/* buffer region is always marked as shareable */
1785 	ASSERT(BLT_ADDR(buf)->buf_ctl->bc_flags & SKMEM_BUFCTL_SHAREOK);
1786 
1787 	/* full bounds checking will be performed during finalize */
1788 	if (__probable((uintptr_t)addr >=
1789 	    (uintptr_t)BLT_ADDR(buf)->buf_objaddr)) {
1790 		static_assert(sizeof(BLT_ADDR(buf)->buf_addr) ==
1791 		    sizeof(mach_vm_address_t));
1792 		/* deconst */
1793 		*(mach_vm_address_t *)(uintptr_t)&BLT_ADDR(buf)->buf_addr =
1794 		    (mach_vm_address_t)addr;
1795 		return 0;
1796 	}
1797 	return ERANGE;
1798 }
1799 
1800 /*
1801  * Equivalent to __buflet_set_data_address but based on offset, packets/buflets
1802  * set with this should not be directly passed to userspace, since shared buffer
1803  * is not yet supported by user facing pool.
1804  */
1805 __attribute__((always_inline))
1806 static inline int
__buflet_set_buffer_offset(const void * buf,const uint32_t off)1807 __buflet_set_buffer_offset(const void *buf, const uint32_t off)
1808 {
1809 	ASSERT(BLT_ADDR(buf)->buf_objlim != 0);
1810 
1811 	if (__probable(off <= BLT_ADDR(buf)->buf_objlim)) {
1812 		*(mach_vm_address_t *)(uintptr_t)&BLT_ADDR(buf)->buf_addr =
1813 		    (mach_vm_address_t)BLT_ADDR(buf)->buf_objaddr + off;
1814 		return 0;
1815 	}
1816 	return ERANGE;
1817 }
1818 #endif /* KERNEL */
1819 
/*
 * Set the buflet's data offset (buf_doff).  In kernel the value is
 * range-checked and ERANGE is returned when out of range; user space
 * stores it unconditionally.  Returns 0 on success.
 */
__attribute__((always_inline))
static inline int
__buflet_set_data_offset(const void *buf, const uint32_t doff)
{
#ifdef KERNEL
	/*
	 * Kernel-specific assertion.  For user space, the metadata
	 * region gets redirected to anonymous zero-filled pages at
	 * defunct time, so ignore it there.
	 */
	ASSERT(BLT_ADDR(buf)->buf_dlim != 0);

	/*
	 * NOTE(review): checked against buf_objlim rather than buf_dlim;
	 * presumably full bounds checking is deferred to finalize.
	 */
	if (__probable((uint32_t)doff <= BLT_ADDR(buf)->buf_objlim)) {
		BLT_ADDR(buf)->buf_doff = doff;
		return 0;
	}
	return ERANGE;
#else /* !KERNEL */
	BLT_ADDR(buf)->buf_doff = doff;
	return 0;
#endif /* KERNEL */
}
1842 
/*
 * Set the buflet's data length (buf_dlen).  In kernel the value is
 * range-checked and ERANGE is returned when out of range; user space
 * stores it unconditionally.  Returns 0 on success.
 */
__attribute__((always_inline))
static inline int
__buflet_set_data_length(const void *buf, const uint32_t dlen)
{
#ifdef KERNEL
	/*
	 * Kernel-specific assertion.  For user space, the metadata
	 * region gets redirected to anonymous zero-filled pages at
	 * defunct time, so ignore it there.
	 */
	ASSERT(BLT_ADDR(buf)->buf_dlim != 0);

	/*
	 * NOTE(review): checked against buf_objlim rather than buf_dlim;
	 * presumably full bounds checking is deferred to finalize.
	 */
	if (__probable((uint32_t)dlen <= BLT_ADDR(buf)->buf_objlim)) {
		BLT_ADDR(buf)->buf_dlen = dlen;
		return 0;
	}
	return ERANGE;
#else /* !KERNEL */
	BLT_ADDR(buf)->buf_dlen = dlen;
	return 0;
#endif /* KERNEL */
}
1865 
/*
 * Return the buflet's data length (buf_dlen).
 */
__attribute__((always_inline))
static inline uint32_t
__buflet_get_data_length(const void *buf)
{
	return BLT_ADDR(buf)->buf_dlen;
}
1872 
1873 #ifdef KERNEL
/*
 * Return the segment backing the buflet's buffer object.  If idx is
 * non-NULL, also return the object's index (bc_idx) through it.
 */
__attribute__((always_inline))
static inline struct sksegment *
__buflet_get_object_segment(const void *buf, kern_obj_idx_seg_t *idx)
{
	/* bc_idx (obj_idx_t) must fit exactly in a kern_obj_idx_seg_t */
	static_assert(sizeof(obj_idx_t) == sizeof(kern_obj_idx_seg_t));

	if (idx != NULL) {
		*idx = BLT_ADDR(buf)->buf_ctl->bc_idx;
	}

	return BLT_ADDR(buf)->buf_ctl->bc_slab->sl_seg;
}
1886 #endif /* KERNEL */
1887 
/*
 * Return the base address of the buffer object backing the buflet.
 */
__attribute__((always_inline))
static inline void *
__buflet_get_object_address(const void *buf)
{
#ifdef KERNEL
	return (void *)(BLT_ADDR(buf)->buf_objaddr);
#else /* !KERNEL */
	/*
	 * For user space, shared buffer is not available and hence the data
	 * address is immutable and is always the same as the underlying
	 * buffer object address itself.
	 */
	return __buflet_get_data_address(buf);
#endif /* !KERNEL */
}
1903 
/*
 * Return the limit (size) of the buffer object backing the buflet.
 */
__attribute__((always_inline))
static inline uint32_t
__buflet_get_object_limit(const void *buf)
{
#ifdef KERNEL
	return BLT_ADDR(buf)->buf_objlim;
#else /* !KERNEL */
	/*
	 * For user space, shared buffer is not available and hence the data
	 * limit is immutable and is always the same as the underlying buffer
	 * object limit itself.
	 */
	return (uint32_t)__buflet_get_data_limit(buf);
#endif /* !KERNEL */
}
1919 
/*
 * Return the packet's trace ID (pkt_trace_id).
 */
__attribute__((always_inline))
static inline packet_trace_id_t
__packet_get_trace_id(const uint64_t ph)
{
	return PKT_ADDR(ph)->pkt_trace_id;
}
1926 
/*
 * Set the packet's trace ID (pkt_trace_id).  ph must refer to a
 * full packet (NEXUS_META_TYPE_PACKET), not a quantum.
 */
__attribute__((always_inline))
static inline void
__packet_set_trace_id(const uint64_t ph, packet_trace_id_t id)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_trace_id = id;
}
1934 
/*
 * Emit a kdebug trace event keyed by the packet's trace ID: KDBG()
 * in kernel, kdebug_trace(2) in user space.
 */
__attribute__((always_inline))
static inline void
__packet_trace_event(const uint64_t ph, uint32_t event)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
#ifdef KERNEL
/*
 * NOTE(review): the pragma presumably silences unused-parameter
 * warnings for configurations where KDBG() expands to nothing.
 */
#pragma unused(event, ph)
	KDBG(event, PKT_ADDR(ph)->pkt_trace_id);
#else /* !KERNEL */
	kdebug_trace(event, PKT_ADDR(ph)->pkt_trace_id, 0, 0, 0);
#endif /* !KERNEL */
}
1947 
1948 #ifdef KERNEL
/*
 * Return the packet's trace tag (pkt_trace_tag).
 */
__attribute__((always_inline))
static inline packet_trace_tag_t
__packet_get_trace_tag(const uint64_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return PKT_ADDR(ph)->pkt_trace_tag;
}
1956 
/*
 * Set the packet's trace tag (pkt_trace_tag).
 */
__attribute__((always_inline))
static inline void
__packet_set_trace_tag(const uint64_t ph, packet_trace_tag_t tag)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_trace_tag = tag;
}
1964 
1965 static inline void
__packet_perform_tx_completion_callbacks(const kern_packet_t ph,ifnet_t ifp)1966 __packet_perform_tx_completion_callbacks(const kern_packet_t ph, ifnet_t ifp)
1967 {
1968 	/*
1969 	 * NOTE: this function can be called with ifp as NULL.
1970 	 */
1971 	uint64_t ts;
1972 	kern_return_t tx_status;
1973 	uintptr_t cb_arg, cb_data;
1974 	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph);
1975 
1976 	ASSERT((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0);
1977 	(void) __packet_get_tx_completion_status(ph, &tx_status);
1978 	__packet_get_tx_completion_data(ph, &cb_arg, &cb_data);
1979 	__packet_get_timestamp(ph, &ts, NULL);
1980 	while (kpkt->pkt_tx_compl_callbacks != 0) {
1981 		mbuf_tx_compl_func cb;
1982 		uint32_t i;
1983 
1984 		i = ffs(kpkt->pkt_tx_compl_callbacks) - 1;
1985 		kpkt->pkt_tx_compl_callbacks &= ~(1 << i);
1986 		cb = m_get_tx_compl_callback(i);
1987 		if (__probable(cb != NULL)) {
1988 			cb(kpkt->pkt_tx_compl_context, ifp, ts, cb_arg, cb_data,
1989 			    tx_status);
1990 		}
1991 	}
1992 	kpkt->pkt_pflags &= ~PKT_F_TX_COMPL_TS_REQ;
1993 }
1994 
/*
 * Return the packet's private data pointer (pkt_priv).
 */
static inline void *
__packet_get_priv(const kern_packet_t ph)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	return PKT_ADDR(ph)->pkt_priv;
}
2001 
/*
 * Set the packet's private data pointer (pkt_priv).  Ownership and
 * lifetime of priv are the caller's responsibility.
 */
static inline void
__packet_set_priv(const uint64_t ph, void *priv)
{
	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	PKT_ADDR(ph)->pkt_priv = priv;
}
2008 #endif /* KERNEL */
2009 
2010 #endif /* PRIVATE || BSD_KERNEL_PRIVATE */
2011 #endif /* !_SKYWALK_PACKET_COMMON_H_ */
2012