xref: /xnu-10002.41.9/bsd/skywalk/packet/os_packet_private.h (revision 699cd48037512bf4380799317ca44ca453c82f57)
1 /*
2  * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_OS_PACKET_PRIVATE_H_
30 #define _SKYWALK_OS_PACKET_PRIVATE_H_
31 
32 #if defined(PRIVATE) || defined(BSD_KERNEL_PRIVATE)
33 #include <skywalk/os_packet.h>
34 #include <skywalk/os_nexus_private.h>
35 #include <skywalk/os_channel_private.h>
36 #include <libkern/OSByteOrder.h>
37 #include <netinet/in.h>
38 #include <net/ethernet.h>
39 
40 #if defined(BSD_KERNEL_PRIVATE)
41 /*
42  * Flow (currently for kernel, potentially for userland one day).
43  *
 * XXX: When we expose this to userland, we need to make sure to NOT
45  * expose kernel pointer/address values embedded within.
46  *
47  * Values in flow_{l2,l3,l4} are stored in network byte order.  Pointers
48  * are defined using mach_vm_address_t because it's stable across user
49  * and kernel, and therefore keeps the structure size the same.
50  *
51  * Because this structure might be initialized on a per-packet allocation
 * basis, it as well as some of its member sub-structures are allocated
53  * on a 16-bytes address boundary to allow 128-bit operations on platforms
54  * that support them.
55  *
56  * XXX: when adding new fields, try to leverage __pad ones first.
57  *
58  * TODO: we should consider embedding a flow_key structure here and
59  * use that to store the tuples.  That way we can leverage that for
60  * flow lookups without having to copy things back-and-forth.
61  */
struct __flow {
	union {
		/*
		 * The following is always zeroed out on each alloc.
		 */
		struct __flow_init {
			/*
			 * Layer 3
			 */
			struct __flow_l3 {
				union {
					/* IPv4 source/destination addresses */
					struct __flow_l3_ipv4_addrs {
						struct in_addr _src;
						struct in_addr _dst;
					} _l3_ipv4;
					/* IPv6 source/destination addresses */
					struct __flow_l3_ipv6_addrs {
						struct in6_addr _src;
						struct in6_addr _dst;
					} _l3_ipv6;
				};
				uint8_t  _l3_ip_ver;            /* IP version */
				uint8_t  _l3_proto;             /* L4 protocol */
				uint8_t  _l3_hlen;              /* IP header length */
				unsigned _l3_is_frag : 1;       /* packet is a fragment */
				unsigned _l3_is_first_frag : 1; /* first fragment of series */
				unsigned _l3_reserved_flags : 6;
				uint32_t _l3_frag_id;           /* IP fragment identifier */
				mach_vm_address_t _l3_ptr;      /* address of IP header */
			} __l3;
			/*
			 * AQM
			 */
			struct __flow_classq {
				uint32_t _fcq_hash;  /* classq-specific hash */
				uint32_t _fcq_flags; /* classq-specific flags */
			} __classq;
			/*
			 * Misc.
			 */
			uint32_t __ulen;      /* user data length */
			uint8_t  __ulp_encap; /* e.g. IPPROTO_QUIC */
			uint8_t  __pad[3];
			uint64_t __pad64[2];
			/*
			 * Flow Source.
			 */
			struct __flow_source {
				union {
					/* source identifier */
					uint64_t _fsrc_id_64[2];
					uint32_t _fsrc_id_32[4];
					uuid_t   _fsrc_id;
				} __attribute__((aligned(sizeof(uint64_t))));
				flowadv_idx_t _fsrc_fidx; /* flow adv. index */
				uint8_t       _fsrc_type; /* FLOWSRC_* mbuf.h */
				uint8_t       _fsrc_pad[3];
			} __source;
			/*
			 * Policy.
			 */
			struct __flow_policy {
				uint32_t _fpc_id; /* policy id of pkt sender */
				uint32_t _fpc_pad;
				union {
					/* process identifier */
					uint64_t _fpc_euuid_64[2];
					uint32_t _fpc_euuid_32[4];
					uuid_t   _fpc_euuid;
				} __attribute__((aligned(sizeof(uint64_t))));
			} __policy;
		} flow_init;
		/* raw 128-byte view of flow_init, used for bulk zeroing/copying */
		uint64_t flow_init_data[16];
	} __attribute((aligned(16)));
#define flow_l3                 flow_init.__l3
#define flow_classq             flow_init.__classq
#define flow_ulen               flow_init.__ulen
#define flow_ulp_encap          flow_init.__ulp_encap
#define flow_source             flow_init.__source
#define flow_policy             flow_init.__policy

#define flow_ipv4_addrs         flow_l3._l3_ipv4
#define flow_ipv4_src           flow_l3._l3_ipv4._src
#define flow_ipv4_dst           flow_l3._l3_ipv4._dst
#define flow_ipv6_addrs         flow_l3._l3_ipv6
#define flow_ipv6_src           flow_l3._l3_ipv6._src
#define flow_ipv6_dst           flow_l3._l3_ipv6._dst
#define flow_ip_ver             flow_l3._l3_ip_ver
#define flow_ip_proto           flow_l3._l3_proto
#define flow_ip_hlen            flow_l3._l3_hlen
#define flow_ip_hdr             flow_l3._l3_ptr
#define flow_ip_frag_id         flow_l3._l3_frag_id
#define flow_ip_is_frag         flow_l3._l3_is_frag
#define flow_ip_is_first_frag   flow_l3._l3_is_first_frag

#define flow_classq_hash        flow_classq._fcq_hash
#define flow_classq_flags       flow_classq._fcq_flags

#define flow_src_token          flow_source._fsrc_id_32[0]
#define flow_src_id             flow_source._fsrc_id
#define flow_src_fidx           flow_source._fsrc_fidx
#define flow_src_type           flow_source._fsrc_type

#define flow_policy_id          flow_policy._fpc_id
#define flow_policy_euuid       flow_policy._fpc_euuid

	/*
	 * Layer 4.
	 */
	union {
		struct __flow_l4 {
			union {
				struct __flow_l4_tcp {
					in_port_t _src;
					in_port_t _dst;
					uint32_t _seq;
					uint32_t _ack;
					union {
						/* TCP data offset/flags/window, laid out as on the wire */
						struct {
#if BYTE_ORDER == LITTLE_ENDIAN
							uint8_t _tcp_res:4;
							uint8_t _off:4;
#else /* BYTE_ORDER == BIG_ENDIAN */
							uint8_t _off:4;
							uint8_t _tcp_res:4;
#endif /* BYTE_ORDER == BIG_ENDIAN */
							uint8_t _flags;
							uint16_t _win;
						};
						/* raw 32-bit view of the offset/flags/window fields */
						uint32_t _ofw;
					};
				} _l4_tcp;
				struct __flow_l4_udp {
					in_port_t _src;
					in_port_t _dst;
					uint32_t _ls;
				} _l4_udp;
				struct __flow_l4_esp {
					uint32_t _spi;
				} _l4_esp;
			};
			uint8_t _l4_hlen;               /* L4 header length */
			uint8_t _l4_agg_fast;
			uint8_t _l4_pad[6];
			mach_vm_address_t _l4_ptr;      /* address of L4 header */
		} flow_l4;
		/* raw 32-byte view of flow_l4 */
		uint64_t flow_l4_data[4];
	} __attribute((aligned(sizeof(uint64_t))));
#define flow_tcp                flow_l4._l4_tcp
#define flow_tcp_src            flow_l4._l4_tcp._src
#define flow_tcp_dst            flow_l4._l4_tcp._dst
#define flow_tcp_seq            flow_l4._l4_tcp._seq
#define flow_tcp_ack            flow_l4._l4_tcp._ack
#define flow_tcp_off            flow_l4._l4_tcp._off
#define flow_tcp_flags          flow_l4._l4_tcp._flags
#define flow_tcp_win            flow_l4._l4_tcp._win
#define flow_tcp_hlen           flow_l4._l4_hlen
#define flow_tcp_hdr            flow_l4._l4_ptr
#define flow_tcp_agg_fast       flow_l4._l4_agg_fast
#define flow_udp                flow_l4._l4_udp
#define flow_udp_src            flow_l4._l4_udp._src
#define flow_udp_dst            flow_l4._l4_udp._dst
#define flow_udp_hlen           flow_l4._l4_hlen
#define flow_udp_hdr            flow_l4._l4_ptr
#define flow_esp_spi            flow_l4._l4_esp._spi
} __attribute((aligned(16)));
227 #endif /* BSD_KERNEL_PRIVATE */
228 
/*
 * Maximum size of L2, L3 & L4 headers combined.
 */
#define PKT_MAX_PROTO_HEADER_SIZE       256

/* based on 2KB buflet size; counts are the buflets needed to cover the MTU */
#define BUFLETS_MIN             1       /* Ethernet MTU (default) */
#define BUFLETS_9K_JUMBO        5       /* 9000 bytes MTU */
#define BUFLETS_GSO             46      /* 64KB GSO, Ethernet MTU */
238 
239 /*
240  * Common buflet structure shared by {__user,__kern}_buflet.
241  */
/*
 * NB: the "const" fields below are immutable through normal access and
 * are initialized via the __DECONST-based BUF_* writer macros.
 */
struct __buflet {
	union {
		/* for skmem batch alloc/free */
		uint64_t __buflet_next;
		/* address of next buflet in chain */
		const mach_vm_address_t __nbft_addr;
	};
	/* buffer data address */
	const mach_vm_address_t __baddr;
	/* index of buflet object in the owning buflet region */
	const obj_idx_t __bft_idx;
	/* buffer object index in buffer region */
	const obj_idx_t __bidx;
	/* object index in buflet region of next buflet(for buflet chaining) */
	const obj_idx_t __nbft_idx;
	const uint32_t  __dlim;         /* maximum length */
	uint32_t        __doff;         /* offset of data in buflet */
	uint32_t        __dlen;         /* length of data in buflet */
	const uint16_t  __flag;         /* BUFLET_FLAG_* values below */
#define BUFLET_FLAG_EXTERNAL    0x0001
#define BUFLET_FLAG_LARGE_BUF   0x0002 /* buflet holds large buffer */
} __attribute((packed));
264 
265 /*
266  * A buflet represents the smallest buffer fragment representing
267  * part of the packet.  The index refers to the position of the buflet
268  * in the pool, and the data length represents the actual payload
269  * size -- not the buflet size itself as it is fixed for all objects
270  * in the pool.
271  */
struct __user_buflet {
	/*
	 * Common area between user and kernel variants.
	 * The buf_* macros below are convenience aliases into it.
	 */
	struct __buflet buf_com;
#define buf_addr        buf_com.__baddr
#define buf_nbft_addr   buf_com.__nbft_addr
#define buf_idx         buf_com.__bidx
#define buf_nbft_idx    buf_com.__nbft_idx
#define buf_dlim        buf_com.__dlim
#define buf_dlen        buf_com.__dlen
#define buf_doff        buf_com.__doff
#define buf_flag        buf_com.__flag
#define buf_bft_idx_reg buf_com.__bft_idx
};
287 
/* true if the buflet is backed by the large buffer region */
#define BUFLET_HAS_LARGE_BUF(_buf)    \
	(((_buf)->buf_flag & BUFLET_FLAG_LARGE_BUF) != 0)

/*
 * Writers for the "const" buflet fields.  These deliberately cast away
 * const via __DECONST: the fields are read-only through normal access
 * but must be set on the constructor/linking paths below.
 */
#define BUF_BADDR(_buf, _addr)                                              \
	*__DECONST(mach_vm_address_t *, &(_buf)->buf_addr) =                \
	(mach_vm_address_t)(_addr)

#define BUF_BIDX(_buf, _idx)                                                \
	*__DECONST(obj_idx_t *, &(_buf)->buf_idx) = (obj_idx_t)(_idx)

#define BUF_NBFT_ADDR(_buf, _addr)                                          \
	*__DECONST(mach_vm_address_t *, &(_buf)->buf_nbft_addr) =           \
	(mach_vm_address_t)(_addr)

#define BUF_NBFT_IDX(_buf, _idx)                                            \
	*__DECONST(obj_idx_t *, &(_buf)->buf_nbft_idx) = (obj_idx_t)(_idx)

#define BUF_BFT_IDX_REG(_buf, _idx)    \
	*__DECONST(obj_idx_t *, &(_buf)->buf_bft_idx_reg) = (_idx)

/* append user buflet _ubft to _pubft: record its address and region index */
#define UBUF_LINK(_pubft, _ubft) do {                                   \
	ASSERT((_ubft) != NULL);                                        \
	BUF_NBFT_ADDR(_pubft, _ubft);                                   \
	BUF_NBFT_IDX(_pubft, (_ubft)->buf_bft_idx_reg);                 \
} while (0)
313 
#ifdef KERNEL
/*
 * Full buflet constructor: sets buffer address/index, next-buflet linkage,
 * data limit, data length/offset and flags in one shot.  Used when the
 * buflet object is (re)built from scratch.
 */
#define BUF_CTOR(_buf, _baddr, _bidx, _dlim, _dlen, _doff, _nbaddr, _nbidx, _bflag) do {  \
	_CASSERT(sizeof ((_buf)->buf_addr) == sizeof (mach_vm_address_t)); \
	_CASSERT(sizeof ((_buf)->buf_idx) == sizeof (obj_idx_t));       \
	_CASSERT(sizeof ((_buf)->buf_dlim) == sizeof (uint32_t));       \
	BUF_BADDR(_buf, _baddr);                                        \
	BUF_NBFT_ADDR(_buf, _nbaddr);                                   \
	BUF_BIDX(_buf, _bidx);                                          \
	BUF_NBFT_IDX(_buf, _nbidx);                                     \
	*(uint32_t *)(uintptr_t)&(_buf)->buf_dlim = (_dlim);            \
	(_buf)->buf_dlen = (_dlen);                                     \
	(_buf)->buf_doff = (_doff);                                     \
	*(uint16_t *)(uintptr_t)&(_buf)->buf_flag = (_bflag);           \
} while (0)

/* lightweight re-init: only the mutable data length/offset are reset */
#define BUF_INIT(_buf, _dlen, _doff) do {                               \
	(_buf)->buf_dlen = (_dlen);                                     \
	(_buf)->buf_doff = (_doff);                                     \
} while (0)

#endif /* KERNEL */
335 
#ifdef KERNEL
/*
 * Kernel variant: validates that the buffer lies within its backing
 * object and that the data span (doff + dlen) fits within dlim.
 */
#define BUF_IN_RANGE(_buf)                                              \
	((_buf)->buf_addr >= (mach_vm_address_t)(_buf)->buf_objaddr &&  \
	((uintptr_t)(_buf)->buf_addr + (_buf)->buf_dlim) <=             \
	((uintptr_t)(_buf)->buf_objaddr + (_buf)->buf_objlim) &&        \
	((_buf)->buf_doff + (_buf)->buf_dlen) <= (_buf)->buf_dlim)
#else /* !KERNEL */
/* userland variant: only the data span can be checked from here */
#define BUF_IN_RANGE(_buf)                                              \
	(((_buf)->buf_doff + (_buf)->buf_dlen) <= (_buf)->buf_dlim)
#endif /* !KERNEL */
346 
347 /*
 * Metadata preamble.  This structure is placed at beginning of each
349  * __{user,kern}_{quantum,packet} object.  Each user metadata object has a
350  * unique red zone pattern, which is an XOR of the redzone cookie and
351  * offset of the metadata object in the object's region.  Due to the use
352  * of tagged pointer, we need the structure size to be multiples of 16.
353  * See SK_PTR_TAG() definition for details.
354  */
struct __metadata_preamble {
	union {
		uint64_t        _mdp_next;      /* for batch alloc/free (K) */
		uint64_t        mdp_redzone;    /* red zone cookie (U) */
	};
	const obj_idx_t         mdp_idx;        /* index within region (UK) */
	uint16_t                mdp_type;       /* nexus_meta_type_t (UK) */
	uint16_t                mdp_subtype;    /* nexus_meta_subtype_t (UK) */
};
364 
#define METADATA_PREAMBLE_SZ    (sizeof (struct __metadata_preamble))

/*
 * The preamble is located immediately before each metadata object,
 * hence the subtraction from the object address.
 */
#define METADATA_PREAMBLE(_md)                  \
	((struct __metadata_preamble *)         \
	((mach_vm_address_t)(_md) - METADATA_PREAMBLE_SZ))

#define METADATA_IDX(_md)                       \
	(METADATA_PREAMBLE(_md)->mdp_idx)

#define METADATA_TYPE(_md)                      \
	(METADATA_PREAMBLE(_md)->mdp_type)

#define METADATA_SUBTYPE(_md)                   \
	(METADATA_PREAMBLE(_md)->mdp_subtype)
379 
380 /*
381  * Common packet structure shared by {__user,__kern}_quantum.
382  */
struct __quantum {
	/* flow identifier, accessible as UUID or as raw 8/16/32/64-bit words */
	union {
		uuid_t          __uuid;         /* flow UUID */
		uint8_t         __val8[16];
		uint16_t        __val16[8];
		uint32_t        __val32[4];
		uint64_t        __val64[2];
	} __flow_id_u;
#define __q_flow_id             __flow_id_u.__uuid
#define __q_flow_id_val8        __flow_id_u.__val8
#define __q_flow_id_val16       __flow_id_u.__val16
#define __q_flow_id_val32       __flow_id_u.__val32
#define __q_flow_id_val64       __flow_id_u.__val64

	uint32_t                __q_len;

	/* QoS service class, see packet_svc_class_t */
	uint32_t                __q_svc_class;  /* PKT_SC_* values */

	/*
	 * See notes on _QUM_{INTERNALIZE,EXTERNALIZE}() regarding
	 * portion of this structure above __flags that gets copied.
	 * Adding more user-mutable fields after __flags would also
	 * require adjusting those macros as well.
	 */
	volatile uint16_t       __q_flags;      /* QUMF_* flags */
	uint16_t                __q_pad[3];
} __attribute((aligned(sizeof(uint64_t))));
411 
412 /*
413  * Quantum.
414  *
415  * This structure is aligned for efficient copy and accesses.
416  * It is the user version of the __kernel_quantum structure.
417  *
418  * XXX: Do NOT store kernel pointer/address values here.
419  */
struct __user_quantum {
	/*
	 * Common area between user and kernel variants.
	 * The qum_* macros below are convenience aliases into it.
	 */
	struct __quantum qum_com;
#define qum_flow_id             qum_com.__q_flow_id
#define qum_flow_id_val8        qum_com.__q_flow_id_val8
#define qum_flow_id_val16       qum_com.__q_flow_id_val16
#define qum_flow_id_val32       qum_com.__q_flow_id_val32
#define qum_flow_id_val64       qum_com.__q_flow_id_val64
#define qum_len                 qum_com.__q_len
#define qum_qflags              qum_com.__q_flags
#define qum_svc_class           qum_com.__q_svc_class

	/*
	 * Userland specific.
	 */
	struct __user_buflet    qum_buf[1];             /* 1 buflet */
	/*
	 * use count for packet.
	 */
	uint16_t qum_usecnt;
} __attribute((aligned(sizeof(uint64_t))));
443 
444 /*
445  * Valid values for (16-bit) qum_qflags.
446  */
#define QUM_F_FINALIZED         0x0001  /* has been finalized */
#define QUM_F_DROPPED           0x0002  /* has been dropped */
#define QUM_F_FLOW_CLASSIFIED   0x0010  /* flow has been classified */
#ifdef KERNEL
#define QUM_F_INTERNALIZED      0x1000  /* has been internalized */
#define QUM_F_KERNEL_ONLY       0x8000  /* kernel only; no user counterpart */

/* invariant flags we want to keep */
#define QUM_F_SAVE_MASK         (QUM_F_KERNEL_ONLY)
/* kernel-only flags that are never externalized */
#define QUM_F_KERNEL_FLAGS      (QUM_F_INTERNALIZED|QUM_F_KERNEL_ONLY)
#endif /* KERNEL */
459 
#ifdef KERNEL
/*
 * Full kernel-quantum constructor: clears the flow id, sets flags and
 * length, stamps the metadata index (via const-cast, since mdp_idx is
 * const), and constructs the embedded first buflet.
 */
#define _KQUM_CTOR(_kqum, _flags, _len, _baddr, _bidx, _dlim, _qidx) do {    \
	(_kqum)->qum_flow_id_val64[0] = 0;                                   \
	(_kqum)->qum_flow_id_val64[1] = 0;                                   \
	(_kqum)->qum_qflags = (_flags);                                      \
	(_kqum)->qum_len = (_len);                                           \
	_CASSERT(sizeof(METADATA_IDX(_kqum)) == sizeof(obj_idx_t));          \
	*(obj_idx_t *)(uintptr_t)&METADATA_IDX(_kqum) = (_qidx);             \
	BUF_CTOR(&(_kqum)->qum_buf[0], (_baddr), (_bidx), (_dlim), 0, 0, 0,  \
	    OBJ_IDX_NONE, 0);                                                \
} while (0)

/* lightweight re-init: flow id, flags, length and buflet data span only */
#define _KQUM_INIT(_kqum, _flags, _len, _qidx) do {                          \
	(_kqum)->qum_flow_id_val64[0] = 0;                                   \
	(_kqum)->qum_flow_id_val64[1] = 0;                                   \
	(_kqum)->qum_qflags = (_flags);                                      \
	(_kqum)->qum_len = (_len);                                           \
	BUF_INIT(&(_kqum)->qum_buf[0], 0, 0);                                \
} while (0)
#endif /* KERNEL */
480 
481 /*
482  * Common packet structure shared by {__user,__kern}_packet.
483  */
struct __packet_com {
	/* Link layer (offset relevant to first buflet) */
	uint16_t __link_flags;                          /* PKT_LINKF_* flags */

	/*
	 * Headroom/protocol header length
	 *
	 * Since the security model of Skywalk nexus is that it doesn't trust
	 * packets either from above (userspace) or below (driver/firmware),
	 * the only metadata field that nexus makes use of from external is the
	 * headroom. Based on headroom, the flowswitch starts demux routine on
	 * l2 header, if any. The l2_len is stored in this step. Then the flow
	 * extraction (l3+l4 flow) begins parsing from (headroom + l2_len).
	 *
	 * __headroom is the empty buffer space before any packet data,
	 * it is also the equivalent to the first header offset.
	 *
	 * __l2_len is l2 (link layer) protocol header length, if any.
	 */
	uint8_t __headroom;
	uint8_t __l2_len;

	/*
	 * Checksum offload.
	 *
	 * Partial checksum does not require any header parsing and is
	 * therefore simpler to implement both in software and hardware.
	 *
	 * On transmit, PKT_CSUMF_PARTIAL indicates that a partial one's
	 * complement checksum to be computed on the span starting from
	 * pkt_csum_tx_start_off to the end of the packet, and have the
	 * resulted checksum value written at the location specified by
	 * pkt_csum_tx_stuff_off.
	 *
	 * The PKT_CSUMF_ZERO_INVERT flag is used on transmit to indicate
	 * that the value 0xffff (negative 0 in one's complement) must be
	 * substituted for the value of 0.  This is set for UDP packets,
	 * since otherwise the receiver may not validate the checksum
	 * (UDP/IPv4), or drop the packet altogether (UDP/IPv6).
	 *
	 * On receive, PKT_CSUMF_PARTIAL indicates that a partial one's
	 * complement checksum has been computed on the span beginning at
	 * pkt_csum_rx_start_off to the end of the packet, and that the
	 * computed value is now stored in pkt_csum_rx_value.
	 *
	 * All offsets are relative to the base of the first buflet.
	 */
	uint32_t __csum_flags;                          /* PKT_CSUMF_* flags */
	union {
		struct {
			uint16_t __csum_start_off;      /* start offset */
			uint16_t __csum_value;          /* checksum value */
		} __csum_rx;
		struct {
			uint16_t __csum_start_off;      /* start offset */
			uint16_t __csum_stuff_off;      /* stuff offset */
		} __csum_tx;
		uint32_t __csum_data;
	};

	/* Compression generation count */
	uint32_t __comp_gencnt;

	/*
	 * Trace ID for each sampled packet.
	 * Non-zero ID indicates that the packet is being actively traced.
	 */
	packet_trace_id_t __trace_id;

	/* Aggregation type */
	uint8_t __aggr_type;                     /* PKT_AGGR_* values */
	uint8_t __seg_cnt;                       /* Number of LRO-packets */

	uint16_t __proto_seg_sz;                 /* Protocol segment size */

	/*
	 * See notes on _PKT_{INTERNALIZE,EXTERNALIZE}() regarding portion
	 * of this structure above __p_flags that gets copied.  Adding
	 * more user-mutable fields after __p_flags would also require
	 * adjusting those macros as well.
	 */
	union {
		volatile uint32_t __flags32[2];
		volatile uint64_t __flags;              /* PKT_F_* flags */
	};
} __attribute((aligned(sizeof(uint64_t))));
570 
struct __packet {
	/* raw 32-byte view overlaid on the common packet metadata */
	union {
		uint64_t                __pkt_data[4];
		struct __packet_com     __pkt_com;
	};
#define __p_link_flags          __pkt_com.__link_flags
#define __p_headroom            __pkt_com.__headroom
#define __p_l2_len              __pkt_com.__l2_len
#define __p_csum_flags          __pkt_com.__csum_flags
#define __p_csum_rx             __pkt_com.__csum_rx
#define __p_csum_tx             __pkt_com.__csum_tx
#define __p_csum_data           __pkt_com.__csum_data
#define __p_comp_gencnt         __pkt_com.__comp_gencnt
#define __p_aggr_type           __pkt_com.__aggr_type
#define __p_seg_cnt             __pkt_com.__seg_cnt
#define __p_proto_seg_sz        __pkt_com.__proto_seg_sz
#define __p_trace_id            __pkt_com.__trace_id
#define __p_flags32             __pkt_com.__flags32
#define __p_flags               __pkt_com.__flags
};
591 
/* optional packet token types (see __packet_opt __token_type below) */
#define PKT_OPT_TOKEN_TYPE_OPAQUE       1 /* token has opaque data */
#define PKT_OPT_TOKEN_TYPE_PACKET_ID    2 /* token has packet_id */

/* maximum token size */
#define PKT_OPT_MAX_TOKEN_SIZE          16
598 
struct __packet_opt_com {
	/* token payload, accessible as bytes or as two 64-bit words */
	union {
		uint64_t        __token_data[2];
		uint8_t         __token[PKT_OPT_MAX_TOKEN_SIZE];
	};
	uint64_t        __expire_ts;
	uint64_t        __pkt_tx_time;
	uint16_t        __vlan_tag;
	uint16_t        __token_len;
	uint8_t         __token_type;   /* PKT_OPT_TOKEN_TYPE_* */
	uint8_t         __expiry_action;
	uint8_t         __app_type;
	uint8_t         __app_metadata;
} __attribute((aligned(sizeof(uint64_t))));
613 
struct __packet_opt {
	/* raw 40-byte view overlaid on the optional packet metadata */
	union {
		uint64_t                __pkt_opt_data[5];
		struct __packet_opt_com __pkt_opt_com;
	};
#define __po_token_type         __pkt_opt_com.__token_type
#define __po_token_len          __pkt_opt_com.__token_len
#define __po_vlan_tag           __pkt_opt_com.__vlan_tag
#define __po_token_data         __pkt_opt_com.__token_data
#define __po_token              __pkt_opt_com.__token
#define __po_expire_ts          __pkt_opt_com.__expire_ts
#define __po_expiry_action      __pkt_opt_com.__expiry_action
#define __po_app_type           __pkt_opt_com.__app_type
#define __po_app_metadata       __pkt_opt_com.__app_metadata
#define __po_pkt_tx_time        __pkt_opt_com.__pkt_tx_time
};
630 
631 /*
632  * Packet.
633  *
634  * This structure is aligned for efficient copy and accesses.
635  * It is the user version of the __kern_packet structure.
636  *
637  * XXX: Do NOT store kernel pointer/address values here.
638  */
struct __user_packet {
	struct __user_quantum   pkt_qum;
/*
 * pkt_flow_id is the flow identifier used by user space stack to identify a
 * flow. This identifier is passed as a metadata on all packets generated by
 * the user space stack. On RX flowswitch fills in this metadata on every
 * packet and can be used by user space stack for flow classification purposes.
 */
#define pkt_flow_id             pkt_qum.qum_flow_id
#define pkt_flow_id_64          pkt_qum.qum_flow_id_val64
#define pkt_qum_qflags          pkt_qum.qum_qflags
#define pkt_length              pkt_qum.qum_len
#define pkt_qum_buf             pkt_qum.qum_buf[0]
#define pkt_svc_class           pkt_qum.qum_svc_class
#ifdef KERNEL
/*
 * pkt_flow_token is a globally unique flow identifier generated by the
 * flowswitch for each flow. Flowswitch stamps every TX packet with this
 * identifier. This is the flow identifier which would be visible to the AQM
 * logic and the driver.
 * pkt_flow_token uses the first 4 bytes of pkt_flow_id as the storage space.
 * This is not a problem as pkt_flow_id is only for flowswitch consumption
 * and is not required by any other module after the flowswitch TX processing
 * stage.
 */
#define pkt_flow_token          pkt_qum.qum_flow_id_val32[0]
#endif /* KERNEL */

	/*
	 * Common area between user and kernel variants.
	 */
	struct __packet pkt_com
#define pkt_link_flags          pkt_com.__p_link_flags
#define pkt_headroom            pkt_com.__p_headroom
#define pkt_l2_len              pkt_com.__p_l2_len
#define pkt_csum_flags          pkt_com.__p_csum_flags
#define pkt_csum_rx_start_off   pkt_com.__p_csum_rx.__csum_start_off
#define pkt_csum_rx_value       pkt_com.__p_csum_rx.__csum_value
#define pkt_csum_tx_start_off   pkt_com.__p_csum_tx.__csum_start_off
#define pkt_csum_tx_stuff_off   pkt_com.__p_csum_tx.__csum_stuff_off
#define pkt_csum_data           pkt_com.__p_csum_data
#define pkt_comp_gencnt         pkt_com.__p_comp_gencnt
#define pkt_aggr_type           pkt_com.__p_aggr_type
#define pkt_seg_cnt             pkt_com.__p_seg_cnt
#define pkt_proto_seg_sz        pkt_com.__p_proto_seg_sz
#define pkt_trace_id            pkt_com.__p_trace_id
/* pkt_pflags32 selects the low 32 bits of __p_flags regardless of endianness */
#if BYTE_ORDER == LITTLE_ENDIAN
#define pkt_pflags32            pkt_com.__p_flags32[0]
#else /* BYTE_ORDER != LITTLE_ENDIAN */
#define pkt_pflags32            pkt_com.__p_flags32[1]
#endif /* BYTE_ORDER != LITTLE_ENDIAN */
#define pkt_pflags              pkt_com.__p_flags

	/*
	 * Optional common metadata.
	 */
	struct __packet_opt pkt_com_opt;

	/*
	 * Userland specific.
	 */

	/*
	 * pkt_{bufs,max} aren't part of the common area, on purpose,
	 * since we selectively update them on internalize/externalize.
	 */
	const uint16_t  pkt_bufs_max;       /* maximum size of buflet chain */
	const uint16_t  pkt_bufs_cnt;       /* buflet chain size */
} __attribute((aligned(sizeof(uint64_t))));
708 
/* the size of __user_packet structure for n total buflets */
/*
 * NOTE(review): (n) is intentionally unused here -- __user_packet embeds
 * exactly one buflet and additional buflets are chained, not laid out
 * inline; confirm against the kernel-side size macro if this changes.
 */
#define _USER_PACKET_SIZE(n) sizeof(struct __user_packet)
711 
/*
 * Valid values for pkt_link_flags.
 */
#define PKT_LINKF_BCAST         0x0001  /* send/received as link-level bcast */
#define PKT_LINKF_MCAST         0x0002  /* send/received as link-level mcast */
#define PKT_LINKF_ETHFCS        0x0004  /* has Ethernet FCS */
718 
719 /*
720  * XXX IMPORTANT - READ THIS XXX
721  *
722  * Valid values for (64-bit) pkt_pflags.
723  *
724  * The lower 32-bit values are equivalent to PKTF_* flags used by mbufs,
 * hence the unused values are reserved.  Do not define any of these
726  * values unless they correspond to PKTF_* flags.  Make sure to do the
727  * following when adding a value in the lower 32-bit range:
728  *
729  * a. If the flag is kernel-only, prefix it with 2 underscore characters,
730  *    then add a PKT_F_* alias under the KERNEL block conditional.  This
731  *    will help ensure that the libsyscall code doesn't mistakenly use it.
732  *
733  * b. In pp_init(), add compile-time assertion to ensure that the PKT_F_*
734  *    value matches the corresponding PKTF_* as defined in <sys/mbuf.h>.
735  *
736  * c. Add the new flag to PKT_F_USER_MASK depending on whether it's allowed
737  *    to be used by userland.  Flags not added to this mask will only be
738  *    used by the kernel.  We only internalize and externalize flags listed
739  *    in PKT_F_USER_MASK.
740  *
741  * d. Add the new flag to PKT_F_COMMON_MASK.
742  *
743  * When adding an upper 32-bit value, ensure (a) and (c) above are done.
744  *
745  * Legend:
746  *
747  * (K)        - Kernel-only
748  * (U+K)      - User and kernel
749  * (reserved) - Only to be used for mapping with mbuf PKTF_* flags
750  */
/*
 * Lower 32-bit flags.  Slots marked (reserved) are deliberately left
 * vacant so that each bit value lines up with the corresponding PKTF_*
 * flag in <sys/mbuf.h> (see legend and notes above).
 */
#define __PKT_F_FLOW_ID         0x0000000000000001ULL /* (K) */
#define __PKT_F_FLOW_ADV        0x0000000000000002ULL /* (K) */
/*                              0x0000000000000004ULL    (reserved) */
/*                              0x0000000000000008ULL    (reserved) */
/*                              0x0000000000000010ULL    (reserved) */
/*                              0x0000000000000020ULL    (reserved) */
/*                              0x0000000000000040ULL    (reserved) */
/*                              0x0000000000000080ULL    (reserved) */
/*                              0x0000000000000100ULL    (reserved) */
/*                              0x0000000000000200ULL    (reserved) */
#define PKT_F_WAKE_PKT          0x0000000000000400ULL /* (U+K) */
/*                              0x0000000000000800ULL    (reserved) */
/*                              0x0000000000001000ULL    (reserved) */
/*                              0x0000000000002000ULL    (reserved) */
/*                              0x0000000000004000ULL    (reserved) */
#define PKT_F_BACKGROUND        0x0000000000008000ULL /* (U+K) */
/*                              0x0000000000010000ULL    (reserved) */
/*                              0x0000000000020000ULL    (reserved) */
#define PKT_F_KEEPALIVE         0x0000000000040000ULL /* (U+K) */
#define PKT_F_REALTIME          0x0000000000080000ULL /* (U+K) */
/*                              0x0000000000100000ULL    (reserved) */
#define PKT_F_REXMT             0x0000000000200000ULL /* (U+K) */
/*                              0x0000000000400000ULL    (reserved) */
#define __PKT_F_TX_COMPL_TS_REQ 0x0000000000800000ULL /* (K) */
#define __PKT_F_TS_VALID        0x0000000001000000ULL /* (K) */
/*                              0x0000000002000000ULL    (reserved) */
#define __PKT_F_NEW_FLOW        0x0000000004000000ULL /* (K) */
#define __PKT_F_START_SEQ       0x0000000008000000ULL /* (K) */
#define PKT_F_LAST_PKT          0x0000000010000000ULL /* (U+K) */
/*                              0x0000000020000000ULL    (reserved) */
/*                              0x0000000040000000ULL    (reserved) */
/*                              0x0000000080000000ULL    (reserved) */
/*                              ---------------------    upper 32-bit below */
/*
 * Upper 32-bit flags.  These have no PKTF_* counterparts and are never
 * mapped to/from mbuf flags; only (a) and (c) of the notes above apply.
 */
#define PKT_F_OPT_GROUP_START   0x0000000100000000ULL /* (U+K) */
#define PKT_F_OPT_GROUP_END     0x0000000200000000ULL /* (U+K) */
#define PKT_F_OPT_EXPIRE_TS     0x0000000400000000ULL /* (U+K) */
#define PKT_F_OPT_TOKEN         0x0000000800000000ULL /* (U+K) */
#define __PKT_F_FLOW_DATA       0x0000001000000000ULL /* (K) */
#define __PKT_F_TX_COMPL_DATA   0x0000002000000000ULL /* (K) */
#define __PKT_F_MBUF_DATA       0x0000004000000000ULL /* (K) */
#define PKT_F_TRUNCATED         0x0000008000000000ULL /* (U+K) */
#define __PKT_F_PKT_DATA        0x0000010000000000ULL /* (K) */
#define PKT_F_PROMISC           0x0000020000000000ULL /* (U+K) */
#define PKT_F_OPT_VLTAG         0x0000040000000000ULL /* (U+K) */
#define PKT_F_OPT_VLTAG_IN_PKT  0x0000080000000000ULL /* (U+K) */
#define __PKT_F_TX_PORT_DATA    0x0000100000000000ULL /* (K) */
#define PKT_F_OPT_EXP_ACTION    0x0000200000000000ULL /* (U+K) */
#define PKT_F_OPT_APP_METADATA  0x0000400000000000ULL /* (U+K) */
#define PKT_F_L4S               0x0000800000000000ULL /* (U+K) */
#define PKT_F_OPT_TX_TIMESTAMP  0x0001000000000000ULL /* (U+K) */
/*                              0x0002000000000000ULL */
/*                              0x0004000000000000ULL */
/*                              0x0008000000000000ULL */
/*                              0x0010000000000000ULL */
/*                              0x0020000000000000ULL */
/*                              0x0040000000000000ULL */
/*                              0x0080000000000000ULL */
#define __PKT_F_OPT_ALLOC       0x0100000000000000ULL /* (K) */
#define __PKT_F_FLOW_ALLOC      0x0200000000000000ULL /* (K) */
#define __PKT_F_TX_COMPL_ALLOC  0x0400000000000000ULL /* (K) */
/*                              0x0800000000000000ULL */
/*                              0x1000000000000000ULL */
/*                              0x2000000000000000ULL */
/*                              0x4000000000000000ULL */
/*                              0x8000000000000000ULL */
816 
/*
 * Packet option flags: the union of all user-visible (U+K) PKT_F_OPT_*
 * flags defined above (the kernel-only __PKT_F_OPT_ALLOC is excluded).
 */
#define PKT_F_OPT_DATA                                                  \
	(PKT_F_OPT_GROUP_START | PKT_F_OPT_GROUP_END |                  \
	PKT_F_OPT_EXPIRE_TS | PKT_F_OPT_TOKEN |                         \
	PKT_F_OPT_VLTAG | PKT_F_OPT_VLTAG_IN_PKT | PKT_F_OPT_EXP_ACTION | \
	PKT_F_OPT_APP_METADATA | PKT_F_OPT_TX_TIMESTAMP)
825 
#ifdef KERNEL
/*
 * Flags exposed to user (and kernel).  See notes above.  Only flags in
 * this mask are internalized from and externalized to userland; all
 * others are stripped at the user/kernel boundary.
 */
#define PKT_F_USER_MASK                                                 \
	(PKT_F_BACKGROUND | PKT_F_REALTIME | PKT_F_REXMT |              \
	PKT_F_LAST_PKT | PKT_F_OPT_DATA | PKT_F_PROMISC |               \
	PKT_F_TRUNCATED | PKT_F_WAKE_PKT | PKT_F_L4S)
834 
/*
 * Aliases for kernel-only flags, dropping the double-underscore prefix
 * for kernel code.  See notes above.  The ones marked with (common)
 * have corresponding PKTF_* definitions and are also included in
 * PKT_F_COMMON_MASK below.
 */
#define PKT_F_FLOW_ID           __PKT_F_FLOW_ID         /* (common) */
#define PKT_F_FLOW_ADV          __PKT_F_FLOW_ADV        /* (common) */
#define PKT_F_TX_COMPL_TS_REQ   __PKT_F_TX_COMPL_TS_REQ /* (common) */
#define PKT_F_TS_VALID          __PKT_F_TS_VALID        /* (common) */
#define PKT_F_NEW_FLOW          __PKT_F_NEW_FLOW        /* (common) */
#define PKT_F_START_SEQ         __PKT_F_START_SEQ       /* (common) */
#define PKT_F_FLOW_DATA         __PKT_F_FLOW_DATA
#define PKT_F_TX_COMPL_DATA     __PKT_F_TX_COMPL_DATA
#define PKT_F_MBUF_DATA         __PKT_F_MBUF_DATA
#define PKT_F_PKT_DATA          __PKT_F_PKT_DATA
#define PKT_F_OPT_ALLOC         __PKT_F_OPT_ALLOC
#define PKT_F_FLOW_ALLOC        __PKT_F_FLOW_ALLOC
#define PKT_F_TX_COMPL_ALLOC    __PKT_F_TX_COMPL_ALLOC
#define PKT_F_TX_PORT_DATA      __PKT_F_TX_PORT_DATA
854 
/*
 * Flags related to an mbuf attached to the packet.
 */
#define PKT_F_MBUF_MASK         (PKT_F_MBUF_DATA | PKT_F_TRUNCATED)
859 
/*
 * Flags related to another packet attached to this packet.
 */
#define PKT_F_PKT_MASK         (PKT_F_PKT_DATA | PKT_F_TRUNCATED)
864 
/*
 * Invariant flags kept during _PKT_COPY().  At the moment we keep
 * all except those related to the attached mbuf or attached packet
 * (PKT_F_MBUF_MASK and PKT_F_PKT_MASK above).
 */
#define PKT_F_COPY_MASK         (~(PKT_F_MBUF_MASK | PKT_F_PKT_MASK))
870 
/*
 * Lower 32-bit flags common to mbuf and __kern_packet.  See notes above.
 * Each flag here uses the same bit value as its PKTF_* counterpart.
 * DO NOT add flags to this mask unless they have equivalent PKTF_* flags
 * defined in <sys/mbuf.h>
 */
#define PKT_F_COMMON_MASK                                               \
	(PKT_F_BACKGROUND | PKT_F_REALTIME | PKT_F_REXMT |              \
	PKT_F_LAST_PKT | PKT_F_FLOW_ID | PKT_F_FLOW_ADV |               \
	PKT_F_TX_COMPL_TS_REQ | PKT_F_TS_VALID | PKT_F_NEW_FLOW |       \
	PKT_F_START_SEQ | PKT_F_KEEPALIVE | PKT_F_WAKE_PKT)
881 
/*
 * Flags retained across alloc/free: the three kernel-only (K)
 * __PKT_F_*_ALLOC allocation-state flags defined above.
 */
#define PKT_F_INIT_MASK                                                 \
	(PKT_F_OPT_ALLOC | PKT_F_FLOW_ALLOC | PKT_F_TX_COMPL_ALLOC)
#endif /* KERNEL */
888 
/*
 * 64-bit tagged pointer (limit tag to least significant byte).
 * We use 2 bits to encode type, and another 2 bits for subtype:
 * type in bits [1:0], subtype in bits [3:2], pointer in bits [63:4].
 * NOTE(review): the encoding assumes the pointed-to object is at
 * least 16-byte aligned so the low 4 address bits are zero — confirm
 * against the allocator.
 */
#define SK_PTR_TYPE_MASK        ((uint64_t)0x3)         /* 00 11 */
#define SK_PTR_SUBTYPE_MASK     ((uint64_t)0xc)         /* 11 00 */
#define SK_PTR_TAG_MASK         ((uint64_t)0xf)         /* 11 11 */

/* extract the full 4-bit tag (type + subtype) from a tagged pointer */
#define SK_PTR_TAG(_p)          ((uint64_t)(_p) & SK_PTR_TAG_MASK)
#define SK_PTR_ADDR_MASK        (~SK_PTR_TAG_MASK)

/* type occupies bits [1:0]; encoded and extracted in place */
#define SK_PTR_TYPE(_p)         ((uint64_t)(_p) & SK_PTR_TYPE_MASK)
#define SK_PTR_TYPE_ENC(_t)     ((uint64_t)(_t) & SK_PTR_TYPE_MASK)

/* subtype occupies bits [3:2]; shifted to/from a 0-3 value */
#define SK_PTR_SUBTYPE(_p)      (((uint64_t)(_p) & SK_PTR_SUBTYPE_MASK) >> 2)
#define SK_PTR_SUBTYPE_ENC(_s)  (((uint64_t)(_s) << 2) & SK_PTR_SUBTYPE_MASK)

/* strip the tag bits to recover/encode the raw pointer value */
#define SK_PTR_ADDR(_p)         ((uint64_t)(_p) & SK_PTR_ADDR_MASK)
#define SK_PTR_ADDR_ENC(_p)     ((uint64_t)(_p) & SK_PTR_ADDR_MASK)

/* compose a tagged pointer from address _p, type _t and subtype _s */
#define SK_PTR_ENCODE(_p, _t, _s)       \
	(SK_PTR_ADDR_ENC(_p) | SK_PTR_TYPE_ENC(_t) | SK_PTR_SUBTYPE_ENC(_s))

/* untag and cast to the user-visible quantum/packet metadata types */
#define SK_PTR_ADDR_UQUM(_ph)   ((struct __user_quantum *)SK_PTR_ADDR(_ph))
#define SK_PTR_ADDR_UPKT(_ph)   ((struct __user_packet *)SK_PTR_ADDR(_ph))
914 
#ifdef KERNEL
__BEGIN_DECLS
/*
 * Packets.
 */
/* presumably returns the mbuf attached to the packet (see
 * PKT_F_MBUF_DATA above) — verify against the implementation */
extern struct mbuf *kern_packet_get_mbuf(const kern_packet_t);
__END_DECLS
#else /* !KERNEL */
#if defined(LIBSYSCALL_INTERFACE)
__BEGIN_DECLS
/* failure handlers invoked when a tagged-pointer type/subtype check
 * (SK_PTR_TYPE/SK_PTR_SUBTYPE above) does not match the expected value;
 * NOTE(review): implementation lives in Libsyscall, not visible here */
extern void pkt_subtype_assert_fail(const packet_t, uint64_t, uint64_t);
extern void pkt_type_assert_fail(const packet_t, uint64_t);
__END_DECLS
#endif /* LIBSYSCALL_INTERFACE */
#endif /* !KERNEL */
930 #if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE)
931 #include <skywalk/packet_common.h>
932 #endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */
933 #endif /* PRIVATE || BSD_KERNEL_PRIVATE */
934 #endif /* !_SKYWALK_OS_PACKET_PRIVATE_H_ */
935