xref: /xnu-10063.121.3/bsd/sys/mbuf.h (revision 2c2f96dc2b9a4408a43d3150ae9c105355ca3daa)
1 /*
2  * Copyright (c) 1999-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */
29 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 /*
31  * Mach Operating System
32  * Copyright (c) 1987 Carnegie-Mellon University
33  * All rights reserved.  The CMU software License Agreement specifies
34  * the terms and conditions for use and redistribution.
35  */
36 /*
37  * Copyright (c) 1994 NeXT Computer, Inc. All rights reserved.
38  *
39  * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *      This product includes software developed by the University of
53  *      California, Berkeley and its contributors.
54  * 4. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)mbuf.h	8.3 (Berkeley) 1/21/94
71  */
72 /*
73  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
74  * support for mandatory and extensible security protections.  This notice
75  * is included in support of clause 2.2 (b) of the Apple Public License,
76  * Version 2.0.
77  */
78 
79 #ifndef _SYS_MBUF_H_
80 #define _SYS_MBUF_H_
81 
82 #include <sys/appleapiopts.h>
83 #include <sys/cdefs.h>
84 #include <sys/_types/_u_int32_t.h> /* u_int32_t */
85 #include <sys/_types/_u_int64_t.h> /* u_int64_t */
86 #include <sys/_types/_u_short.h> /* u_short */
87 
88 #ifdef KERNEL
89 #include <sys/kpi_mbuf.h>
90 #endif
91 
92 #ifdef XNU_KERNEL_PRIVATE
93 #include <sys/lock.h>
94 #include <sys/queue.h>
95 #include <machine/endian.h>
96 /*
97  * Mbufs are of a single size, which includes overhead.
98  * An mbuf may add a single "mbuf cluster" of size
99  * MCLBYTES/MBIGCLBYTES/M16KCLBYTES (also in machine/param.h), which has
100  * no additional overhead and is used instead of the internal data area;
101  * this is done when at least MINCLSIZE of data must be stored.
102  */
103 #if CONFIG_MBUF_MCACHE
104 #define _MSIZESHIFT      8                       /* 256 */
105 #define _MSIZE           (1 << _MSIZESHIFT)       /* size of an mbuf */
106 #else /* CONFIG_MBUF_MCACHE */
107 #define _MSIZE           512
108 #endif  /* CONFIG_MBUF_MCACHE */
109 
110 #define NCLPGSHIFT      (PAGE_SHIFT - MCLSHIFT)
111 #define NCLPG           (1 << NCLPGSHIFT)       /* # of cl per page */
112 
113 #define NBCLPGSHIFT     (PAGE_SHIFT - MBIGCLSHIFT)
114 #define NBCLPG          (1 << NBCLPGSHIFT)      /* # of big cl per page */
115 
116 #define NMBPCL             (MCLBYTES / _MSIZE)
117 
118 #define NCLPJCLSHIFT    (M16KCLSHIFT - MCLSHIFT)
119 #define NCLPJCL         (1 << NCLPJCLSHIFT)     /* # of cl per jumbo cl */
120 
121 #define NCLPBGSHIFT     (MBIGCLSHIFT - MCLSHIFT)
122 #define NCLPBG          (1 << NCLPBGSHIFT)      /* # of cl per big cl */
123 
124 /*
125  * Macros for type conversion
126  * mtod(m,t) -	convert mbuf pointer to data pointer of correct type
127  * mtodo(m, o) -- Same as above but with offset 'o' into data.
128  */
129 #define mtod(m, t)      ((t)(void *)m_mtod_current(m))
130 #define mtodo(m, o)     ((void *)(mtod(m, uint8_t *) + (o)))
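/*
 * Illustrative usage sketch: reading protocol headers through mtod()/mtodo(),
 * assuming both headers are contiguous within this mbuf (otherwise the data
 * must first be pulled up, e.g. with m_pulldown or M_STRUCT_GET below):
 *
 *	struct ip *ip = mtod(m, struct ip *);
 *	struct tcphdr *th = mtodo(m, ip->ip_hl << 2);
 */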
131 
132 /* header at beginning of each mbuf: */
133 struct m_hdr {
134 	struct mbuf                *mh_next;       /* next buffer in chain */
135 	struct mbuf                *mh_nextpkt;    /* next chain in queue/record */
136 	uintptr_t                  mh_data;        /* location of data */
137 	int32_t                    mh_len;         /* amount of data in this mbuf */
138 	u_int16_t                  mh_type;        /* type of data in this mbuf */
139 	u_int16_t                  mh_flags;       /* flags; see below */
140 #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
141 /* This is needed because of how _MLEN is defined and used. Ideally, _MLEN
142  * should be defined using the offsetof(struct mbuf, M_dat), since there is
143  * no guarantee that mbuf.M_dat will start where mbuf.m_hdr ends. The compiler
144  * may (and does in the armv7k case) insert padding between m_hdr and M_dat in
145  * mbuf. We cannot easily use offsetof, however, since _MLEN is referenced
146  * in the definition of mbuf.
147  */
148 } __attribute__((aligned(8)));
149 #else
150 };
151 #endif
152 
153 /*
154  * Packet tag structure (see below for details).
155  */
156 struct m_tag {
157 	uint64_t               m_tag_cookie;   /* Error checking */
158 	SLIST_ENTRY(m_tag)     m_tag_link;     /* List of packet tags */
159 	void                   *m_tag_data;
160 	uint16_t               m_tag_type;     /* Module specific type */
161 	uint16_t               m_tag_len;      /* Length of data */
162 	uint32_t               m_tag_id;       /* Module ID */
163 	void                   *m_tag_mb_cl;    /* pointer to mbuf or cluster container */
164 #ifndef __LP64__
165 	u_int32_t              m_tag_pad;
166 #endif /* !__LP64__ */
167 };
168 
169 #define M_TAG_ALIGN(len) \
170 	(P2ROUNDUP(len, sizeof (u_int64_t)) + sizeof (struct m_tag))
171 
172 #define M_TAG_INIT(tag, id, type, len, data, mb_cl) {   \
173 	VERIFY(IS_P2ALIGNED((tag), sizeof(u_int64_t)));     \
174 	(tag)->m_tag_type = (type);                         \
175 	(tag)->m_tag_len = (uint16_t)(len);                 \
176 	(tag)->m_tag_id = (id);                             \
177 	(tag)->m_tag_data = (data);                         \
178 	(tag)->m_tag_mb_cl = (mb_cl);                       \
179 	m_tag_create_cookie(tag);                           \
180 }
181 
182 #define M_TAG_VALID_PATTERN     0xfeedfacefeedfaceULL
183 #define M_TAG_FREE_PATTERN      0xdeadbeefdeadbeefULL
184 
185 /*
186  * Packet tag header structure at the top of the mbuf when mbufs are used for m_tags.
187  * Pointers are 32-bit in ILP32; m_tag needs 64-bit alignment, hence padded.
188  */
189 struct m_taghdr {
190 #ifndef __LP64__
191 	u_int32_t               pad;            /* For structure alignment */
192 #endif /* !__LP64__ */
193 	u_int64_t               mth_refcnt;         /* Number of tags in this mbuf */
194 };
195 
196 /*
197  * Driver auxiliary metadata tag (KERNEL_TAG_TYPE_DRVAUX).
198  */
199 struct m_drvaux_tag {
200 	u_int32_t       da_family;      /* IFNET_FAMILY values */
201 	u_int32_t       da_subfamily;   /* IFNET_SUBFAMILY values */
202 	u_int32_t       da_reserved;    /* for future */
203 	u_int32_t       da_length;      /* length of following data */
204 };
205 
206 /* Values for pftag_flags (16-bit wide) */
207 #define PF_TAG_GENERATED                0x1     /* pkt generated by PF */
208 #define PF_TAG_FRAGCACHE                0x2
209 #define PF_TAG_TRANSLATE_LOCALHOST      0x4
210 #if PF_ECN
211 #define PF_TAG_HDR_INET                 0x8     /* hdr points to IPv4 */
212 #define PF_TAG_HDR_INET6                0x10    /* hdr points to IPv6 */
213 #endif /* PF_ECN */
214 #define PF_TAG_REASSEMBLED              0x20    /* pkt reassembled by PF */
215 #define PF_TAG_REFRAGMENTED             0x40    /* pkt refragmented by PF */
216 /*
217  * PF mbuf tag
218  */
219 struct pf_mtag {
220 	u_int16_t       pftag_flags;    /* PF_TAG flags */
221 	u_int16_t       pftag_rtableid; /* alternate routing table id */
222 	u_int16_t       pftag_tag;
223 	u_int16_t       pftag_routed;
224 #if PF_ECN
225 	void            *pftag_hdr;     /* saved hdr pos in mbuf, for ECN */
226 #endif /* PF_ECN */
227 };
228 
229 /* System reserved PF tags */
230 #define PF_TAG_ID_SYSTEM_SERVICE        0xff00
231 #define PF_TAG_ID_STACK_DROP            0xff01
232 
233 /*
234  * PF fragment tag
235  */
236 struct pf_fragment_tag {
237 	uint32_t ft_id;     /* fragment id */
238 	uint16_t ft_hdrlen; /* header length of reassembled pkt */
239 	uint16_t ft_unfragpartlen; /* length of the per-fragment headers */
240 	uint16_t ft_extoff; /* last extension header offset or 0 */
241 	uint16_t ft_maxlen; /* maximum fragment payload length */
242 };
243 
244 /*
245  * TCP mbuf tag
246  */
247 struct tcp_pktinfo {
248 	union {
249 		struct {
250 			uint32_t segsz;        /* segment size (actual MSS) */
251 			uint32_t start_seq;    /* start seq of this packet */
252 			pid_t     pid;
253 			pid_t     e_pid;
254 		} __tx;
255 		struct {
256 			uint8_t  seg_cnt;    /* # of coalesced TCP pkts */
257 		} __rx;
258 	} __offload;
259 #define tso_segsz       proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.segsz
260 #define tx_start_seq    proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.start_seq
261 #define tx_tcp_pid      proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.pid
262 #define tx_tcp_e_pid    proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.e_pid
263 #define seg_cnt         proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.seg_cnt
264 };
265 
266 /*
267  * MPTCP mbuf tag
268  */
269 struct mptcp_pktinfo {
270 	uint64_t       mtpi_dsn;       /* MPTCP Data Sequence Number */
271 	uint32_t       mtpi_rel_seq;   /* Relative Seq Number */
272 	uint16_t       mtpi_length;    /* Length of mapping */
273 	uint16_t       mtpi_csum;
274 #define mp_dsn          proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_dsn
275 #define mp_rseq         proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_rel_seq
276 #define mp_rlen         proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_length
277 #define mp_csum         proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_csum
278 };
279 
280 /*
281  * TCP specific mbuf tag.  Note that the current implementation uses
282  * MPTCP metadata strictly between MPTCP and the TCP subflow layers,
283  * hence tm_tcp and tm_mptcp are mutually exclusive.  This also means
284  * that TCP messages functionality is currently incompatible with MPTCP.
285  */
286 struct tcp_mtag {
287 	union {
288 		struct tcp_pktinfo      tm_tcp;         /* TCP and below */
289 		struct mptcp_pktinfo    tm_mptcp;       /* MPTCP-TCP only */
290 	};
291 };
292 
293 struct udp_mtag {
294 	pid_t     _pid;
295 	pid_t     _e_pid;
296 #define tx_udp_pid      proto_mtag.__pr_u.udp._pid
297 #define tx_udp_e_pid    proto_mtag.__pr_u.udp._e_pid
298 };
299 
300 struct rawip_mtag {
301 	pid_t     _pid;
302 	pid_t     _e_pid;
303 #define tx_rawip_pid    proto_mtag.__pr_u.rawip._pid
304 #define tx_rawip_e_pid  proto_mtag.__pr_u.rawip._e_pid
305 };
306 
307 struct driver_mtag_ {
308 	uintptr_t               _drv_tx_compl_arg;
309 	uintptr_t               _drv_tx_compl_data;
310 	kern_return_t           _drv_tx_status;
311 	uint16_t                _drv_flowid;
312 #define drv_tx_compl_arg        builtin_mtag._drv_mtag._drv_tx_compl_arg
313 #define drv_tx_compl_data       builtin_mtag._drv_mtag._drv_tx_compl_data
314 #define drv_tx_status           builtin_mtag._drv_mtag._drv_tx_status
315 #define drv_flowid              builtin_mtag._drv_mtag._drv_flowid
316 };
317 
318 /*
319  * Protocol specific mbuf tag (at most one protocol metadata per mbuf).
320  *
321  * Care must be taken to ensure that they are mutually exclusive, e.g.
322  * IPsec policy ID implies no TCP segment offload (which is fine given
323  * that the former is used on the virtual ipsec interface that does
324  * not advertise the TSO capability.)
325  */
326 struct proto_mtag_ {
327 	union {
328 		struct tcp_mtag tcp;            /* TCP specific */
329 		struct udp_mtag         udp;    /* UDP specific */
330 		struct rawip_mtag       rawip;  /* raw IPv4/IPv6 specific */
331 	} __pr_u;
332 };
333 
334 /*
335  * NECP specific mbuf tag.
336  */
337 struct necp_mtag_ {
338 	u_int32_t       necp_policy_id;
339 	u_int32_t       necp_skip_policy_id;
340 	u_int32_t       necp_route_rule_id;
341 	u_int16_t       necp_last_interface_index;
342 	u_int16_t       necp_app_id;
343 };
344 
345 union builtin_mtag {
346 	struct {
347 		struct proto_mtag_ _proto_mtag; /* built-in protocol-specific tag */
348 		struct pf_mtag  _pf_mtag;       /* built-in PF tag */
349 		struct necp_mtag_ _necp_mtag; /* built-in NECP tag */
350 	} _net_mtag;
351 	struct driver_mtag_ _drv_mtag;
352 #define necp_mtag builtin_mtag._net_mtag._necp_mtag
353 #define proto_mtag builtin_mtag._net_mtag._proto_mtag
354 #define driver_mtag builtin_mtag._drv_mtag
355 };
356 
357 /*
358  * Record/packet header in first mbuf of chain; valid only if M_PKTHDR set.
359  */
360 struct pkthdr {
361 	struct ifnet *rcvif;            /* rcv interface */
362 	/* variables for ip and tcp reassembly */
363 	void    *pkt_hdr;               /* pointer to packet header */
364 	int32_t len;                    /* total packet length */
365 	/* variables for hardware checksum */
366 	/* Note: csum_flags is used for hardware checksum and VLAN */
367 	u_int32_t csum_flags;           /* flags regarding checksum */
368 	union {
369 		struct {
370 			u_int16_t val;   /* checksum value */
371 			u_int16_t start; /* checksum start offset */
372 		} _csum_rx;
373 #define csum_rx_val     _csum_rx.val
374 #define csum_rx_start   _csum_rx.start
375 		struct {
376 			u_int16_t start; /* checksum start offset */
377 			u_int16_t stuff; /* checksum stuff offset */
378 		} _csum_tx;
379 #define csum_tx_start   _csum_tx.start
380 #define csum_tx_stuff   _csum_tx.stuff
381 		/*
382 		 * Generic data field used by csum routines.
383 		 * It gets used differently in different contexts.
384 		 */
385 		u_int32_t csum_data;
386 	};
387 	u_int16_t vlan_tag;             /* VLAN tag, host byte order */
388 	/*
389 	 * Packet classifier info
390 	 *
391 	 * PKTF_FLOW_ID set means valid flow ID.  A non-zero flow ID value
392 	 * means the packet has been classified by one of the flow sources.
393 	 * It is also a prerequisite for flow control advisory, which is
394 	 * enabled by additionally setting PKTF_FLOW_ADV.
395 	 *
396 	 * The protocol value is a best-effort representation of the payload.
397 	 * It is opportunistically updated and used only for optimization.
398 	 * It is not a substitute for parsing the protocol header(s); use it
399 	 * only as a hint.
400 	 *
401 	 * If PKTF_IFAINFO is set, pkt_ifainfo contains one or both of the
402 	 * indices of interfaces which own the source and/or destination
403 	 * addresses of the packet.  For the local/loopback case (PKTF_LOOP),
404  * both should be valid, thus allowing the receiving end to
405  * quickly determine the actual interfaces used by the addresses;
406 	 * they may not necessarily be the same or refer to the loopback
407 	 * interface.  Otherwise, in the non-local/loopback case, the indices
408 	 * are opportunistically set, and because of that only one may be set
409 	 * (0 means the index has not been determined.)  In addition, the
410 	 * interface address flags are also recorded.  This allows us to avoid
411 	 * storing the corresponding {in,in6}_ifaddr in an mbuf tag.  Ideally
412 	 * this would be a superset of {ia,ia6}_flags, but the namespaces are
413 	 * overlapping at present, so we'll need a new set of values in future
414 	 * to achieve this.  For now, we will just rely on the address family
415 	 * related code paths examining this mbuf to interpret the flags.
416 	 */
417 	u_int8_t pkt_proto;             /* IPPROTO value */
418 	u_int8_t pkt_flowsrc;           /* FLOWSRC values */
419 	u_int32_t pkt_flowid;           /* flow ID */
420 	u_int32_t pkt_flags;            /* PKTF flags (see below) */
421 	u_int32_t pkt_svc;              /* MBUF_SVC value */
422 
423 	u_int32_t pkt_compl_context;            /* Packet completion context */
424 
425 	union {
426 		struct {
427 			u_int16_t src;          /* ifindex of src addr i/f */
428 			u_int16_t src_flags;    /* src PKT_IFAIFF flags */
429 			u_int16_t dst;          /* ifindex of dst addr i/f */
430 			u_int16_t dst_flags;    /* dst PKT_IFAIFF flags */
431 		} _pkt_iaif;
432 #define src_ifindex     _pkt_iaif.src
433 #define src_iff         _pkt_iaif.src_flags
434 #define dst_ifindex     _pkt_iaif.dst
435 #define dst_iff         _pkt_iaif.dst_flags
436 		u_int64_t pkt_ifainfo;  /* data field used by ifainfo */
437 		struct {
438 			u_int32_t if_data; /* bytes in interface queue */
439 			u_int32_t sndbuf_data; /* bytes in socket buffer */
440 		} _pkt_bsr;     /* Buffer status report used by cellular interface */
441 #define bufstatus_if    _pkt_bsr.if_data
442 #define bufstatus_sndbuf        _pkt_bsr.sndbuf_data
443 	};
444 	u_int64_t pkt_timestamp;        /* TX: enqueue time, RX: receive timestamp */
445 
446 	/*
447 	 * Tags (external and built-in)
448 	 */
449 	SLIST_HEAD(packet_tags, m_tag) tags; /* list of external tags */
450 	union builtin_mtag builtin_mtag;
451 
452 	uint32_t comp_gencnt;
453 	uint16_t pkt_ext_flags;
454 	uint16_t pkt_crumbs;
455 	/*
456 	 * Module private scratch space (32-bit aligned), currently 16 bytes
457 	 * large. Anything stored here is not guaranteed to survive across
458 	 * modules.  The AQM layer (outbound) uses all 16 bytes for both
459 	 * packet scheduling and flow advisory information.
460 	 */
461 	struct {
462 		union {
463 			u_int8_t        __mpriv8[16];
464 			u_int16_t       __mpriv16[8];
465 			struct {
466 				union {
467 					u_int8_t        __val8[4];
468 					u_int16_t       __val16[2];
469 					u_int32_t       __val32;
470 				} __mpriv32_u;
471 			}               __mpriv32[4];
472 			u_int64_t       __mpriv64[2];
473 		} __mpriv_u;
474 	} pkt_mpriv __attribute__((aligned(4)));
475 #define pkt_mpriv_hash  pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val32
476 #define pkt_mpriv_flags pkt_mpriv.__mpriv_u.__mpriv32[1].__mpriv32_u.__val32
477 #define pkt_mpriv_srcid pkt_mpriv.__mpriv_u.__mpriv32[2].__mpriv32_u.__val32
478 #define pkt_mpriv_fidx  pkt_mpriv.__mpriv_u.__mpriv32[3].__mpriv32_u.__val32
479 
480 	u_int32_t redzone;              /* red zone */
481 	u_int32_t pkt_compl_callbacks;  /* Packet completion callbacks */
482 };
483 
484 /*
485  * Flow data source type.  A data source module is responsible for generating
486  * a unique flow ID and associating it with each data flow as pkt_flowid.
487  * This is required for flow control/advisory, as it allows the output queue
488  * to identify the data source object and inform it that it can resume
489  * transmission (in the event it was flow controlled).
490  */
491 #define FLOWSRC_INPCB           1       /* flow ID generated by INPCB */
492 #define FLOWSRC_IFNET           2       /* flow ID generated by interface */
493 #define FLOWSRC_PF              3       /* flow ID generated by PF */
494 #define FLOWSRC_CHANNEL         4       /* flow ID generated by channel */
495 
496 /*
497  * FLOWSRC_MPKL_INPUT is not a true flow data source and is used for
498  * multi-layer packet logging. We're usurping the pkt_flowsrc field because
499  * the mbuf packet header ran out of space and pkt_flowsrc is normally
500  * used on output, so we assume we can safely overwrite the normal semantics of
501  * the field.
502  * This value is meant to be used on incoming packets from a lower level protocol
503  * to pass information to some upper level protocol. When FLOWSRC_MPKL_INPUT
504  * is set, the following fields are used:
505  * - pkt_proto: the IP protocol ID of the lower level protocol
506  * - pkt_flowid: the identifier of the packet at the lower protocol.
507  * For example ESP would set pkt_proto to IPPROTO_ESP and pkt_flowid to the SPI.
508  */
509 
510 /*
511  * Packet flags.  Unlike m_flags, all packet flags are copied along when
512  * copying m_pkthdr, i.e. no equivalent of M_COPYFLAGS here.  These flags
513  * (and other classifier info) will be cleared during DLIL input.
514  *
515  * Some notes about M_LOOP and PKTF_LOOP:
516  *
517  *    - M_LOOP flag is overloaded, and its use is discouraged.  Historically,
518  *	that flag was used by the KAME implementation for allowing certain
519  *	exceptions to be made in the IP6_EXTHDR_CHECK() logic; this
520  *	was originally meant to be set as the packet is looped back to the
521  *	system, and in some circumstances temporarily set in ip6_output().
522  *	Over time, this flag was used by the pre-output routines to indicate
523  *	to the DLIL frameout and output routines, that the packet may be
524  *	looped back to the system under the right conditions.  In addition,
525  *	this is an mbuf flag rather than an mbuf packet header flag.
526  *
527  *    - PKTF_LOOP is an mbuf packet header flag, which is set if and only
528  *	if the packet was looped back to the system.  This flag should be
529  *	used instead for newer code.
530  */
531 #define PKTF_FLOW_ID            0x1     /* pkt has valid flowid value */
532 #define PKTF_FLOW_ADV           0x2     /* pkt triggers local flow advisory */
533 #define PKTF_FLOW_LOCALSRC      0x4     /* pkt is locally originated  */
534 #define PKTF_FLOW_RAWSOCK       0x8     /* pkt locally generated by raw sock */
535 #define PKTF_PRIO_PRIVILEGED    0x10    /* packet priority is privileged */
536 #define PKTF_PROXY_DST          0x20    /* processed but not locally destined */
537 #define PKTF_INET_RESOLVE       0x40    /* IPv4 resolver packet */
538 #define PKTF_INET6_RESOLVE      0x80    /* IPv6 resolver packet */
539 #define PKTF_RESOLVE_RTR        0x100   /* pkt is for resolving router */
540 #define PKTF_SKIP_PKTAP         0x200   /* pkt has already passed through pktap */
541 #define PKTF_WAKE_PKT           0x400   /* packet caused system to wake from sleep */
542 #define PKTF_MPTCP              0x800   /* TCP with MPTCP metadata */
543 #define PKTF_MPSO               0x1000  /* MPTCP socket meta data */
544 #define PKTF_LOOP               0x2000  /* loopbacked packet */
545 #define PKTF_IFAINFO            0x4000  /* pkt has valid interface addr info */
546 #define PKTF_SO_BACKGROUND      0x8000  /* data is from background source */
547 #define PKTF_FORWARDED          0x10000 /* pkt was forwarded from another i/f */
548 #define PKTF_PRIV_GUARDED       0x20000 /* pkt_mpriv area guard enabled */
549 #define PKTF_KEEPALIVE          0x40000 /* pkt is kernel-generated keepalive */
550 #define PKTF_SO_REALTIME        0x80000 /* data is realtime traffic */
551 #define PKTF_VALID_UNSENT_DATA  0x100000 /* unsent data is valid */
552 #define PKTF_TCP_REXMT          0x200000 /* packet is TCP retransmission */
553 #define PKTF_REASSEMBLED        0x400000 /* Packet was reassembled */
554 #define PKTF_TX_COMPL_TS_REQ    0x800000 /* tx completion timestamp requested */
555 #define PKTF_TS_VALID           0x1000000 /* pkt timestamp is valid */
556 #define PKTF_DRIVER_MTAG        0x2000000 /* driver mbuf tags fields inited */
557 #define PKTF_NEW_FLOW           0x4000000 /* Data from a new flow */
558 #define PKTF_START_SEQ          0x8000000 /* valid start sequence */
559 #define PKTF_LAST_PKT           0x10000000 /* last packet in the flow */
560 #define PKTF_MPTCP_REINJ        0x20000000 /* Packet has been reinjected for MPTCP */
561 #define PKTF_MPTCP_DFIN         0x40000000 /* Packet is a data-fin */
562 #define PKTF_HBH_CHKED          0x80000000 /* HBH option is checked */
563 
564 #define PKTF_EXT_OUTPUT_SCOPE   0x1     /* outgoing packet has ipv6 address scope id */
565 #define PKTF_EXT_L4S            0x2     /* pkt is from an L4S connection */
566 #define PKTF_EXT_QUIC           0x4     /* flag to denote a QUIC packet */
567 
568 #define PKT_CRUMB_TS_COMP_REQ   0x0001 /* timestamp completion requested */
569 #define PKT_CRUMB_TS_COMP_CB    0x0002 /* timestamp callback called */
570 #define PKT_CRUMB_DLIL_OUTPUT   0x0004 /* dlil_output called */
571 #define PKT_CRUMB_FLOW_TX       0x0008 /* dp_flow_tx_process called */
572 #define PKT_CRUMB_FQ_ENQUEUE    0x0010 /* fq_enqueue called */
573 #define PKT_CRUMB_FQ_DEQUEUE    0x0020 /* fq_dequeue called */
574 #define PKT_CRUMB_SK_PKT_COPY   0x0040 /* copy from mbuf to skywalk packet */
575 #define PKT_CRUMB_TCP_OUTPUT    0x0080
576 #define PKT_CRUMB_UDP_OUTPUT    0x0100
577 #define PKT_CRUMB_SOSEND        0x0200
578 #define PKT_CRUMB_DLIL_INPUT    0x0400
579 #define PKT_CRUMB_IP_INPUT      0x0800
580 #define PKT_CRUMB_TCP_INPUT     0x1000
581 #define PKT_CRUMB_UDP_INPUT     0x2000
582 
583 /* flags related to flow control/advisory and identification */
584 #define PKTF_FLOW_MASK  \
585 	(PKTF_FLOW_ID | PKTF_FLOW_ADV | PKTF_FLOW_LOCALSRC | PKTF_FLOW_RAWSOCK)
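/*
 * Illustrative usage sketch: PKTF flags live in the packet header, unlike the
 * M_LOOP mbuf flag discussed above, and PKTF_FLOW_ID must be checked before
 * trusting pkt_flowid:
 *
 *	if (m->m_pkthdr.pkt_flags & PKTF_LOOP) {
 *	        ... handle a looped-back packet (preferred over m_flags & M_LOOP) ...
 *	}
 *	if (m->m_pkthdr.pkt_flags & PKTF_FLOW_ID) {
 *	        uint32_t flowid = m->m_pkthdr.pkt_flowid;
 *	        ...
 *	}
 */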
586 
587 /*
588  * Description of external storage mapped into mbuf, valid only if M_EXT set.
589  */
590 typedef void (*m_ext_free_func_t)(caddr_t, u_int, caddr_t);
591 struct m_ext {
592 	caddr_t __counted_by(ext_size) ext_buf; /* start of buffer */
593 	m_ext_free_func_t ext_free;     /* free routine if not the usual */
594 	u_int   ext_size;               /* size of buffer, for ext_free */
595 	caddr_t ext_arg;                /* additional ext_free argument */
596 	struct ext_ref {
597 		struct mbuf *paired;
598 		u_int16_t minref;
599 		u_int16_t refcnt;
600 		u_int16_t prefcnt;
601 		u_int16_t flags;
602 		u_int32_t priv;
603 		uintptr_t ext_token;
604 	} *ext_refflags;
605 };
606 
607 /* define m_ext to a type since it gets redefined below */
608 typedef struct m_ext _m_ext_t;
609 
610 #if CONFIG_MBUF_MCACHE
611 /*
612  * The following _MLEN and _MHLEN macros are private to xnu.  Private code
613  * outside of xnu must use the mbuf_get_{mlen,mhlen} routines since
614  * the sizes of the structures are dependent upon specific xnu configs.
615  */
616 #define _MLEN           (_MSIZE - sizeof(struct m_hdr))  /* normal data len */
617 #define _MHLEN          (_MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */
618 
619 #define NMBPGSHIFT      (PAGE_SHIFT - _MSIZESHIFT)
620 #define NMBPG           (1 << NMBPGSHIFT)       /* # of mbufs per page */
621 
622 #define NMBPCLSHIFT     (MCLSHIFT - _MSIZESHIFT)
623 
624 /*
625  * The mbuf object
626  */
627 struct mbuf {
628 	struct m_hdr m_hdr;
629 	union {
630 		struct {
631 			struct pkthdr MH_pkthdr;        /* M_PKTHDR set */
632 			union {
633 				struct m_ext MH_ext;    /* M_EXT set */
634 				char    MH_databuf[_MHLEN];
635 			} MH_dat;
636 		} MH;
637 		char    M_databuf[_MLEN];               /* !M_PKTHDR, !M_EXT */
638 	} M_dat;
639 };
640 
641 #define m_next          m_hdr.mh_next
642 #define m_len           m_hdr.mh_len
643 #define m_data          m_hdr.mh_data
644 #define m_type          m_hdr.mh_type
645 #define m_flags         m_hdr.mh_flags
646 #define m_nextpkt       m_hdr.mh_nextpkt
647 #define m_act           m_nextpkt
648 
649 #define m_ext           M_dat.MH.MH_dat.MH_ext
650 #define m_pkthdr        M_dat.MH.MH_pkthdr
651 #define m_pktdat        M_dat.MH.MH_dat.MH_databuf
652 
653 #else /* !CONFIG_MBUF_MCACHE */
654 /*
655  * The following _MLEN and _MHLEN macros are private to xnu.  Private code
656  * outside of xnu must use the mbuf_get_{mlen,mhlen} routines since
657  * the sizes of the structures are dependent upon specific xnu configs.
658  */
659 #define _MLEN           (_MSIZE - sizeof(struct m_hdr_common))  /* normal data len */
660 #define _MHLEN          (_MLEN)                                /* data len w/pkthdr */
661 
662 struct m_hdr_common {
663 	struct m_hdr M_hdr;
664 	struct m_ext M_ext  __attribute__((aligned(16)));             /* M_EXT set */
665 	struct pkthdr M_pkthdr  __attribute__((aligned(16)));         /* M_PKTHDR set */
666 };
667 
668 /*
669  * The mbuf object
670  */
671 struct mbuf {
672 	struct m_hdr_common             M_hdr_common;
673 	union {
674 		char                    MH_databuf[_MHLEN];
675 		char                    M_databuf[_MLEN];           /* !M_PKTHDR, !M_EXT */
676 	} M_dat __attribute__((aligned(16)));
677 };
678 
679 #define m_next          M_hdr_common.M_hdr.mh_next
680 #define m_len           M_hdr_common.M_hdr.mh_len
681 #define m_data          M_hdr_common.M_hdr.mh_data
682 #define m_type          M_hdr_common.M_hdr.mh_type
683 #define m_flags         M_hdr_common.M_hdr.mh_flags
684 #define m_nextpkt       M_hdr_common.M_hdr.mh_nextpkt
685 
686 #define m_ext           M_hdr_common.M_ext
687 #define m_pkthdr        M_hdr_common.M_pkthdr
688 #define m_pktdat        M_dat.MH_databuf
689 
690 #endif /* CONFIG_MBUF_MCACHE */
691 
692 #define m_act           m_nextpkt
693 #define m_dat           M_dat.M_databuf
694 #define m_pktlen(_m)    ((_m)->m_pkthdr.len)
695 #define m_pftag(_m)     (&(_m)->m_pkthdr.builtin_mtag._net_mtag._pf_mtag)
696 #define m_necptag(_m)   (&(_m)->m_pkthdr.builtin_mtag._net_mtag._necp_mtag)
697 
698 /* mbuf flags (private) */
699 #define M_EXT           0x0001  /* has associated external storage */
700 #define M_PKTHDR        0x0002  /* start of record */
701 #define M_EOR           0x0004  /* end of record */
702 #define M_PROTO1        0x0008  /* protocol-specific */
703 #define M_PROTO2        0x0010  /* protocol-specific */
704 #define M_PROTO3        0x0020  /* protocol-specific */
705 #define M_LOOP          0x0040  /* packet is looped back (also see PKTF_LOOP) */
706 #define M_PROTO5        0x0080  /* protocol-specific */
707 
708 /* mbuf pkthdr flags, also in m_flags (private) */
709 #define M_BCAST         0x0100  /* send/received as link-level broadcast */
710 #define M_MCAST         0x0200  /* send/received as link-level multicast */
711 #define M_FRAG          0x0400  /* packet is a fragment of a larger packet */
712 #define M_FIRSTFRAG     0x0800  /* packet is first fragment */
713 #define M_LASTFRAG      0x1000  /* packet is last fragment */
714 #define M_PROMISC       0x2000  /* packet is promiscuous (shouldn't go to stack) */
715 #define M_HASFCS        0x4000  /* packet has FCS */
716 #define M_TAGHDR        0x8000  /* m_tag hdr structure at top of mbuf data */
717 
718 /*
719  * Flags to purge when crossing layers.
720  */
721 #define M_PROTOFLAGS \
722 	(M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO5)
723 
724 /* flags copied when copying m_pkthdr */
725 #define M_COPYFLAGS                                                     \
726 	(M_PKTHDR|M_EOR|M_PROTO1|M_PROTO2|M_PROTO3 |                    \
727 	M_LOOP|M_PROTO5|M_BCAST|M_MCAST|M_FRAG |                        \
728 	M_FIRSTFRAG|M_LASTFRAG|M_PROMISC|M_HASFCS)
729 
730 /* flags indicating hw checksum support and sw checksum requirements */
731 #define CSUM_IP                 0x0001          /* will csum IP */
732 #define CSUM_TCP                0x0002          /* will csum TCP */
733 #define CSUM_UDP                0x0004          /* will csum UDP */
734 #define CSUM_IP_FRAGS           0x0008          /* will csum IP fragments */
735 #define CSUM_FRAGMENT           0x0010          /* will do IP fragmentation */
736 #define CSUM_TCPIPV6            0x0020          /* will csum TCP for IPv6 */
737 #define CSUM_UDPIPV6            0x0040          /* will csum UDP for IPv6 */
738 #define CSUM_FRAGMENT_IPV6      0x0080          /* will do IPv6 fragmentation */
739 
740 #define CSUM_IP_CHECKED         0x0100          /* did csum IP */
741 #define CSUM_IP_VALID           0x0200          /*   ... the csum is valid */
742 #define CSUM_DATA_VALID         0x0400          /* csum_data field is valid */
743 #define CSUM_PSEUDO_HDR         0x0800          /* csum_data has pseudo hdr */
744 #define CSUM_PARTIAL            0x1000          /* simple Sum16 computation */
745 #define CSUM_ZERO_INVERT        0x2000          /* invert 0 to -0 (0xffff) */
746 
747 #define CSUM_DELAY_DATA         (CSUM_TCP | CSUM_UDP)
748 #define CSUM_DELAY_IP           (CSUM_IP)       /* IPv4 only: no IPv6 IP cksum */
749 #define CSUM_DELAY_IPV6_DATA    (CSUM_TCPIPV6 | CSUM_UDPIPV6)
750 #define CSUM_DATA_IPV6_VALID    CSUM_DATA_VALID /* csum_data field is valid */
751 
752 #define CSUM_TX_FLAGS                                                   \
753 	(CSUM_DELAY_IP | CSUM_DELAY_DATA | CSUM_DELAY_IPV6_DATA |       \
754 	CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_ZERO_INVERT)
755 
756 #define CSUM_RX_FULL_FLAGS                                              \
757 	(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_PSEUDO_HDR |            \
758 	CSUM_DATA_VALID)
759 
760 #define CSUM_RX_FLAGS                                                   \
761 	(CSUM_RX_FULL_FLAGS | CSUM_PARTIAL)
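/*
 * Illustrative sketch of how a receive path may interpret these flags
 * (assumed usage; details vary by driver and protocol):
 *
 *	if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
 *	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
 *	        ... hardware fully verified the transport checksum ...
 *	} else if (m->m_pkthdr.csum_flags & CSUM_PARTIAL) {
 *	        ... only a raw 16-bit sum starting at csum_rx_start is available
 *	            in csum_rx_val; software must finish the computation ...
 *	}
 */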
762 
763 
764 
765 /*
766  * Note: see also IF_HWASSIST_CSUM defined in <net/if_var.h>
767  */
768 
769 /* VLAN tag present */
770 #define CSUM_VLAN_TAG_VALID     0x00010000      /* vlan_tag field is valid */
771 
772 /* checksum start adjustment has been done */
773 #define CSUM_ADJUST_DONE        0x00020000
774 
775 /* VLAN encapsulation present */
776 #define CSUM_VLAN_ENCAP_PRESENT    0x00040000      /* mbuf has vlan encapsulation */
777 
778 /* TCP Segment Offloading requested on this mbuf */
779 #define CSUM_TSO_IPV4           0x00100000      /* This mbuf needs to be segmented by the NIC */
780 #define CSUM_TSO_IPV6           0x00200000      /* This mbuf needs to be segmented by the NIC */
781 
782 #define TSO_IPV4_OK(_ifp, _m)                                           \
783     (((_ifp)->if_hwassist & IFNET_TSO_IPV4) &&                          \
784     ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV4))                        \
785 
786 #define TSO_IPV4_NOTOK(_ifp, _m)                                        \
787     (!((_ifp)->if_hwassist & IFNET_TSO_IPV4) &&                         \
788     ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV4))                        \
789 
790 #define TSO_IPV6_OK(_ifp, _m)                                           \
791     (((_ifp)->if_hwassist & IFNET_TSO_IPV6) &&                          \
792     ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV6))                        \
793 
794 #define TSO_IPV6_NOTOK(_ifp, _m)                                        \
795     (!((_ifp)->if_hwassist & IFNET_TSO_IPV6) &&                         \
796     ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV6))                        \
797 
798 #endif /* XNU_KERNEL_PRIVATE */
799 
800 /* mbuf types */
801 #define MT_FREE         0       /* should be on free list */
802 #define MT_DATA         1       /* dynamic (data) allocation */
803 #define MT_HEADER       2       /* packet header */
804 #define MT_SOCKET       3       /* socket structure */
805 #define MT_PCB          4       /* protocol control block */
806 #define MT_RTABLE       5       /* routing tables */
807 #define MT_HTABLE       6       /* IMP host tables */
808 #define MT_ATABLE       7       /* address resolution tables */
809 #define MT_SONAME       8       /* socket name */
810 #define MT_SOOPTS       10      /* socket options */
811 #define MT_FTABLE       11      /* fragment reassembly header */
812 #define MT_RIGHTS       12      /* access rights */
813 #define MT_IFADDR       13      /* interface address */
814 #define MT_CONTROL      14      /* extra-data protocol message */
815 #define MT_OOBDATA      15      /* expedited data  */
816 #define MT_TAG          16      /* volatile metadata associated to pkts */
817 #define MT_MAX          32      /* enough? */
818 
819 enum {
820 	MTF_FREE        = (1 << MT_FREE),
821 	MTF_DATA        = (1 << MT_DATA),
822 	MTF_HEADER      = (1 << MT_HEADER),
823 	MTF_SOCKET      = (1 << MT_SOCKET),
824 	MTF_PCB         = (1 << MT_PCB),
825 	MTF_RTABLE      = (1 << MT_RTABLE),
826 	MTF_HTABLE      = (1 << MT_HTABLE),
827 	MTF_ATABLE      = (1 << MT_ATABLE),
828 	MTF_SONAME      = (1 << MT_SONAME),
829 	MTF_SOOPTS      = (1 << MT_SOOPTS),
830 	MTF_FTABLE      = (1 << MT_FTABLE),
831 	MTF_RIGHTS      = (1 << MT_RIGHTS),
832 	MTF_IFADDR      = (1 << MT_IFADDR),
833 	MTF_CONTROL     = (1 << MT_CONTROL),
834 	MTF_OOBDATA     = (1 << MT_OOBDATA),
835 	MTF_TAG         = (1 << MT_TAG),
836 };
837 
838 #ifdef XNU_KERNEL_PRIVATE
839 /*
840  * mbuf allocation/deallocation macros:
841  *
842  *	MGET(struct mbuf *m, int how, int type)
843  * allocates an mbuf and initializes it to contain internal data.
844  *
845  *	MGETHDR(struct mbuf *m, int how, int type)
846  * allocates an mbuf and initializes it to contain a packet header
847  * and internal data.
848  */
849 
850 #if 1
851 #define MCHECK(m) m_mcheck(m)
852 #else
853 #define MCHECK(m)
854 #endif
855 
856 #define MGET(m, how, type) ((m) = m_get((how), (type)))
857 
858 #define MGETHDR(m, how, type)   ((m) = m_gethdr((how), (type)))
859 
860 /*
861  * Mbuf cluster macros.
862  * MCLALLOC(caddr_t p, int how) allocates an mbuf cluster.
863  * MCLGET adds such clusters to a normal mbuf;
864  * the flag M_EXT is set upon success.
865  * MCLFREE releases a reference to a cluster allocated by MCLALLOC,
866  * freeing the cluster if the reference count has reached 0.
867  *
868  * Normal mbuf clusters are normally treated as character arrays
869  * after allocation, but use the first word of the buffer as a free list
870  * pointer while on the free list.
871  */
872 union mcluster {
873 	union   mcluster *mcl_next;
874 	char    mcl_buf[MCLBYTES];
875 };
876 
877 #define MCLALLOC(p, how)        ((p) = m_mclalloc(how))
878 
879 #define MCLFREE(p)              m_mclfree(p)
880 
881 #define MCLGET(m, how)          ((m) = m_mclget(m, how))
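/*
 * Illustrative usage sketch: allocating a packet header mbuf and attaching a
 * regular cluster.  MCLGET sets M_EXT only on success, so the caller must
 * check the flag before relying on the cluster:
 *
 *	struct mbuf *m;
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *	        return (ENOBUFS);
 *	MCLGET(m, M_DONTWAIT);
 *	if (!(m->m_flags & M_EXT)) {
 *	        m_free(m);
 *	        return (ENOBUFS);
 *	}
 */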
882 
883 /*
884  * Mbuf big cluster
885  */
886 union mbigcluster {
887 	union mbigcluster       *mbc_next;
888 	char                    mbc_buf[MBIGCLBYTES];
889 };
890 
891 /*
892  * Mbuf jumbo cluster
893  */
894 union m16kcluster {
895 	union m16kcluster       *m16kcl_next;
896 	char                    m16kcl_buf[M16KCLBYTES];
897 };
898 
899 #define MCLHASREFERENCE(m)      m_mclhasreference(m)
900 
901 /*
902  * MFREE(struct mbuf *m, struct mbuf *n)
903  * Free a single mbuf and associated external storage.
904  * Place the successor, if any, in n.
905  */
906 
907 #define MFREE(m, n) ((n) = m_free(m))
908 
909 /*
910  * Copy mbuf pkthdr from from to to.
911  * from must have M_PKTHDR set, and to must be empty.
912  * aux pointer will be moved to `to'.
913  */
914 #define M_COPY_PKTHDR(to, from)         m_copy_pkthdr(to, from)
915 
916 #define M_COPY_PFTAG(to, from)          m_copy_pftag(to, from)
917 
918 #define M_COPY_NECPTAG(to, from)        m_copy_necptag(to, from)
919 
920 #define M_COPY_CLASSIFIER(to, from)     m_copy_classifier(to, from)
921 
922 /*
923  * Evaluates to TRUE if it's safe to write to the mbuf m's data region (this can
924  * be either the local data payload or an external buffer area, depending on
925  * whether M_EXT is set).
926  */
927 #define M_WRITABLE(m)   (((m)->m_flags & M_EXT) == 0 || !MCLHASREFERENCE(m))
928 
929 /*
930  * These macros are mapped to the appropriate KPIs, so that private code
931  * can be simply recompiled in order to be forward-compatible with future
932  * changes to the structure sizes.
933  */
934 #ifdef XNU_KERNEL_PRIVATE
935 #define MLEN            _MLEN
936 #define MHLEN           _MHLEN
937 #define MINCLSIZE       (MLEN + MHLEN)
938 #else
939 #define MLEN            mbuf_get_mlen()         /* normal mbuf data len */
940 #define MHLEN           mbuf_get_mhlen()        /* data len in an mbuf w/pkthdr */
941 #define MINCLSIZE       mbuf_get_minclsize()    /* cluster usage threshold */
942 #endif
943 /*
944  * Return the address of the start of the buffer associated with an mbuf,
945  * handling external storage, packet-header mbufs, and regular data mbufs.
946  */
947 #define M_START(m)                                                      \
948 	(((m)->m_flags & M_EXT) ? (caddr_t)(m)->m_ext.ext_buf :             \
949 	 ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] :                \
950 	 &(m)->m_dat[0])
951 
952 /*
953  * Return the size of the buffer associated with an mbuf, handling external
954  * storage, packet-header mbufs, and regular data mbufs.
955  */
956 #define M_SIZE(m)                                                       \
957 	(((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size :                 \
958 	 ((m)->m_flags & M_PKTHDR) ? MHLEN :                            \
959 	 MLEN)
960 
961 #define M_ALIGN(m, len)         m_align(m, len)
962 #define MH_ALIGN(m, len)        m_align(m, len)
963 #define MEXT_ALIGN(m, len)      m_align(m, len)
964 
965 /*
966  * Compute the amount of space available before the current start of data in
967  * an mbuf.
968  *
969  * The M_WRITABLE() is a temporary, conservative safety measure: the burden
970  * of checking writability of the mbuf data area rests solely with the caller.
971  */
972 #define M_LEADINGSPACE(m)                                               \
973 	(M_WRITABLE(m) ? ((m)->m_data - (uintptr_t)M_START(m)) : 0)
974 
975 /*
976  * Compute the amount of space available after the end of data in an mbuf.
977  *
978  * The M_WRITABLE() is a temporary, conservative safety measure: the burden
979  * of checking writability of the mbuf data area rests solely with the caller.
980  */
981 #define M_TRAILINGSPACE(m)                                              \
982 	(M_WRITABLE(m) ?                                                \
983 	    ((M_START(m) + M_SIZE(m)) - (mtod(m, caddr_t) + (m)->m_len)) : 0)
984 
985 /*
986  * Arrange to prepend space of size plen to mbuf m.
987  * If a new mbuf must be allocated, how specifies whether to wait.
988  * If how is M_DONTWAIT and allocation fails, the original mbuf chain
989  * is freed and m is set to NULL.
990  */
991 #define M_PREPEND(m, plen, how, align)  \
992     ((m) = m_prepend_2((m), (plen), (how), (align)))
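/*
 * Illustrative usage sketch (the header type is a placeholder): prepending
 * room for an encapsulation header.  M_PREPEND reuses leading space in the
 * first mbuf when possible and otherwise allocates a new mbuf in front; with
 * M_DONTWAIT the whole chain is freed on failure and m becomes NULL:
 *
 *	struct example_hdr *eh;
 *
 *	M_PREPEND(m, sizeof(struct example_hdr), M_DONTWAIT, 0);
 *	if (m == NULL)
 *	        return (ENOBUFS);
 *	eh = mtod(m, struct example_hdr *);
 */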
993 
994 /* change mbuf to new type */
995 #define MCHTYPE(m, t)           m_mchtype(m, t)
996 
997 /* compatibility with 4.3 */
998 #define m_copy(m, o, l)         m_copym((m), (o), (l), M_DONTWAIT)
999 
1000 #define MBSHIFT         20                              /* 1MB */
1001 #define MBSIZE          (1 << MBSHIFT)
1002 #define GBSHIFT         30                              /* 1GB */
1003 #define GBSIZE          (1 << GBSHIFT)
1004 
1005 /*
1006  * M_STRUCT_GET ensures that the intermediate protocol header (from "off" to
1007  * "off+len") is located in a single mbuf, in a contiguous memory region.
1008  * The pointer to the region will be returned in the pointer variable "val",
1009  * with type "typ".
1010  *
1011  * M_STRUCT_GET0 does the same, except that it aligns the structure at the
1012  * very top of the mbuf.  GET0 is more likely to require a memory copy than GET.
1013  */
1014 #define M_STRUCT_GET(val, typ, m, off, len)                             \
1015 do {                                                                    \
1016 	struct mbuf *t;                                                 \
1017 	int tmp;                                                        \
1018                                                                         \
1019 	if ((m)->m_len >= (off) + (len)) {                              \
1020 	        (val) = (typ)(mtod((m), caddr_t) + (off));              \
1021 	} else {                                                        \
1022 	        t = m_pulldown((m), (off), (len), &tmp);                \
1023 	        if (t != NULL) {                                        \
1024 	                if (t->m_len < tmp + (len))                     \
1025 	                        panic("m_pulldown malfunction");        \
1026 	                (val) = (typ)(mtod(t, caddr_t) + tmp);          \
1027 	        } else {                                                \
1028 	                (val) = (typ)NULL;                              \
1029 	                (m) = NULL;                                     \
1030 	        }                                                       \
1031 	}                                                               \
1032 } while (0)
1033 
1034 #define M_STRUCT_GET0(val, typ, m, off, len)                            \
1035 do {                                                                    \
1036 	struct mbuf *t;                                                 \
1037                                                                         \
1038 	if ((off) == 0 && ((m)->m_len >= (len))) {                      \
1039 	        (val) = (typ)(void *)mtod(m, caddr_t);                  \
1040 	} else {                                                        \
1041 	        t = m_pulldown((m), (off), (len), NULL);                \
1042 	        if (t != NULL) {                                        \
1043 	                if (t->m_len < (len))                           \
1044 	                        panic("m_pulldown malfunction");        \
1045 	                (val) = (typ)(void *)mtod(t, caddr_t);          \
1046 	        } else {                                                \
1047 	                (val) = (typ)NULL;                              \
1048 	                (m) = NULL;                                     \
1049 	        }                                                       \
1050 	}                                                               \
1051 } while (0)
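/*
 * Illustrative usage sketch: pulling a UDP header located "off" bytes into
 * the packet up into contiguous memory ("off" is assumed to have been
 * computed by the caller).  On failure the chain has already been freed and
 * both "uh" and "m" are set to NULL:
 *
 *	struct udphdr *uh;
 *
 *	M_STRUCT_GET(uh, struct udphdr *, m, off, sizeof(struct udphdr));
 *	if (uh == NULL)
 *	        return;
 */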
1052 
1053 #define MBUF_INPUT_CHECK(m, rcvif)                                      \
1054 do {                                                                    \
1055 	if (!(m->m_flags & MBUF_PKTHDR) ||                              \
1056 	    m->m_len < 0 ||                                             \
1057 	    m->m_len > ((njcl > 0) ? njclbytes : MBIGCLBYTES) ||        \
1058 	    m->m_type == MT_FREE ||                                     \
1059 	    ((m->m_flags & M_EXT) != 0 && m->m_ext.ext_buf == NULL)) {  \
1060 	        panic_plain("Failed mbuf validity check: mbuf %p len %d "  \
1061 	            "type %d flags 0x%x data %p rcvif %s ifflags 0x%x", \
1062 	            m, m->m_len, m->m_type, m->m_flags,                 \
1063 	            ((m->m_flags & M_EXT)                               \
1064 	                                ? m->m_ext.ext_buf                              \
1065 	                                : (caddr_t __unsafe_indexable)m->m_data),       \
1066 	            if_name(rcvif),                                     \
1067 	            (rcvif->if_flags & 0xffff));                        \
1068 	}                                                               \
1069 } while (0)
1070 
1071 /*
1072  * Simple mbuf queueing system
1073  *
1074  * This is basically a SIMPLEQ adapted to mbuf use (i.e. using
1075  * m_nextpkt instead of field.sqe_next).
1076  *
1077  * m_next is ignored, so queueing chains of mbufs is possible.
1078  */
1079 #define MBUFQ_HEAD(name)                                        \
1080 struct name {                                                   \
1081 	struct mbuf *mq_first;  /* first packet */              \
1082 	struct mbuf **mq_last;  /* addr of last next packet */  \
1083 }
1084 
1085 #define MBUFQ_INIT(q)           do {                            \
1086 	MBUFQ_FIRST(q) = NULL;                                  \
1087 	(q)->mq_last = &MBUFQ_FIRST(q);                         \
1088 } while (0)
1089 
1090 #define MBUFQ_PREPEND(q, m)     do {                            \
1091 	if ((MBUFQ_NEXT(m) = MBUFQ_FIRST(q)) == NULL)           \
1092 	        (q)->mq_last = &MBUFQ_NEXT(m);                  \
1093 	MBUFQ_FIRST(q) = (m);                                   \
1094 } while (0)
1095 
1096 #define MBUFQ_ENQUEUE(q, m)     do {                            \
1097 	MBUFQ_NEXT(m) = NULL;                                   \
1098 	*(q)->mq_last = (m);                                    \
1099 	(q)->mq_last = &MBUFQ_NEXT(m);                          \
1100 } while (0)
1101 
1102 #define MBUFQ_ENQUEUE_MULTI(q, m, n)    do {                    \
1103 	MBUFQ_NEXT(n) = NULL;                                   \
1104 	*(q)->mq_last = (m);                                    \
1105 	(q)->mq_last = &MBUFQ_NEXT(n);                          \
1106 } while (0)
1107 
1108 #define MBUFQ_DEQUEUE(q, m)     do {                            \
1109 	if (((m) = MBUFQ_FIRST(q)) != NULL) {                   \
1110 	        if ((MBUFQ_FIRST(q) = MBUFQ_NEXT(m)) == NULL)   \
1111 	                (q)->mq_last = &MBUFQ_FIRST(q);         \
1112 	        else                                            \
1113 	                MBUFQ_NEXT(m) = NULL;                   \
1114 	}                                                       \
1115 } while (0)
1116 
1117 #define MBUFQ_REMOVE(q, m)      do {                            \
1118 	if (MBUFQ_FIRST(q) == (m)) {                            \
1119 	        MBUFQ_DEQUEUE(q, m);                            \
1120 	} else {                                                \
1121 	        struct mbuf *_m = MBUFQ_FIRST(q);               \
1122 	        while (MBUFQ_NEXT(_m) != (m))                   \
1123 	                _m = MBUFQ_NEXT(_m);                    \
1124 	        if ((MBUFQ_NEXT(_m) =                           \
1125 	            MBUFQ_NEXT(MBUFQ_NEXT(_m))) == NULL)        \
1126 	                (q)->mq_last = &MBUFQ_NEXT(_m);         \
1127 	}                                                       \
1128 } while (0)
1129 
1130 #define MBUFQ_DRAIN(q)          do {                            \
1131 	struct mbuf *__m0;                                      \
1132 	while ((__m0 = MBUFQ_FIRST(q)) != NULL) {               \
1133 	        MBUFQ_FIRST(q) = MBUFQ_NEXT(__m0);              \
1134 	        MBUFQ_NEXT(__m0) = NULL;                        \
1135 	        m_freem(__m0);                                  \
1136 	}                                                       \
1137 	(q)->mq_last = &MBUFQ_FIRST(q);                         \
1138 } while (0)
1139 
1140 #define MBUFQ_FOREACH(m, q)                                     \
1141 	for ((m) = MBUFQ_FIRST(q);                              \
1142 	    (m);                                                \
1143 	    (m) = MBUFQ_NEXT(m))
1144 
1145 #define MBUFQ_FOREACH_SAFE(m, q, tvar)                          \
1146 	for ((m) = MBUFQ_FIRST(q);                              \
1147 	    (m) && ((tvar) = MBUFQ_NEXT(m), 1);                 \
1148 	    (m) = (tvar))
1149 
1150 #define MBUFQ_EMPTY(q)          ((q)->mq_first == NULL)
1151 #define MBUFQ_FIRST(q)          ((q)->mq_first)
1152 #define MBUFQ_NEXT(m)           ((m)->m_nextpkt)
1153 /*
1154  * mq_last is initialized to point to mq_first, so check if they're
1155  * equal and return NULL when the list is empty.  Otherwise, we need
1156  * to subtract the offset of MBUFQ_NEXT (i.e. the m_nextpkt field) to get
1157  * to the base mbuf address to return to the caller.
1158  */
1159 #define MBUFQ_LAST(head)                                        \
1160 	(((head)->mq_last == &MBUFQ_FIRST(head)) ? NULL :       \
1161 	((struct mbuf *)(void *)((char *)(head)->mq_last -      \
1162 	     __builtin_offsetof(struct mbuf, m_nextpkt))))
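/*
 * Illustrative usage sketch of the queueing macros above (names are
 * placeholders; m0 is a previously obtained packet chain):
 *
 *	MBUFQ_HEAD(example_q) q;
 *	struct mbuf *m;
 *
 *	MBUFQ_INIT(&q);
 *	MBUFQ_ENQUEUE(&q, m0);          // append a packet chain
 *	MBUFQ_DEQUEUE(&q, m);           // m is NULL when the queue is empty
 *	MBUFQ_DRAIN(&q);                // m_freem() anything still queued
 */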
1163 
1164 #if (DEBUG || DEVELOPMENT)
1165 #define MBUFQ_ADD_CRUMB_MULTI(_q, _h, _t, _f) do {              \
1166 	struct mbuf * _saved = (_t)->m_nextpkt;                 \
1167 	struct mbuf * _m;                                       \
1168 	for (_m = (_h); _m != NULL; _m = MBUFQ_NEXT(_m)) {      \
1169 	        m_add_crumb((_m), (_f));                        \
1170 	}                                                       \
1171 	(_t)->m_nextpkt = _saved;                               \
1172 } while (0)
1173 
1174 #define MBUFQ_ADD_CRUMB(_q, _m, _f) do {                \
1175 	m_add_crumb((_m), (_f));                        \
1176 } while (0)
1177 #else
1178 #define MBUFQ_ADD_CRUMB_MULTI(_q, _h, _t, _f)
1179 #define MBUFQ_ADD_CRUMB(_q, _m, _f)
1180 #endif /* (DEBUG || DEVELOPMENT) */
1181 
1182 #endif /* XNU_KERNEL_PRIVATE */
1183 
1184 /*
1185  * Mbuf statistics (legacy).
1186  */
1187 struct mbstat {
1188 	u_int32_t       m_mbufs;        /* mbufs obtained from page pool */
1189 	u_int32_t       m_clusters;     /* clusters obtained from page pool */
1190 	u_int32_t       m_spare;        /* spare field */
1191 	u_int32_t       m_clfree;       /* free clusters */
1192 	u_int32_t       m_drops;        /* times failed to find space */
1193 	u_int32_t       m_wait;         /* times waited for space */
1194 	u_int32_t       m_drain;        /* times drained protocols for space */
1195 	u_short         m_mtypes[256];  /* type specific mbuf allocations */
1196 	u_int32_t       m_mcfail;       /* times m_copym failed */
1197 	u_int32_t       m_mpfail;       /* times m_pullup failed */
1198 	u_int32_t       m_msize;        /* length of an mbuf */
1199 	u_int32_t       m_mclbytes;     /* length of an mbuf cluster */
1200 	u_int32_t       m_minclsize;    /* min length of data to allocate a cluster */
1201 	u_int32_t       m_mlen;         /* length of data in an mbuf */
1202 	u_int32_t       m_mhlen;        /* length of data in a header mbuf */
1203 	u_int32_t       m_bigclusters;  /* clusters obtained from page pool */
1204 	u_int32_t       m_bigclfree;    /* free clusters */
1205 	u_int32_t       m_bigmclbytes;  /* length of an mbuf cluster */
1206 	u_int32_t       m_forcedefunct; /* times we force defunct'ed an app's sockets */
1207 };
1208 
1209 /* Compatibility with 10.3 */
1210 struct ombstat {
1211 	u_int32_t       m_mbufs;        /* mbufs obtained from page pool */
1212 	u_int32_t       m_clusters;     /* clusters obtained from page pool */
1213 	u_int32_t       m_spare;        /* spare field */
1214 	u_int32_t       m_clfree;       /* free clusters */
1215 	u_int32_t       m_drops;        /* times failed to find space */
1216 	u_int32_t       m_wait;         /* times waited for space */
1217 	u_int32_t       m_drain;        /* times drained protocols for space */
1218 	u_short         m_mtypes[256];  /* type specific mbuf allocations */
1219 	u_int32_t       m_mcfail;       /* times m_copym failed */
1220 	u_int32_t       m_mpfail;       /* times m_pullup failed */
1221 	u_int32_t       m_msize;        /* length of an mbuf */
1222 	u_int32_t       m_mclbytes;     /* length of an mbuf cluster */
1223 	u_int32_t       m_minclsize;    /* min length of data to allocate a cluster */
1224 	u_int32_t       m_mlen;         /* length of data in an mbuf */
1225 	u_int32_t       m_mhlen;        /* length of data in a header mbuf */
1226 };
1227 
1228 /*
1229  * mbuf class statistics.
1230  */
1231 #define MAX_MBUF_CNAME  15
1232 
1233 #if defined(XNU_KERNEL_PRIVATE)
1234 /* For backwards compatibility with 32-bit userland processes */
1235 struct omb_class_stat {
1236 	char            mbcl_cname[MAX_MBUF_CNAME + 1]; /* class name */
1237 	u_int32_t       mbcl_size;      /* buffer size */
1238 	u_int32_t       mbcl_total;     /* # of buffers created */
1239 	u_int32_t       mbcl_active;    /* # of active buffers */
1240 	u_int32_t       mbcl_infree;    /* # of available buffers */
1241 	u_int32_t       mbcl_slab_cnt;  /* # of available slabs */
1242 	u_int32_t       mbcl_pad;       /* padding */
1243 	u_int64_t       mbcl_alloc_cnt; /* # of times alloc is called */
1244 	u_int64_t       mbcl_free_cnt;  /* # of times free is called */
1245 	u_int64_t       mbcl_notified;  /* # of notified wakeups */
1246 	u_int64_t       mbcl_purge_cnt; /* # of purges so far */
1247 	u_int64_t       mbcl_fail_cnt;  /* # of allocation failures */
1248 	u_int32_t       mbcl_ctotal;    /* total only for this class */
1249 	u_int32_t       mbcl_release_cnt; /* amount of memory returned */
1250 	/*
1251 	 * Cache layer statistics
1252 	 */
1253 	u_int32_t       mbcl_mc_state;  /* cache state (see below) */
1254 	u_int32_t       mbcl_mc_cached; /* # of cached buffers */
1255 	u_int32_t       mbcl_mc_waiter_cnt;  /* # waiters on the cache */
1256 	u_int32_t       mbcl_mc_wretry_cnt;  /* # of wait retries */
1257 	u_int32_t       mbcl_mc_nwretry_cnt; /* # of no-wait retry attempts */
1258 	u_int32_t       mbcl_reserved[7];    /* for future use */
1259 } __attribute__((__packed__));
1260 #endif /* XNU_KERNEL_PRIVATE */
1261 
1262 typedef struct mb_class_stat {
1263 	char            mbcl_cname[MAX_MBUF_CNAME + 1]; /* class name */
1264 	u_int32_t       mbcl_size;      /* buffer size */
1265 	u_int32_t       mbcl_total;     /* # of buffers created */
1266 	u_int32_t       mbcl_active;    /* # of active buffers */
1267 	u_int32_t       mbcl_infree;    /* # of available buffers */
1268 	u_int32_t       mbcl_slab_cnt;  /* # of available slabs */
1269 #if defined(KERNEL) || defined(__LP64__)
1270 	u_int32_t       mbcl_pad;       /* padding */
1271 #endif /* KERNEL || __LP64__ */
1272 	u_int64_t       mbcl_alloc_cnt; /* # of times alloc is called */
1273 	u_int64_t       mbcl_free_cnt;  /* # of times free is called */
1274 	u_int64_t       mbcl_notified;  /* # of notified wakeups */
1275 	u_int64_t       mbcl_purge_cnt; /* # of purges so far */
1276 	u_int64_t       mbcl_fail_cnt;  /* # of allocation failures */
1277 	u_int32_t       mbcl_ctotal;    /* total only for this class */
1278 	u_int32_t       mbcl_release_cnt; /* amount of memory returned */
1279 	/*
1280 	 * Cache layer statistics
1281 	 */
1282 	u_int32_t       mbcl_mc_state;  /* cache state (see below) */
1283 	u_int32_t       mbcl_mc_cached; /* # of cached buffers */
1284 	u_int32_t       mbcl_mc_waiter_cnt;  /* # waiters on the cache */
1285 	u_int32_t       mbcl_mc_wretry_cnt;  /* # of wait retries */
1286 	u_int32_t       mbcl_mc_nwretry_cnt; /* # of no-wait retry attempts */
1287 	u_int32_t       mbcl_reserved[7];    /* for future use */
1288 } mb_class_stat_t;
1289 
1290 #define MCS_DISABLED    0       /* cache is permanently disabled */
1291 #define MCS_ONLINE      1       /* cache is online */
1292 #define MCS_PURGING     2       /* cache is being purged */
1293 #define MCS_OFFLINE     3       /* cache is offline (resizing) */
1294 
1295 #if defined(XNU_KERNEL_PRIVATE)
1296 /* For backwards compatibility with 32-bit userland process */
1297 struct omb_stat {
1298 	u_int32_t               mbs_cnt;        /* number of classes */
1299 	u_int32_t               mbs_pad;        /* padding */
1300 	struct omb_class_stat   mbs_class[1];   /* class array */
1301 } __attribute__((__packed__));
1302 #endif /* XNU_KERNEL_PRIVATE */
1303 
1304 typedef struct mb_stat {
1305 	u_int32_t       mbs_cnt;        /* number of classes */
1306 #if defined(KERNEL) || defined(__LP64__)
1307 	u_int32_t       mbs_pad;        /* padding */
1308 #endif /* KERNEL || __LP64__ */
1309 	mb_class_stat_t mbs_class[1];   /* class array */
1310 } mb_stat_t;
1311 
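/*
 * Example (a minimal sketch): walking a variable-length mb_stat buffer.  The
 * structure is sized by mbs_cnt, with mbs_class[] as the trailing array of
 * per-class statistics; how the buffer was obtained (e.g. via a sysctl) is
 * outside the scope of this header.
 *
 *	static void
 *	print_class_stats(const mb_stat_t *mbs)
 *	{
 *		u_int32_t i;
 *
 *		for (i = 0; i < mbs->mbs_cnt; i++) {
 *			const mb_class_stat_t *cs = &mbs->mbs_class[i];
 *
 *			printf("%s: size %u total %u active %u free %u\n",
 *			    cs->mbcl_cname, cs->mbcl_size, cs->mbcl_total,
 *			    cs->mbcl_active, cs->mbcl_infree);
 *		}
 *	}
 */
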
1312 #ifdef PRIVATE
1313 #define MLEAK_STACK_DEPTH       16      /* Max PC stack depth */
1314 
1315 typedef struct mleak_trace_stat {
1316 	u_int64_t       mltr_collisions;
1317 	u_int64_t       mltr_hitcount;
1318 	u_int64_t       mltr_allocs;
1319 	u_int64_t       mltr_depth;
1320 	u_int64_t       mltr_addr[MLEAK_STACK_DEPTH];
1321 } mleak_trace_stat_t;
1322 
1323 typedef struct mleak_stat {
1324 	u_int32_t               ml_isaddr64;    /* 64-bit KVA? */
1325 	u_int32_t               ml_cnt;         /* number of traces */
1326 	mleak_trace_stat_t      ml_trace[1];    /* trace array */
1327 } mleak_stat_t;
1328 
1329 struct mleak_table {
1330 	u_int32_t mleak_capture;        /* sampling capture counter */
1331 	u_int32_t mleak_sample_factor;  /* sample factor */
1332 
1333 	/* Times two active records want to occupy the same spot */
1334 	u_int64_t alloc_collisions;
1335 	u_int64_t trace_collisions;
1336 
1337 	/* Times new record lands on spot previously occupied by freed alloc */
1338 	u_int64_t alloc_overwrites;
1339 	u_int64_t trace_overwrites;
1340 
1341 	/* Times a new alloc or trace is put into the hash table */
1342 	u_int64_t alloc_recorded;
1343 	u_int64_t trace_recorded;
1344 
1345 	/* Total number of outstanding allocs */
1346 	u_int64_t outstanding_allocs;
1347 
1348 	/* Times mleak_log returned false because it couldn't acquire the lock */
1349 	u_int64_t total_conflicts;
1350 };
1351 
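/*
 * Example (a minimal sketch): iterating the traces in a mleak_stat buffer.
 * Each trace records up to MLEAK_STACK_DEPTH return addresses; only the
 * first mltr_depth entries are meaningful.
 *
 *	static void
 *	print_leak_traces(const mleak_stat_t *mls)
 *	{
 *		u_int32_t i, j;
 *
 *		for (i = 0; i < mls->ml_cnt; i++) {
 *			const mleak_trace_stat_t *tr = &mls->ml_trace[i];
 *
 *			printf("trace %u: %llu allocations\n", i, tr->mltr_allocs);
 *			for (j = 0; j < tr->mltr_depth; j++)
 *				printf("  pc[%u] = 0x%llx\n", j, tr->mltr_addr[j]);
 *		}
 *	}
 */
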
1352 #define HAS_M_TAG_STATS 1
1353 
1354 struct m_tag_stats {
1355 	u_int32_t mts_id;
1356 	u_int16_t mts_type;
1357 	u_int16_t mts_len;
1358 	u_int64_t mts_alloc_count;
1359 	u_int64_t mts_alloc_failed;
1360 	u_int64_t mts_free_count;
1361 };
1362 
1363 
1364 #define M_TAG_TYPE_NAMES \
1365     "other,dummynet,ipfilt,encap,inet6,ipsec,cfil_udp,pf_reass,aqm,drvaux"
1366 
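/*
 * Example (a minimal sketch): resolving an m_tag_stats entry to a readable
 * name.  M_TAG_TYPE_NAMES is a comma-separated list whose fields line up
 * with the KERNEL_TAG_TYPE_* values, so the n-th field names type n.
 *
 *	static void
 *	print_tag_type_name(u_int16_t mts_type)
 *	{
 *		char names[] = M_TAG_TYPE_NAMES;
 *		char *cursor = names, *tok;
 *		u_int16_t i = 0;
 *
 *		while ((tok = strsep(&cursor, ",")) != NULL) {
 *			if (i++ == mts_type) {
 *				printf("tag type %u is \"%s\"\n", mts_type, tok);
 *				return;
 *			}
 *		}
 *		printf("tag type %u is unknown\n", mts_type);
 *	}
 */
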
1367 #endif /* PRIVATE */
1368 
1369 #ifdef KERNEL_PRIVATE
1370 __BEGIN_DECLS
1371 
1372 /*
1373  * Exported (private)
1374  */
1375 
1376 extern struct mbstat mbstat;                    /* statistics */
1377 
1378 __END_DECLS
1379 #endif /* KERNEL_PRIVATE */
1380 
1381 #ifdef XNU_KERNEL_PRIVATE
1382 __BEGIN_DECLS
1383 
1384 /*
1385  * Not exported (xnu private)
1386  */
1387 
1388 /* flags to m_get/MGET */
1389 /* Need to include malloc.h to get the right options for malloc */
1390 #include        <sys/malloc.h>
1391 
1392 struct mbuf;
1393 
1394 /* length to m_copy to copy all */
1395 #define M_COPYALL       1000000000
1396 
1397 #define M_DONTWAIT      M_NOWAIT
1398 #define M_WAIT          M_WAITOK
1399 
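/*
 * Example (a minimal sketch): the "how" argument of the m_get*() allocators
 * below takes one of the flags above.  M_DONTWAIT fails immediately under
 * memory pressure, while M_WAIT may block until space becomes available.
 *
 *	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);	// caller must cope with allocation failure
 */
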
1400 /* modes for m_copym and variants */
1401 #define M_COPYM_NOOP_HDR        0       /* don't copy/move pkthdr contents */
1402 #define M_COPYM_COPY_HDR        1       /* copy pkthdr from old to new */
1403 #define M_COPYM_MOVE_HDR        2       /* move pkthdr from old to new */
1404 #define M_COPYM_MUST_COPY_HDR   3       /* MUST copy pkthdr from old to new */
1405 #define M_COPYM_MUST_MOVE_HDR   4       /* MUST move pkthdr from old to new */
1406 
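/*
 * Example (a minimal sketch): copying a whole chain.  Passing M_COPYALL as
 * the length copies everything from the given offset to the end of the
 * chain.  The M_COPYM_* mode constants above apply to m_copym_mode() and
 * m_copym_with_hdrs(), which take a mode as their final argument to control
 * whether the pkthdr is copied or moved.
 *
 *	struct mbuf *copy = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (copy == NULL)
 *		return (ENOBUFS);	// allocation failed; the original is untouched
 */
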
1407 extern void m_freem(struct mbuf *) __XNU_INTERNAL(m_freem);
1408 extern u_int64_t mcl_to_paddr(char *);
1409 extern void m_adj(struct mbuf *, int);
1410 extern void m_cat(struct mbuf *, struct mbuf *);
1411 extern void m_copydata(struct mbuf *, int, int, void *);
1412 extern struct mbuf *m_copym(struct mbuf *, int, int, int);
1413 extern struct mbuf *m_copym_mode(struct mbuf *, int, int, int, struct mbuf **, int *, uint32_t);
1414 extern struct mbuf *m_get(int, int);
1415 extern struct mbuf *m_gethdr(int, int);
1416 extern struct mbuf *m_getpacket(void);
1417 extern struct mbuf *m_getpackets(int, int, int);
1418 extern struct mbuf *m_mclget(struct mbuf *, int);
1419 extern void *__unsafe_indexable m_mtod(struct mbuf *);
1420 extern struct mbuf *m_prepend_2(struct mbuf *, int, int, int);
1421 extern struct mbuf *m_pullup(struct mbuf *, int);
1422 extern struct mbuf *m_split(struct mbuf *, int, int);
1423 extern void m_mclfree(caddr_t p);
1424 extern bool mbuf_class_under_pressure(struct mbuf *m);
1425 
1426 /*
1427  * Accessors for the mbuf data range.
1428  * The "lower bound" is the start of the memory range that m->m_data is allowed
1429  * to point into. The "start" is where m->m_data points to; equivalent to the
1430  * late m_mtod. The end is where m->m_data + m->m_len points to. The upper bound
1431  * is the end of the memory range that m->m_data + m->m_len is allowed to point
1432  * into.
1433  * In a well-formed range, lower bound <= start <= end <= upper bound. An
1434  * ill-formed range always means a programming error.
1435  */
1436 __stateful_pure static inline caddr_t __header_bidi_indexable
1437 m_mtod_lower_bound(struct mbuf *m)
1438 {
1439 	return M_START(m);
1440 }
1441 
1442 __stateful_pure static inline caddr_t __header_bidi_indexable
1443 m_mtod_current(struct mbuf *m)
1444 {
1445 	caddr_t data = m_mtod_lower_bound(m);
1446 	return data + (m->m_data - (uintptr_t)data);
1447 }
1448 
1449 __stateful_pure static inline caddr_t __header_bidi_indexable
1450 m_mtod_end(struct mbuf *m)
1451 {
1452 	return m_mtod_current(m) + m->m_len;
1453 }
1454 
1455 __stateful_pure static inline caddr_t __header_bidi_indexable
1456 m_mtod_upper_bound(struct mbuf *m)
1457 {
1458 	return m_mtod_lower_bound(m) + M_SIZE(m);
1459 }
1460 
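/*
 * Example (a minimal sketch): checking that an mbuf's data range is
 * well-formed using the accessors above, i.e. that
 * lower bound <= start <= end <= upper bound.
 *
 *	static inline bool
 *	m_range_is_well_formed(struct mbuf *m)
 *	{
 *		caddr_t lo = m_mtod_lower_bound(m);
 *		caddr_t start = m_mtod_current(m);
 *		caddr_t end = m_mtod_end(m);
 *		caddr_t hi = m_mtod_upper_bound(m);
 *
 *		return (lo <= start && start <= end && end <= hi);
 *	}
 */
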
1461 static inline bool
1462 m_has_mtype(const struct mbuf *m, int mtype_flags)
1463 {
1464 	return (1 << m->m_type) & mtype_flags;
1465 }
1466 
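/*
 * Example (a minimal sketch): m_has_mtype() takes a bitmask built from the
 * MT_* type values, one bit per type.
 *
 *	if (!m_has_mtype(m, (1 << MT_DATA) | (1 << MT_HEADER)))
 *		panic("unexpected mbuf type %d", m->m_type);
 */
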
1467 /*
1468  * On platforms that require strict alignment (currently anything other than
1469  * i386, x86_64 or arm64), this macro checks whether the data pointer of an
1470  * mbuf is 32-bit aligned (the expected minimum alignment for protocol
1471  * headers), and panics otherwise.
1472  */
1473 #if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
1474 #define MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(_m)
1475 #else /* !__i386__ && !__x86_64__ && !__arm64__ */
1476 #define MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(_m) do {                    \
1477 	if (!IS_P2ALIGNED((_m)->m_data, sizeof (u_int32_t))) {          \
1478 	        if (((_m)->m_flags & M_PKTHDR) &&                       \
1479 	            (_m)->m_pkthdr.rcvif != NULL) {                     \
1480 	                panic_plain("\n%s: mbuf %p data ptr %p is not " \
1481 	                    "32-bit aligned [%s: alignerrs=%lld]\n",    \
1482 	                    __func__, (_m),                             \
1483 	                    (caddr_t __unsafe_indexable)(_m)->m_data,   \
1484 	                    if_name((_m)->m_pkthdr.rcvif),              \
1485 	                    (_m)->m_pkthdr.rcvif->if_alignerrs);        \
1486 	        } else {                                                \
1487 	                panic_plain("\n%s: mbuf %p data ptr %p is not " \
1488 	                    "32-bit aligned\n",                         \
1489 	                    __func__, (_m),                             \
1490 	                    (caddr_t __unsafe_indexable)(_m)->m_data);  \
1491 	        }                                                       \
1492 	}                                                               \
1493 } while (0)
1494 #endif /* !__i386__ && !__x86_64__ && !__arm64__ */
1495 
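/*
 * Example (a minimal sketch): a typical use is in a driver or protocol input
 * path, before a header is read through a structure pointer.  On
 * strict-alignment platforms a misaligned m_data panics, reporting the
 * receiving interface's alignment-error counter when one is available.
 *
 *	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
 *	struct ip *ip = mtod(m, struct ip *);	// assumes <netinet/ip.h>
 */
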
1496 /* Maximum number of MBUF_SC values (excluding MBUF_SC_UNSPEC) */
1497 #define MBUF_SC_MAX_CLASSES     10
1498 
1499 /*
1500  * These conversion macros rely on the corresponding MBUF_SC and
1501  * MBUF_TC values in order to establish the following mapping:
1502  *
1503  *	MBUF_SC_BK_SYS	] ==>	MBUF_TC_BK
1504  *	MBUF_SC_BK	]
1505  *
1506  *	MBUF_SC_BE	] ==>	MBUF_TC_BE
1507  *	MBUF_SC_RD	]
1508  *	MBUF_SC_OAM	]
1509  *
1510  *	MBUF_SC_AV	] ==>	MBUF_TC_VI
1511  *	MBUF_SC_RV	]
1512  *	MBUF_SC_VI	]
1513  *	MBUF_SC_SIG	]
1514  *
1515  *	MBUF_SC_VO	] ==>	MBUF_TC_VO
1516  *	MBUF_SC_CTL	]
1517  *
1518  * The values assigned to each service class allow for a fast mapping to
1519  * the corresponding MBUF_TC traffic class values, as well as to retrieve the
1520  * assigned index; therefore care must be taken when comparing against these
1521  * values.  Use the corresponding class and index macros to retrieve the
1522  * corresponding portion, and never assume that a higher class corresponds
1523  * to a higher index.
1524  */
1525 #define MBUF_SCVAL(x)           ((x) & 0xffff)
1526 #define MBUF_SCIDX(x)           ((((x) >> 16) & 0xff) >> 3)
1527 #define MBUF_SC2TC(_sc)         (MBUF_SCVAL(_sc) >> 7)
1528 #define MBUF_TC2SCVAL(_tc)      ((_tc) << 7)
1529 #define IS_MBUF_SC_BACKGROUND(_sc) (((_sc) == MBUF_SC_BK_SYS) || \
1530 	((_sc) == MBUF_SC_BK))
1531 #define IS_MBUF_SC_REALTIME(_sc)        ((_sc) >= MBUF_SC_AV && (_sc) <= MBUF_SC_VO)
1532 #define IS_MBUF_SC_BESTEFFORT(_sc)      ((_sc) == MBUF_SC_BE || \
1533     (_sc) == MBUF_SC_RD || (_sc) == MBUF_SC_OAM)
1534 
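/*
 * Example (a minimal sketch): extracting the pieces encoded in an MBUF_SC_*
 * value with the macros above, without any lookup table.
 *
 *	mbuf_svc_class_t sc = m_get_service_class(m);
 *	u_int32_t idx = MBUF_SCIDX(sc);		// per-class index
 *	mbuf_traffic_class_t tc = (mbuf_traffic_class_t)MBUF_SC2TC(sc);
 */
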
1535 #define SCIDX_BK_SYS            MBUF_SCIDX(MBUF_SC_BK_SYS)
1536 #define SCIDX_BK                MBUF_SCIDX(MBUF_SC_BK)
1537 #define SCIDX_BE                MBUF_SCIDX(MBUF_SC_BE)
1538 #define SCIDX_RD                MBUF_SCIDX(MBUF_SC_RD)
1539 #define SCIDX_OAM               MBUF_SCIDX(MBUF_SC_OAM)
1540 #define SCIDX_AV                MBUF_SCIDX(MBUF_SC_AV)
1541 #define SCIDX_RV                MBUF_SCIDX(MBUF_SC_RV)
1542 #define SCIDX_VI                MBUF_SCIDX(MBUF_SC_VI)
1543 #define SCIDX_SIG               MBUF_SCIDX(MBUF_SC_SIG)
1544 #define SCIDX_VO                MBUF_SCIDX(MBUF_SC_VO)
1545 #define SCIDX_CTL               MBUF_SCIDX(MBUF_SC_CTL)
1546 
1547 #define SCVAL_BK_SYS            MBUF_SCVAL(MBUF_SC_BK_SYS)
1548 #define SCVAL_BK                MBUF_SCVAL(MBUF_SC_BK)
1549 #define SCVAL_BE                MBUF_SCVAL(MBUF_SC_BE)
1550 #define SCVAL_RD                MBUF_SCVAL(MBUF_SC_RD)
1551 #define SCVAL_OAM               MBUF_SCVAL(MBUF_SC_OAM)
1552 #define SCVAL_AV                MBUF_SCVAL(MBUF_SC_AV)
1553 #define SCVAL_RV                MBUF_SCVAL(MBUF_SC_RV)
1554 #define SCVAL_VI                MBUF_SCVAL(MBUF_SC_VI)
1555 #define SCVAL_SIG               MBUF_SCVAL(MBUF_SC_SIG)
1556 #define SCVAL_VO                MBUF_SCVAL(MBUF_SC_VO)
1557 #define SCVAL_CTL               MBUF_SCVAL(MBUF_SC_CTL)
1558 
1559 #define MBUF_VALID_SC(c)                                                \
1560 	(c == MBUF_SC_BK_SYS || c == MBUF_SC_BK || c == MBUF_SC_BE ||   \
1561 	c == MBUF_SC_RD || c == MBUF_SC_OAM || c == MBUF_SC_AV ||       \
1562 	c == MBUF_SC_RV || c == MBUF_SC_VI || c == MBUF_SC_SIG ||       \
1563 	c == MBUF_SC_VO || c == MBUF_SC_CTL)
1564 
1565 #define MBUF_VALID_SCIDX(c)                                             \
1566 	(c == SCIDX_BK_SYS || c == SCIDX_BK || c == SCIDX_BE ||         \
1567 	c == SCIDX_RD || c == SCIDX_OAM || c == SCIDX_AV ||             \
1568 	c == SCIDX_RV || c == SCIDX_VI || c == SCIDX_SIG ||             \
1569 	c == SCIDX_VO || c == SCIDX_CTL)
1570 
1571 #define MBUF_VALID_SCVAL(c)                                             \
1572 	(c == SCVAL_BK_SYS || c == SCVAL_BK || c == SCVAL_BE ||         \
1573 	c == SCVAL_RD || c == SCVAL_OAM || c == SCVAL_AV ||             \
1574 	c == SCVAL_RV || c == SCVAL_VI || c == SCVAL_SIG ||             \
1575 	c == SCVAL_VO || c == SCVAL_CTL)
1576 
1577 extern unsigned char *mbutl;    /* start VA of mbuf pool */
1578 extern unsigned char *embutl;   /* end VA of mbuf pool */
1579 extern unsigned int nmbclusters;        /* number of mapped clusters */
1580 extern int njcl;                /* # of jumbo clusters  */
1581 extern int njclbytes;   /* size of a jumbo cluster */
1582 extern int max_hdr;             /* largest link+protocol header */
1583 extern int max_datalen; /* MHLEN - max_hdr */
1584 
1585 extern int max_linkhdr;        /* largest link-level header */
1586 
1587 /* Use max_protohdr instead of _max_protohdr */
1588 extern int max_protohdr;       /* largest protocol header */
1589 
1590 __private_extern__ unsigned int mbuf_default_ncl(uint64_t);
1591 __private_extern__ void mbinit(void);
1592 __private_extern__ struct mbuf *m_clattach(struct mbuf *, int, caddr_t,
1593     void (*)(caddr_t, u_int, caddr_t), size_t, caddr_t, int, int);
1594 __private_extern__ caddr_t m_bigalloc(int);
1595 __private_extern__ void m_bigfree(caddr_t, u_int, caddr_t);
1596 __private_extern__ struct mbuf *m_mbigget(struct mbuf *, int);
1597 __private_extern__ caddr_t m_16kalloc(int);
1598 __private_extern__ void m_16kfree(caddr_t, u_int, caddr_t);
1599 __private_extern__ struct mbuf *m_m16kget(struct mbuf *, int);
1600 __private_extern__ int m_reinit(struct mbuf *, int);
1601 __private_extern__ struct mbuf *m_free(struct mbuf *) __XNU_INTERNAL(m_free);
1602 __private_extern__ struct mbuf *m_getclr(int, int);
1603 __private_extern__ struct mbuf *m_getptr(struct mbuf *, int, int *);
1604 __private_extern__ unsigned int m_length(struct mbuf *);
1605 __private_extern__ unsigned int m_length2(struct mbuf *, struct mbuf **);
1606 __private_extern__ unsigned int m_fixhdr(struct mbuf *);
1607 __private_extern__ struct mbuf *m_defrag(struct mbuf *, int);
1608 __private_extern__ struct mbuf *m_defrag_offset(struct mbuf *, u_int32_t, int);
1609 __private_extern__ struct mbuf *m_prepend(struct mbuf *, int, int);
1610 __private_extern__ struct mbuf *m_copyup(struct mbuf *, int, int);
1611 __private_extern__ struct mbuf *m_retry(int, int);
1612 __private_extern__ struct mbuf *m_retryhdr(int, int);
1613 __private_extern__ int m_freem_list(struct mbuf *);
1614 __private_extern__ int m_append(struct mbuf *, int, caddr_t);
1615 __private_extern__ struct mbuf *m_last(struct mbuf *);
1616 __private_extern__ struct mbuf *m_devget(char *, int, int, struct ifnet *,
1617     void (*)(const void *, void *, size_t));
1618 __private_extern__ struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
1619 
1620 __private_extern__ struct mbuf *m_getcl(int, int, int);
1621 __private_extern__ caddr_t m_mclalloc(int);
1622 __private_extern__ int m_mclhasreference(struct mbuf *);
1623 __private_extern__ void m_copy_pkthdr(struct mbuf *, struct mbuf *);
1624 __private_extern__ int m_dup_pkthdr(struct mbuf *, struct mbuf *, int);
1625 __private_extern__ void m_copy_pftag(struct mbuf *, struct mbuf *);
1626 __private_extern__ void m_copy_necptag(struct mbuf *, struct mbuf *);
1627 __private_extern__ void m_copy_classifier(struct mbuf *, struct mbuf *);
1628 
1629 __private_extern__ struct mbuf *m_dtom(void *);
1630 __private_extern__ int m_mtocl(void *);
1631 __private_extern__ union mcluster *m_cltom(int);
1632 
1633 __private_extern__ void m_align(struct mbuf *, int);
1634 
1635 __private_extern__ struct mbuf *m_normalize(struct mbuf *m);
1636 __private_extern__ void m_mchtype(struct mbuf *m, int t);
1637 __private_extern__ void m_mcheck(struct mbuf *);
1638 
1639 __private_extern__ void m_copyback(struct mbuf *, int, int, const void *);
1640 __private_extern__ struct mbuf *m_copyback_cow(struct mbuf *, int, int,
1641     const void *, int);
1642 __private_extern__ int m_makewritable(struct mbuf **, int, int, int);
1643 __private_extern__ struct mbuf *m_dup(struct mbuf *m, int how);
1644 __private_extern__ struct mbuf *m_copym_with_hdrs(struct mbuf *, int, int, int,
1645     struct mbuf **, int *, uint32_t);
1646 __private_extern__ struct mbuf *m_getpackethdrs(int, int);
1647 __private_extern__ struct mbuf *m_getpacket_how(int);
1648 __private_extern__ struct mbuf *m_getpackets_internal(unsigned int *, int,
1649     int, int, size_t);
1650 __private_extern__ struct mbuf *m_allocpacket_internal(unsigned int *, size_t,
1651     unsigned int *, int, int, size_t);
1652 
1653 __private_extern__ int m_ext_set_prop(struct mbuf *, uint32_t, uint32_t);
1654 __private_extern__ uint32_t m_ext_get_prop(struct mbuf *);
1655 __private_extern__ int m_ext_paired_is_active(struct mbuf *);
1656 __private_extern__ void m_ext_paired_activate(struct mbuf *);
1657 
1658 __private_extern__ void m_add_crumb(struct mbuf *, uint16_t);
1659 
1660 __private_extern__ void mbuf_drain(boolean_t);
1661 
1662 /*
1663  * Packets may have annotations attached by affixing a list of "packet
1664  * tags" to the pkthdr structure.  Packet tags are dynamically allocated
1665  * semi-opaque data structures that have a fixed header (struct m_tag)
1666  * that specifies the size of the memory block and an <id,type> pair that
1667  * identifies it. The id identifies the module and the type identifies the
1668  * type of data for that module. The id of zero is reserved for the kernel.
1669  *
1670  * By default packet tags are allocated via kalloc, except on Intel, which
1671  * still uses the legacy implementation that allocates packet tags from mbufs.
1672  * This can be overridden via the boot-arg 'mb_tag_mbuf'.
1673  *
1674  * When kalloc is used for allocation, packet tags returned by m_tag_allocate have
1675  * the default memory alignment implemented by kalloc.
1676  *
1677  * When mbufs are used for allocation, packet tags returned by m_tag_allocate
1678  * have the default memory alignment implemented by malloc.
1679  *
1680  * To reference the private data one should use a construct like:
1681  *      struct m_tag *mtag = m_tag_allocate(...);
1682  *      struct foo *p = (struct foo *)(mtag->m_tag_data);
1683  *
1684  * No assumption should be made about the location of the private data
1685  * relative to the 'struct m_tag'.
1686  *
1687  * When kalloc is used, packet tags that are internal to xnu use KERNEL_MODULE_TAG_ID and
1688  * they are allocated with kalloc_type using a single container data structure that has
1689  * the 'struct m_tag' followed by a data structure for the private data.
1690  *
1691  * Packet tags that are allocated by KEXTs are external to xnu and the type of
1692  * the private data is unknown to xnu, so they are allocated in two chunks:
1693  *  - one allocation with kalloc_type for the 'struct m_tag'
1694  *  - one allocation using kheap_alloc for the private data
1695  *
1696  * Note that packet tags of type KERNEL_TAG_TYPE_DRVAUX are allocated by KEXTs
1697  * with a variable length, so they are allocated in two chunks.
1698  *
1699  * In all cases the 'struct m_tag' is allocated using kalloc_type to avoid type
1700  * confusion.
1701  */
1702 
1703 #define KERNEL_MODULE_TAG_ID    0
1704 
1705 enum {
1706 	KERNEL_TAG_TYPE_NONE                    = 0,
1707 	KERNEL_TAG_TYPE_DUMMYNET                = 1,
1708 	KERNEL_TAG_TYPE_IPFILT                  = 2,
1709 	KERNEL_TAG_TYPE_ENCAP                   = 3,
1710 	KERNEL_TAG_TYPE_INET6                   = 4,
1711 	KERNEL_TAG_TYPE_IPSEC                   = 5,
1712 	KERNEL_TAG_TYPE_CFIL_UDP                = 6,
1713 	KERNEL_TAG_TYPE_PF_REASS                = 7,
1714 	KERNEL_TAG_TYPE_AQM                     = 8,
1715 	KERNEL_TAG_TYPE_DRVAUX                  = 9,
1716 	KERNEL_TAG_TYPE_COUNT                   = 10
1717 };
1718 
1719 /* Packet tag routines */
1720 __private_extern__ struct m_tag *m_tag_alloc(u_int32_t, u_int16_t, int, int);
1721 __private_extern__ struct  m_tag *m_tag_create(u_int32_t, u_int16_t, int, int,
1722     struct mbuf *);
1723 __private_extern__ void m_tag_free(struct m_tag *);
1724 __private_extern__ void m_tag_prepend(struct mbuf *, struct m_tag *);
1725 __private_extern__ void m_tag_unlink(struct mbuf *, struct m_tag *);
1726 __private_extern__ void m_tag_delete(struct mbuf *, struct m_tag *);
1727 __private_extern__ void m_tag_delete_chain(struct mbuf *);
1728 __private_extern__ struct m_tag *m_tag_locate(struct mbuf *, u_int32_t,
1729     u_int16_t);
1730 __private_extern__ struct m_tag *m_tag_copy(struct m_tag *, int);
1731 __private_extern__ int m_tag_copy_chain(struct mbuf *, struct mbuf *, int);
1732 __private_extern__ void m_tag_init(struct mbuf *, int);
1733 __private_extern__ struct  m_tag *m_tag_first(struct mbuf *);
1734 __private_extern__ struct  m_tag *m_tag_next(struct mbuf *, struct m_tag *);
1735 
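/*
 * Example (a minimal sketch): attaching a private annotation to a packet and
 * locating it again later.  The id/type pair and the 32-bit payload used
 * here are purely illustrative.
 *
 *	struct m_tag *mtag;
 *
 *	mtag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT,
 *	    sizeof (uint32_t), M_DONTWAIT);
 *	if (mtag != NULL) {
 *		*(uint32_t *)(void *)mtag->m_tag_data = 0x1234;
 *		m_tag_prepend(m, mtag);
 *	}
 *
 *	// ... later, possibly in a different layer:
 *	mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT);
 */
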
1736 typedef struct m_tag * (*m_tag_kalloc_func_t)(u_int32_t id, u_int16_t type, uint16_t len, int wait);
1737 typedef void (*m_tag_kfree_func_t)(struct m_tag *tag);
1738 
1739 int m_register_internal_tag_type(uint16_t type, uint16_t len, m_tag_kalloc_func_t alloc_func, m_tag_kfree_func_t free_func);
1740 void m_tag_create_cookie(struct m_tag *);
1741 
1742 void mbuf_tag_init(void);
1743 
1744 __private_extern__ void m_scratch_init(struct mbuf *);
1745 __private_extern__ u_int32_t m_scratch_get(struct mbuf *, u_int8_t **);
1746 
1747 __private_extern__ void m_classifier_init(struct mbuf *, uint32_t);
1748 
1749 __private_extern__ int m_set_service_class(struct mbuf *, mbuf_svc_class_t);
1750 __private_extern__ mbuf_svc_class_t m_get_service_class(struct mbuf *);
1751 __private_extern__ mbuf_svc_class_t m_service_class_from_idx(u_int32_t);
1752 __private_extern__ mbuf_svc_class_t m_service_class_from_val(u_int32_t);
1753 __private_extern__ int m_set_traffic_class(struct mbuf *, mbuf_traffic_class_t);
1754 __private_extern__ mbuf_traffic_class_t m_get_traffic_class(struct mbuf *);
1755 
1756 __private_extern__ struct  m_tag *m_tag_alloc(u_int32_t, u_int16_t, int, int);
1757 __private_extern__ void mbuf_tag_init(void);
1758 
1759 #define ADDCARRY(_x)  do {                                              \
1760 	while (((_x) >> 16) != 0)                                       \
1761 	        (_x) = ((_x) >> 16) + ((_x) & 0xffff);                  \
1762 } while (0)
1763 
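/*
 * Example (a minimal sketch): folding a 32-bit one's-complement accumulator
 * into a 16-bit Internet checksum.  ADDCARRY() repeatedly folds the upper
 * bits back into the low 16 bits; "partial_sum" is an illustrative 32-bit
 * running sum, not a name defined by this header.
 *
 *	uint32_t sum = partial_sum;
 *	ADDCARRY(sum);
 *	uint16_t cksum = (uint16_t)(~sum & 0xffff);
 */
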
1764 __private_extern__ u_int16_t m_adj_sum16(struct mbuf *, u_int32_t,
1765     u_int32_t, u_int32_t, u_int32_t);
1766 __private_extern__ u_int16_t m_sum16(struct mbuf *, u_int32_t, u_int32_t);
1767 
1768 __private_extern__ void m_set_ext(struct mbuf *, struct ext_ref *,
1769     m_ext_free_func_t, caddr_t);
1770 __private_extern__ struct ext_ref *m_get_rfa(struct mbuf *);
1771 __private_extern__ m_ext_free_func_t m_get_ext_free(struct mbuf *);
1772 __private_extern__ caddr_t m_get_ext_arg(struct mbuf *);
1773 
1774 __private_extern__ void m_do_tx_compl_callback(struct mbuf *, struct ifnet *);
1775 __private_extern__ mbuf_tx_compl_func m_get_tx_compl_callback(u_int32_t);
1776 
1777 
1778 
1779 __END_DECLS
1780 #endif /* XNU_KERNEL_PRIVATE */
1781 #endif  /* !_SYS_MBUF_H_ */
1782