xref: /xnu-8796.101.5/bsd/skywalk/packet/pbufpool_var.h (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_PACKET_PBUFPOOLVAR_H_
30 #define _SKYWALK_PACKET_PBUFPOOLVAR_H_
31 
32 #ifdef BSD_KERNEL_PRIVATE
33 #include <skywalk/core/skywalk_var.h>
34 
35 struct __kern_quantum;
36 struct __kern_packet;
37 
38 /*
39  * User packet pool hash bucket.  Packets allocated by user space are
40  * kept in the hash table.  This allows the kernel to validate whether
41  * or not a given packet object is valid or is already-freed, and thus
42  * take the appropriate measure during internalize.
43  */
44 struct kern_pbufpool_u_bkt {
	/* singly-linked list of user-allocated quantums hashed to this bucket */
45 	SLIST_HEAD(, __kern_quantum) upp_head;
46 };
47 
48 struct kern_pbufpool_u_bft_bkt {
	/* singly-linked list of user-allocated buflets hashed to this bucket */
49 	SLIST_HEAD(, __kern_buflet_ext) upp_head;
50 };
51 
/*
 * A pool may carry up to two buffer regions; index 0 is the default
 * region and index 1 the (optional) large-buffer region.  These indices
 * select entries in the pp_buf_* arrays below.
 */
52 #define PBUFPOOL_MAX_BUF_REGIONS    2
53 #define PBUFPOOL_BUF_IDX_DEF        0
54 #define PBUFPOOL_BUF_IDX_LARGE      1
55 
56 struct kern_pbufpool {
	/* serializes access to the pool; see PP_LOCK()/PP_UNLOCK() below */
57 	decl_lck_mtx_data(, pp_lock);
	/* reference count, manipulated via pp_retain()/pp_release() */
58 	uint32_t                pp_refcnt;
	/* PPF_* flags (see "valid values for pp_flags" below) */
59 	uint32_t                pp_flags;
	/* backing object size per buffer region, indexed by PBUFPOOL_BUF_IDX_* */
60 	uint32_t                pp_buf_obj_size[PBUFPOOL_MAX_BUF_REGIONS];
	/* usable buffer size per buffer region, indexed by PBUFPOOL_BUF_IDX_* */
61 	uint16_t                pp_buf_size[PBUFPOOL_MAX_BUF_REGIONS];
	/* NOTE(review): presumably the max buflets (fragments) per packet —
	 * confirm against the allocation paths */
62 	uint16_t                pp_max_frags;
63 
64 	/*
65 	 * Caches
66 	 */
	/* buffer caches (default and large regions) */
67 	struct skmem_cache      *pp_buf_cache[PBUFPOOL_MAX_BUF_REGIONS];
	/* kernel packet metadata cache */
68 	struct skmem_cache      *pp_kmd_cache;
	/* kernel buflet caches (default and large regions) */
69 	struct skmem_cache      *pp_kbft_cache[PBUFPOOL_MAX_BUF_REGIONS];
	/* cache for "raw" kernel buflets, i.e. with no buffer attached
	 * at construction time (see PPF_RAW_BUFLT) */
70 	struct skmem_cache      *pp_raw_kbft_cache;
71 
72 	/*
73 	 * Regions
74 	 */
	/* buffer regions (default and large) */
75 	struct skmem_region     *pp_buf_region[PBUFPOOL_MAX_BUF_REGIONS];
	/* kernel and user metadata regions */
76 	struct skmem_region     *pp_kmd_region;
77 	struct skmem_region     *pp_umd_region;
	/* user and kernel buflet regions */
78 	struct skmem_region     *pp_ubft_region;
79 	struct skmem_region     *pp_kbft_region;
80 
81 	/*
82 	 * User packet pool: packet metadata hash table
83 	 */
84 	struct kern_pbufpool_u_bkt *pp_u_hash_table;
	/* count of user packet objects currently held in the table */
85 	uint64_t                pp_u_bufinuse;
86 
87 	/*
88 	 * User packet pool: buflet hash table
89 	 */
90 	struct kern_pbufpool_u_bft_bkt *pp_u_bft_hash_table;
	/* count of user buflets currently held in the table */
91 	uint64_t                pp_u_bftinuse;
92 
	/* opaque client context, managed via the retain/release callbacks */
93 	void                    *pp_ctx;
94 	pbuf_ctx_retain_fn_t    pp_ctx_retain;
95 	pbuf_ctx_release_fn_t   pp_ctx_release;
	/* metadata type/subtype served by this pool */
96 	nexus_meta_type_t       pp_md_type;
97 	nexus_meta_subtype_t    pp_md_subtype;
	/* NOTE(review): presumably the starting object indices for the
	 * metadata and buffer regions — confirm against pp_create() */
98 	uint32_t                pp_midx_start;
99 	uint32_t                pp_bidx_start;
	/* pool name (fixed-size pbufpool_name_t) */
100 	pbufpool_name_t         pp_name;
	/* optional constructor/destructor callbacks for buffer segments */
101 	pbuf_seg_ctor_fn_t      pp_pbuf_seg_ctor;
102 	pbuf_seg_dtor_fn_t      pp_pbuf_seg_dtor;
103 };
104 
105 /* valid values for pp_flags */
106 #define PPF_EXTERNAL            0x1     /* externally configured */
107 #define PPF_CLOSED              0x2     /* closed; awaiting final destruction */
108 #define PPF_MONOLITHIC          0x4     /* non slab-based buffer region */
109 /* buflet is truncated and may not contain the full payload */
110 #define PPF_TRUNCATED_BUF       0x8
111 #define PPF_KERNEL              0x10    /* kernel only, no user region(s) */
112 #define PPF_BUFFER_ON_DEMAND    0x20    /* attach buffers to packet on demand */
113 #define PPF_BATCH               0x40    /* capable of batch alloc/free */
114 #define PPF_DYNAMIC             0x80    /* capable of magazine resizing */
115 #define PPF_LARGE_BUF           0x100   /* configured with large buffers */
116 #define PPF_RAW_BUFLT           0x200   /* configured with raw buflet */
117 
/* convenience predicates testing the PPF_* bits in pp_flags */
118 #define PP_KERNEL_ONLY(_pp)             \
119 	(((_pp)->pp_flags & PPF_KERNEL) != 0)
120 
121 #define PP_HAS_TRUNCATED_BUF(_pp)               \
122 	(((_pp)->pp_flags & PPF_TRUNCATED_BUF) != 0)
123 
124 #define PP_HAS_BUFFER_ON_DEMAND(_pp)            \
125 	(((_pp)->pp_flags & PPF_BUFFER_ON_DEMAND) != 0)
126 
127 #define PP_BATCH_CAPABLE(_pp)           \
128 	(((_pp)->pp_flags & PPF_BATCH) != 0)
129 
130 #define PP_DYNAMIC(_pp)                 \
131 	(((_pp)->pp_flags & PPF_DYNAMIC) != 0)
132 
133 #define PP_HAS_LARGE_BUF(_pp)                 \
134 	(((_pp)->pp_flags & PPF_LARGE_BUF) != 0)
135 
136 #define PP_HAS_RAW_BFLT(_pp)                 \
137 	(((_pp)->pp_flags & PPF_RAW_BUFLT) != 0)
138 
/*
 * Pool mutex helpers.  The `_pp' argument is parenthesized — consistent
 * with every other macro in this file — so that any pointer expression
 * (not just a simple identifier) can safely be passed as the argument.
 */
139 #define PP_LOCK(_pp)                    \
140 	lck_mtx_lock(&(_pp)->pp_lock)
141 #define PP_LOCK_ASSERT_HELD(_pp)        \
142 	LCK_MTX_ASSERT(&(_pp)->pp_lock, LCK_MTX_ASSERT_OWNED)
143 #define PP_LOCK_ASSERT_NOTHELD(_pp)     \
144 	LCK_MTX_ASSERT(&(_pp)->pp_lock, LCK_MTX_ASSERT_NOTOWNED)
145 #define PP_UNLOCK(_pp)                  \
146 	lck_mtx_unlock(&(_pp)->pp_lock)
147 
/*
 * Per-region accessors: the _DEF variants select the default buffer
 * region (index 0), the _LARGE variants the large-buffer region (index 1).
 */
148 #define PP_BUF_SIZE_DEF(_pp)      ((_pp)->pp_buf_size[PBUFPOOL_BUF_IDX_DEF])
149 #define PP_BUF_SIZE_LARGE(_pp)    ((_pp)->pp_buf_size[PBUFPOOL_BUF_IDX_LARGE])
150 
151 #define PP_BUF_OBJ_SIZE_DEF(_pp)    \
152 	((_pp)->pp_buf_obj_size[PBUFPOOL_BUF_IDX_DEF])
153 #define PP_BUF_OBJ_SIZE_LARGE(_pp)    \
154 	((_pp)->pp_buf_obj_size[PBUFPOOL_BUF_IDX_LARGE])
155 
156 #define PP_BUF_REGION_DEF(_pp)    ((_pp)->pp_buf_region[PBUFPOOL_BUF_IDX_DEF])
157 #define PP_BUF_REGION_LARGE(_pp)  ((_pp)->pp_buf_region[PBUFPOOL_BUF_IDX_LARGE])
158 
159 #define PP_BUF_CACHE_DEF(_pp)    ((_pp)->pp_buf_cache[PBUFPOOL_BUF_IDX_DEF])
160 #define PP_BUF_CACHE_LARGE(_pp)  ((_pp)->pp_buf_cache[PBUFPOOL_BUF_IDX_LARGE])
161 
162 #define PP_KBFT_CACHE_DEF(_pp)    ((_pp)->pp_kbft_cache[PBUFPOOL_BUF_IDX_DEF])
163 #define PP_KBFT_CACHE_LARGE(_pp)  ((_pp)->pp_kbft_cache[PBUFPOOL_BUF_IDX_LARGE])
164 
165 __BEGIN_DECLS
/* subsystem-wide setup/teardown and pool shutdown */
166 extern int pp_init(void);
167 extern void pp_fini(void);
168 extern void pp_close(struct kern_pbufpool *);
169 
170 /* create flags for pp_create() */
171 #define PPCREATEF_EXTERNAL      0x1     /* externally requested */
172 #define PPCREATEF_KERNEL_ONLY   0x2     /* kernel-only */
173 #define PPCREATEF_TRUNCATED_BUF 0x4     /* compat-only (buf is short) */
174 #define PPCREATEF_ONDEMAND_BUF  0x8     /* buf alloc/free is decoupled */
175 #define PPCREATEF_DYNAMIC       0x10    /* dynamic per-CPU magazines */
176 #define PPCREATEF_RAW_BFLT      0x20    /* buflet can be alloced w/o buf */
177 
/*
 * Create/destroy a pool.  srp_array supplies the per-region parameters;
 * ctx plus the retain/release callbacks populate pp_ctx and friends;
 * ppcreatef is a mask of the PPCREATEF_* flags above.
 */
178 extern struct kern_pbufpool *pp_create(const char *name,
179     struct skmem_region_params *srp_array, pbuf_seg_ctor_fn_t buf_seg_ctor,
180     pbuf_seg_dtor_fn_t buf_seg_dtor, const void *ctx,
181     pbuf_ctx_retain_fn_t ctx_retain, pbuf_ctx_release_fn_t ctx_release,
182     uint32_t ppcreatef);
183 extern void pp_destroy(struct kern_pbufpool *);
184 
/*
 * User packet pool (upp) hash-table operations: insert/remove/find
 * user-allocated packets and buflets, keyed by object index and tracked
 * per pid (see the bucket definitions at the top of this file).
 */
185 extern int pp_init_upp(struct kern_pbufpool *, boolean_t);
186 extern void pp_insert_upp(struct kern_pbufpool *, struct __kern_quantum *,
187     pid_t);
188 extern void pp_insert_upp_locked(struct kern_pbufpool *,
189     struct __kern_quantum *, pid_t);
190 extern void pp_insert_upp_batch(struct kern_pbufpool *pp, pid_t pid,
191     uint64_t *array, uint32_t num);
192 extern struct __kern_quantum *pp_remove_upp(struct kern_pbufpool *, obj_idx_t,
193     int *);
194 extern struct __kern_quantum *pp_remove_upp_locked(struct kern_pbufpool *,
195     obj_idx_t, int *);
196 extern struct __kern_quantum *pp_find_upp(struct kern_pbufpool *, obj_idx_t);
197 extern void pp_purge_upp(struct kern_pbufpool *, pid_t);
198 extern struct __kern_buflet *pp_remove_upp_bft(struct kern_pbufpool *,
199     obj_idx_t, int *);
200 extern void pp_insert_upp_bft(struct kern_pbufpool *, struct __kern_buflet *,
201     pid_t);
202 extern boolean_t pp_isempty_upp(struct kern_pbufpool *);
203 
/* pp_refcnt management; _locked variants require the pool lock held */
204 extern void pp_retain_locked(struct kern_pbufpool *);
205 extern void pp_retain(struct kern_pbufpool *);
206 extern boolean_t pp_release_locked(struct kern_pbufpool *);
207 extern boolean_t pp_release(struct kern_pbufpool *);
208 
209 /* flags for pp_regions_params_adjust() */
210 /* configure packet pool regions for RX only */
211 #define PP_REGION_CONFIG_BUF_IODIR_IN          0x00000001
212 /* configure packet pool regions for TX only */
213 #define PP_REGION_CONFIG_BUF_IODIR_OUT         0x00000002
214 /* configure packet pool regions for bidirectional operation */
215 #define PP_REGION_CONFIG_BUF_IODIR_BIDIR    \
216     (PP_REGION_CONFIG_BUF_IODIR_IN | PP_REGION_CONFIG_BUF_IODIR_OUT)
217 /* configure packet pool metadata regions as persistent (wired) */
218 #define PP_REGION_CONFIG_MD_PERSISTENT         0x00000004
219 /* configure packet pool buffer regions as persistent (wired) */
220 #define PP_REGION_CONFIG_BUF_PERSISTENT        0x00000008
221 /* Enable magazine layer (per-cpu caches) for packet pool metadata regions */
222 #define PP_REGION_CONFIG_MD_MAGAZINE_ENABLE    0x00000010
223 /* configure packet pool regions required for kernel-only operations */
224 #define PP_REGION_CONFIG_KERNEL_ONLY           0x00000020
225 /* configure packet pool buflet regions */
226 #define PP_REGION_CONFIG_BUFLET                0x00000040
227 /* configure packet pool buffer region as user read-only */
228 #define PP_REGION_CONFIG_BUF_UREADONLY         0x00000080
229 /* configure packet pool buffer region as kernel read-only */
230 #define PP_REGION_CONFIG_BUF_KREADONLY         0x00000100
231 /* configure packet pool buffer region as a single segment */
232 #define PP_REGION_CONFIG_BUF_MONOLITHIC        0x00000200
233 /* configure packet pool buffer region as physically contiguous segment */
234 #define PP_REGION_CONFIG_BUF_SEGPHYSCONTIG     0x00000400
235 /* configure packet pool buffer region as cache-inhibiting */
236 #define PP_REGION_CONFIG_BUF_NOCACHE           0x00000800
237 /* configure buflet without buffer attached at construction */
238 #define PP_REGION_CONFIG_RAW_BUFLET            0x00001000
239 /* configure packet pool buffer region (backing IOMD) as thread safe */
240 #define PP_REGION_CONFIG_BUF_THREADSAFE        0x00002000
241 
/*
 * NOTE(review): the last flags argument takes PP_REGION_CONFIG_* bits;
 * the remaining unnamed scalar parameters are not self-describing from
 * this prototype (presumably object counts/sizes) — confirm against the
 * definition and consider naming them here for readability.
 */
242 extern void pp_regions_params_adjust(struct skmem_region_params *,
243     nexus_meta_type_t, nexus_meta_subtype_t, uint32_t, uint16_t, uint32_t,
244     uint32_t, uint32_t, uint32_t, uint32_t);
245 
/*
 * Packet allocation/free.  The uint64_t values are opaque packet handles
 * (NOTE(review): presumably kernel packet pointers encoded as handles —
 * confirm against the implementation); batch variants operate on arrays
 * and may invoke an alloc_cb_func_t callback per object.
 */
246 extern uint64_t pp_alloc_packet(struct kern_pbufpool *, uint16_t, uint32_t);
247 extern uint64_t pp_alloc_packet_by_size(struct kern_pbufpool *, uint32_t,
248     uint32_t);
249 extern int pp_alloc_packet_batch(struct kern_pbufpool *, uint16_t, uint64_t *,
250     uint32_t *, boolean_t, alloc_cb_func_t, const void *, uint32_t);
251 extern int pp_alloc_pktq(struct kern_pbufpool *, uint16_t, struct pktq *,
252     uint32_t, alloc_cb_func_t, const void *, uint32_t);
253 extern void pp_free_packet(struct kern_pbufpool *, uint64_t);
254 extern void pp_free_packet_batch(struct kern_pbufpool *, uint64_t *, uint32_t);
255 extern void pp_free_packet_single(struct __kern_packet *);
256 extern void pp_free_packet_chain(struct __kern_packet *, int *);
257 extern void pp_free_pktq(struct pktq *);
/* raw buffer allocation/free, returning the buffer's VA and segment info */
258 extern errno_t pp_alloc_buffer(const kern_pbufpool_t, mach_vm_address_t *,
259     kern_segment_t *, kern_obj_idx_seg_t *, uint32_t);
260 extern void pp_free_buffer(const kern_pbufpool_t, mach_vm_address_t);
261 
262 /* flags for pp_alloc_buflet */
263 /* alloc a buflet with an attached large-sized buffer */
264 #define PP_ALLOC_BFT_LARGE                        0x01
265 /* alloc a buflet with an attached buffer */
266 #define PP_ALLOC_BFT_ATTACH_BUFFER                0x02
267 
/*
 * Buflet allocation/free; `flags' takes the PP_ALLOC_BFT_* bits above,
 * `skmflag' is passed through to the skmem layer.
 */
268 extern errno_t pp_alloc_buflet(struct kern_pbufpool *pp, kern_buflet_t *kbft,
269     uint32_t skmflag, uint32_t flags);
270 extern errno_t pp_alloc_buflet_batch(struct kern_pbufpool *pp, uint64_t *array,
271     uint32_t *size, uint32_t skmflag, uint32_t flags);
272 
273 extern void pp_free_buflet(const kern_pbufpool_t, kern_buflet_t);
/* reap idle objects from the pool caches (boolean_t: NOTE(review)
 * presumably forces a purge when true — confirm) */
274 extern void pp_reap_caches(boolean_t);
275 __END_DECLS
276 #endif /* BSD_KERNEL_PRIVATE */
277 #endif /* !_SKYWALK_PACKET_PBUFPOOLVAR_H_ */
278