/*
 * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/packet/pbufpool_var.h>

static errno_t kern_pbufpool_alloc_common(const kern_pbufpool_t,
    const uint32_t, kern_packet_t *, uint32_t);
static errno_t kern_pbufpool_alloc_batch_common(const kern_pbufpool_t,
    const uint32_t, kern_packet_t *__counted_by(*size), uint32_t *size,
    alloc_cb_func_t, const void *, uint32_t);

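/*
 * A callback pair is invalid if exactly one of the two callbacks is
 * provided; callers must pass both or neither.
 */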
#define KBI_INVALID_CB_PAIRS(cb1, cb2)					\
	(!(init->kbi_##cb1 == NULL && init->kbi_##cb2 == NULL) &&	\
	((init->kbi_##cb1 == NULL) ^ (init->kbi_##cb2 == NULL)))

errno_t
kern_pbufpool_create(const struct kern_pbufpool_init *init,
    kern_pbufpool_t *ppp, struct kern_pbufpool_memory_info *pp_info)
{
	/* XXX: woodford_s - find a way to get 'srp' off the kernel stack */
	struct skmem_region_params srp[SKMEM_REGIONS];
	struct kern_pbufpool *pp = NULL;
	nexus_meta_type_t md_type;
	uint32_t buf_cnt;
	uint16_t max_frags;
	uint32_t ppcreatef = PPCREATEF_EXTERNAL;
	uint32_t pkt_cnt;
	uint32_t pp_region_flags = 0;
	int err = 0;
	bool kernel_only;
	bool tx_pool = true;

	if (ppp == NULL || init == NULL ||
	    init->kbi_version != KERN_PBUFPOOL_CURRENT_VERSION ||
	    init->kbi_packets == 0 || (init->kbi_buflets != 0 &&
	    init->kbi_buflets < init->kbi_packets &&
	    !(init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) ||
	    init->kbi_bufsize == 0 || init->kbi_max_frags == 0 ||
	    ((init->kbi_flags & KBIF_QUANTUM) &&
	    (init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) ||
	    KBI_INVALID_CB_PAIRS(buf_seg_ctor, buf_seg_dtor) ||
	    KBI_INVALID_CB_PAIRS(ctx_retain, ctx_release)) {
		err = EINVAL;
		goto done;
	}

	*ppp = NULL;

	md_type = ((init->kbi_flags & KBIF_QUANTUM) ?
	    NEXUS_META_TYPE_QUANTUM : NEXUS_META_TYPE_PACKET);

	/*
	 * If packet, we assume this is for a driver handling raw frames.
	 * This also implies that at present, we do not create mirrored
	 * regions for user space to conserve memory (since those regions
	 * aren't going to be used anyway.)
	 *
	 * XXX: [email protected] - to allow for "direct" channels from
	 * user process to driver, we will need to revisit this.
	 */
	kernel_only = (md_type == NEXUS_META_TYPE_PACKET) &&
#if (DEVELOPMENT || DEBUG)
	    !skywalk_netif_direct_enabled() &&
#endif /* (DEVELOPMENT || DEBUG) */
	    ((init->kbi_flags & KBIF_USER_ACCESS) == 0);

	VERIFY((init->kbi_max_frags != 0) &&
	    (init->kbi_max_frags <= UINT16_MAX));
	max_frags = (uint16_t)init->kbi_max_frags;
	if (md_type == NEXUS_META_TYPE_QUANTUM && max_frags > 1) {
		err = EINVAL;
		goto done;
	}
	if ((max_frags > 1) && !(init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) {
		err = EINVAL;
		goto done;
	}

	bzero(&srp, sizeof(srp));
	for (int i = 0; i < SKMEM_REGIONS; i++) {
		srp[i] = *skmem_get_default(i);
	}

	switch (init->kbi_flags & (KBIF_IODIR_IN | KBIF_IODIR_OUT)) {
	case KBIF_IODIR_IN:
		pp_region_flags |= PP_REGION_CONFIG_BUF_IODIR_IN;
		tx_pool = false;
		break;
	case KBIF_IODIR_OUT:
		pp_region_flags |= PP_REGION_CONFIG_BUF_IODIR_OUT;
		break;
	case (KBIF_IODIR_IN | KBIF_IODIR_OUT):
	default:
		pp_region_flags |= PP_REGION_CONFIG_BUF_IODIR_BIDIR;
		break;
	}

	if (init->kbi_flags & KBIF_BUFFER_ON_DEMAND) {
		pp_region_flags |= PP_REGION_CONFIG_BUFLET;
	}
	if (kernel_only) {
		pp_region_flags |= PP_REGION_CONFIG_KERNEL_ONLY;
	}
	if (init->kbi_flags & KBIF_KERNEL_READONLY) {
		pp_region_flags |= PP_REGION_CONFIG_BUF_KREADONLY;
	}
	if (init->kbi_flags & KBIF_THREADSAFE) {
		pp_region_flags |= PP_REGION_CONFIG_BUF_THREADSAFE;
	}
	/*
	 * Enable magazine layer for metadata.
	 */
	if (!(init->kbi_flags & KBIF_NO_MAGAZINES)) {
		pp_region_flags |= PP_REGION_CONFIG_MD_MAGAZINE_ENABLE;
	}
	pp_region_flags |= PP_REGION_CONFIG_MD_PERSISTENT;

	pkt_cnt = init->kbi_packets;
	/*
	 * For TCP to be able to send a 4MB window worth of data, the packet
	 * pool must have at least 4MB/MTU packets. On devices which are not
	 * memory constrained, we can increase the pool to at least
	 * 4K packets.
	 */
	if (tx_pool && !SKMEM_MEM_CONSTRAINED_DEVICE() &&
#if (DEVELOPMENT || DEBUG)
	    !skmem_test_enabled() &&
#endif /* (DEVELOPMENT || DEBUG) */
	    !(init->kbi_flags & KBIF_MONOLITHIC) &&
	    !(init->kbi_flags & KBIF_VIRTUAL_DEVICE) &&
	    !(init->kbi_flags & KBIF_PHYS_CONTIGUOUS) &&
	    !(init->kbi_flags & KBIF_KERNEL_READONLY) &&
	    !(init->kbi_flags & KBIF_QUANTUM)) {
		pkt_cnt = MAX((4 * 1024), pkt_cnt);
	}
#if (DEVELOPMENT || DEBUG)
	if (sk_min_pool_size != 0) {
		pkt_cnt = MAX(pkt_cnt, sk_min_pool_size);
	}
#endif /* (DEVELOPMENT || DEBUG) */
	/* make sure # of buffers is >= # of packets */
	buf_cnt = MAX(pkt_cnt, init->kbi_buflets);

	/*
	 * Apply same logic as in nxprov_create_common().
	 */
	if (init->kbi_flags &
	    (KBIF_PERSISTENT | KBIF_MONOLITHIC | KBIF_INHIBIT_CACHE |
	    KBIF_PHYS_CONTIGUOUS)) {
		if (init->kbi_flags & KBIF_PERSISTENT) {
			pp_region_flags |= PP_REGION_CONFIG_BUF_PERSISTENT;
		}
		if (init->kbi_flags & KBIF_MONOLITHIC) {
			pp_region_flags |= PP_REGION_CONFIG_BUF_MONOLITHIC;
		}
		if (init->kbi_flags & KBIF_INHIBIT_CACHE) {
			pp_region_flags |= PP_REGION_CONFIG_BUF_NOCACHE;
		}
		if (init->kbi_flags & KBIF_PHYS_CONTIGUOUS) {
			pp_region_flags |= PP_REGION_CONFIG_BUF_SEGPHYSCONTIG;
		}
	}

	/* adjust region params */
	pp_regions_params_adjust(srp, md_type, NEXUS_META_SUBTYPE_RAW, pkt_cnt,
	    max_frags, init->kbi_bufsize, 0, buf_cnt, init->kbi_buf_seg_size,
	    pp_region_flags);

	/*
	 * Create packet pool.
	 */
	ASSERT(ppcreatef & PPCREATEF_EXTERNAL);
	if (kernel_only) {
		ppcreatef |= PPCREATEF_KERNEL_ONLY;
	}
	if (init->kbi_flags & KBIF_BUFFER_ON_DEMAND) {
		ppcreatef |= PPCREATEF_ONDEMAND_BUF;
	}
	/*
	 * Enable CPU-layer magazine resizing if this is a long-lived
	 * pbufpool, e.g. one that's allocated by a device driver.
	 */
	if (!(init->kbi_flags & KBIF_VIRTUAL_DEVICE)) {
		ppcreatef |= PPCREATEF_DYNAMIC;
	}
	if ((pp = pp_create(
	    __unsafe_null_terminated_from_indexable(init->kbi_name), srp,
	    init->kbi_buf_seg_ctor, init->kbi_buf_seg_dtor,
	    init->kbi_ctx, init->kbi_ctx_retain, init->kbi_ctx_release,
	    ppcreatef)) == NULL) {
		err = ENOMEM;
		goto done;
	}

	*ppp = pp;

	if (pp_info != NULL) {
		err = kern_pbufpool_get_memory_info(pp, pp_info);
		VERIFY(err == 0);
	}

done:
	if (err != 0 && pp != NULL) {
		/* callee drops reference */
		pp_close(pp);
		pp = NULL;
	}

	return err;
}
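
/*
 * Example (a minimal sketch, not taken from any particular driver): create
 * a pool of single-buflet packets for a hypothetical virtual device. The
 * name, counts and sizes below are illustrative only.
 *
 *	struct kern_pbufpool_init pp_init;
 *	kern_pbufpool_t pp = NULL;
 *	errno_t err;
 *
 *	bzero(&pp_init, sizeof(pp_init));
 *	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
 *	(void) snprintf(pp_init.kbi_name, sizeof(pp_init.kbi_name),
 *	    "%s", "example_pool");
 *	pp_init.kbi_flags = KBIF_VIRTUAL_DEVICE;
 *	pp_init.kbi_packets = 512;
 *	pp_init.kbi_max_frags = 1;
 *	pp_init.kbi_bufsize = 2048;
 *	pp_init.kbi_buflets = 512;
 *	pp_init.kbi_buf_seg_size = 0;	(0 assumed to request the default)
 *	err = kern_pbufpool_create(&pp_init, &pp, NULL);
 *	...
 *	kern_pbufpool_destroy(pp);
 */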

/*
 * -fbounds-safety: this function is mainly used by kexts written in C++,
 * for which we don't do bounds checks yet, so leave the return type as
 * __single.
 */
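/*
 * Note: a non-NULL context is returned with an extra reference taken via
 * the pool's context-retain callback; the caller is responsible for the
 * matching release.
 */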
void *__single
kern_pbufpool_get_context(const kern_pbufpool_t pp)
{
	void *__single ctx = (pp->pp_flags & PPF_EXTERNAL) ? pp->pp_ctx : NULL;
	if (ctx != NULL) {
		pp->pp_ctx_retain(ctx);
	}
	return ctx;
}

errno_t
kern_pbufpool_get_memory_info(const kern_pbufpool_t pp,
    struct kern_pbufpool_memory_info *pp_info)
{
	if (pp_info == NULL) {
		return EINVAL;
	}

	bzero(pp_info, sizeof(*pp_info));
	if (pp->pp_flags & PPF_EXTERNAL) {
		pp_info->kpm_flags |= KPMF_EXTERNAL;
	}
	pp_info->kpm_packets = pp->pp_kmd_region->skr_c_obj_cnt;
	pp_info->kpm_max_frags = pp->pp_max_frags;
	pp_info->kpm_buflets = PP_BUF_REGION_DEF(pp)->skr_c_obj_cnt;
	pp_info->kpm_bufsize = PP_BUF_SIZE_DEF(pp);
	pp_info->kpm_buf_obj_size = PP_BUF_OBJ_SIZE_DEF(pp);
	pp_info->kpm_bufsegs = PP_BUF_REGION_DEF(pp)->skr_seg_max_cnt;
	pp_info->kpm_buf_seg_size = PP_BUF_REGION_DEF(pp)->skr_seg_size;

	return 0;
}
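
/*
 * Example (sketch): query the adjusted pool geometry after creation. The
 * reported counts reflect what was actually provisioned, which may exceed
 * what was requested in kern_pbufpool_init.
 *
 *	struct kern_pbufpool_memory_info info;
 *	if (kern_pbufpool_get_memory_info(pp, &info) == 0) {
 *		... use info.kpm_packets, info.kpm_bufsize, etc. ...
 *	}
 */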

kern_segment_idx_t
kern_segment_get_index(const kern_segment_t seg)
{
	return seg->sg_index;
}

static errno_t
kern_pbufpool_alloc_common(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph, uint32_t skmflag)
{
	struct __kern_quantum *kqum;

	*pph = 0;

	if (__improbable(bufcnt > pp->pp_max_frags)) {
		return EINVAL;
	}

	if (__improbable((bufcnt != pp->pp_max_frags) &&
	    !PP_HAS_BUFFER_ON_DEMAND(pp))) {
		return EINVAL;
	}

	kqum = SK_PTR_ADDR_KQUM(pp_alloc_packet(pp, (uint16_t)bufcnt, skmflag));
	if (__probable(kqum != NULL)) {
		*pph = SK_PTR_ENCODE(kqum, METADATA_TYPE(kqum),
		    METADATA_SUBTYPE(kqum));
	}

	return (kqum != NULL) ? 0 : ENOMEM;
}

errno_t
kern_pbufpool_alloc(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph)
{
	return kern_pbufpool_alloc_common(pp, bufcnt, pph, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_nosleep(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph)
{
	return kern_pbufpool_alloc_common(pp, bufcnt, pph, SKMEM_NOSLEEP);
}
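
/*
 * Example (sketch): allocate one packet and free it. For a pool created
 * without KBIF_BUFFER_ON_DEMAND, bufcnt must equal the pool's max_frags
 * (1 is assumed here); the nosleep variant may fail with ENOMEM.
 *
 *	kern_packet_t ph = 0;
 *	errno_t err = kern_pbufpool_alloc_nosleep(pp, 1, &ph);
 *	if (err == 0) {
 *		...
 *		kern_pbufpool_free(pp, ph);
 *	}
 */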

static errno_t
kern_pbufpool_alloc_batch_common(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *__counted_by(*size)array,
    uint32_t *size, alloc_cb_func_t cb, const void *ctx, uint32_t skmflag)
{
	if (__improbable(array == NULL || size == NULL || *size == 0 ||
	    bufcnt > pp->pp_max_frags || (cb == NULL && ctx != NULL))) {
		return EINVAL;
	}

	if (__improbable((bufcnt != pp->pp_max_frags) &&
	    !PP_HAS_BUFFER_ON_DEMAND(pp))) {
		return EINVAL;
	}

	return pp_alloc_packet_batch(pp, (uint16_t)bufcnt, array, size, TRUE,
	    cb, ctx, skmflag);
}

errno_t
kern_pbufpool_alloc_batch(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *__counted_by(*size)array, uint32_t *size)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, NULL, NULL, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_batch_callback(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *__counted_by(*size)array,
    uint32_t *size, alloc_cb_func_t cb, const void *ctx)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, cb, ctx, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_batch_nosleep(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *__counted_by(*size)array,
    uint32_t *size)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, NULL, NULL, SKMEM_NOSLEEP);
}

errno_t
kern_pbufpool_alloc_batch_nosleep_callback(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *__counted_by(*size)array,
    uint32_t *size, alloc_cb_func_t cb, const void *ctx)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, cb, ctx, SKMEM_NOSLEEP);
}
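
/*
 * Example (sketch): batch-allocate single-buflet packets. *size is
 * treated as in/out here: the caller sets it to the capacity of the
 * array, and on return it holds the number of packets actually allocated
 * (assumed semantics; the nosleep variant can deliver fewer than
 * requested).
 *
 *	kern_packet_t pkts[32];
 *	uint32_t cnt = 32;
 *	errno_t err = kern_pbufpool_alloc_batch_nosleep(pp, 1, pkts, &cnt);
 *	if (err == 0 || cnt > 0) {
 *		...
 *		kern_pbufpool_free_batch(pp, pkts, cnt);
 *	}
 */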

void
kern_pbufpool_free(const kern_pbufpool_t pp, kern_packet_t ph)
{
	pp_free_packet(pp, SK_PTR_ADDR(ph));
}

void
kern_pbufpool_free_batch(const kern_pbufpool_t pp,
    kern_packet_t *__counted_by(size)array, uint32_t size)
{
	if (__improbable(array == NULL || size == 0)) {
		return;
	}

	pp_free_packet_batch(pp, array, size);
}

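/*
 * Note: only the head of the chain is verified to belong to pp; the caller
 * must ensure that every packet in the chain was allocated from the same
 * pool.
 */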
void
kern_pbufpool_free_chain(const kern_pbufpool_t pp, kern_packet_t chain)
{
	struct __kern_packet *pkt_chain = SK_PTR_ADDR_KPKT(chain);

	VERIFY(pp == pkt_chain->pkt_qum.qum_pp);
	pp_free_packet_chain(pkt_chain, NULL);
}

errno_t
kern_pbufpool_alloc_buffer(const kern_pbufpool_t pp, mach_vm_address_t *buf,
    kern_segment_t *sg, kern_obj_idx_seg_t *sg_idx)
{
	return pp_alloc_buffer(pp, buf, sg, sg_idx, 0);
}

errno_t
kern_pbufpool_alloc_buffer_nosleep(const kern_pbufpool_t pp,
    mach_vm_address_t *buf, kern_segment_t *sg, kern_obj_idx_seg_t *sg_idx)
{
	return pp_alloc_buffer(pp, buf, sg, sg_idx, SKMEM_NOSLEEP);
}
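
/*
 * Example (sketch): allocate a raw buffer and release it. Passing NULL
 * for sg/sg_idx assumes the segment out-parameters are optional; pass
 * valid pointers if the backing segment and index are needed.
 *
 *	mach_vm_address_t baddr = 0;
 *	if (kern_pbufpool_alloc_buffer_nosleep(pp, &baddr, NULL, NULL) == 0) {
 *		...
 *		kern_pbufpool_free_buffer(pp, baddr);
 *	}
 */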

void
kern_pbufpool_free_buffer(const kern_pbufpool_t pp, mach_vm_address_t baddr)
{
	pp_free_buffer(pp, baddr);
}

void
kern_pbufpool_destroy(kern_pbufpool_t pp)
{
	VERIFY(pp->pp_flags & PPF_EXTERNAL);
	pp_close(pp);
}

errno_t
kern_pbufpool_alloc_buflet(const kern_pbufpool_t pp, kern_buflet_t *pbuf)
{
	return pp_alloc_buflet(pp, pbuf, SKMEM_SLEEP, false);
}

errno_t
kern_pbufpool_alloc_buflet_nosleep(const kern_pbufpool_t pp,
    kern_buflet_t *pbuf)
{
	return pp_alloc_buflet(pp, pbuf, SKMEM_NOSLEEP, false);
}
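
/*
 * Example (sketch): allocate an extra buflet from a pool created with
 * KBIF_BUFFER_ON_DEMAND. Attaching it to a packet is done through the
 * buflet KPI (e.g. kern_packet_add_buflet(), if available in this build).
 *
 *	kern_buflet_t buf = NULL;
 *	if (kern_pbufpool_alloc_buflet_nosleep(pp, &buf) == 0) {
 *		...
 *	}
 */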