/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/packet/pbufpool_var.h>

static errno_t kern_pbufpool_alloc_common(const kern_pbufpool_t,
    const uint32_t, kern_packet_t *, uint32_t);
static errno_t kern_pbufpool_alloc_batch_common(const kern_pbufpool_t,
    const uint32_t, kern_packet_t *, uint32_t *, alloc_cb_func_t,
    const void *, uint32_t);

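/*
 * Evaluates to true when exactly one callback of a constructor/destructor
 * pair is NULL, i.e. the client supplied one half of the pair without the
 * other; such a pairing is rejected as invalid.
 */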
#define KBI_INVALID_CB_PAIRS(cb1, cb2) \
	(!(init->kbi_##cb1 == NULL && init->kbi_##cb2 == NULL) && \
	((init->kbi_##cb1 == NULL) ^ (init->kbi_##cb2 == NULL)))

errno_t
kern_pbufpool_create(const struct kern_pbufpool_init *init,
    kern_pbufpool_t *ppp, struct kern_pbufpool_memory_info *pp_info)
{
	/* XXX: woodford_s - find a way to get 'srp' off the kernel stack */
	struct skmem_region_params srp[SKMEM_REGIONS];
	struct skmem_region_params *buf_srp = NULL;
	struct skmem_region_params *kmd_srp = NULL;
	struct skmem_region_params *umd_srp = NULL;
	struct skmem_region_params *ubft_srp = NULL;
	struct skmem_region_params *kbft_srp = NULL;
	struct kern_pbufpool *pp = NULL;
	nexus_meta_type_t md_type;
	nexus_meta_subtype_t md_subtype;
	uint32_t buf_cnt;
	uint16_t max_frags;
	uint32_t ppcreatef = PPCREATEF_EXTERNAL;
	uint32_t pkt_cnt;
	int err = 0;
	bool kernel_only;
	bool tx_pool = true;

	if (ppp == NULL || init == NULL ||
	    init->kbi_version != KERN_PBUFPOOL_CURRENT_VERSION ||
	    init->kbi_packets == 0 || (init->kbi_buflets != 0 &&
	    init->kbi_buflets < init->kbi_packets &&
	    !(init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) ||
	    init->kbi_bufsize == 0 || init->kbi_max_frags == 0 ||
	    ((init->kbi_flags & KBIF_QUANTUM) &&
	    (init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) ||
	    KBI_INVALID_CB_PAIRS(buf_seg_ctor, buf_seg_dtor)) {
		err = EINVAL;
		goto done;
	}

	*ppp = NULL;

	md_type = ((init->kbi_flags & KBIF_QUANTUM) ?
	    NEXUS_META_TYPE_QUANTUM : NEXUS_META_TYPE_PACKET);

	/*
	 * If packet, we assume this is for a driver handling raw frames.
	 * This also implies that at present, we do not create mirrored
	 * regions for user space to conserve memory (since those regions
	 * aren't going to be used anyway.)
	 *
	 * XXX: [email protected] - to allow for "direct" channels from
	 * user process to driver, we will need to revisit this.
	 */
	md_subtype = ((md_type == NEXUS_META_TYPE_QUANTUM) ?
	    NEXUS_META_SUBTYPE_PAYLOAD : NEXUS_META_SUBTYPE_RAW);
	kernel_only = (md_type == NEXUS_META_TYPE_PACKET) &&
#if (DEVELOPMENT || DEBUG)
	    !skywalk_netif_direct_enabled() &&
#endif /* (DEVELOPMENT || DEBUG) */
	    ((init->kbi_flags & KBIF_USER_ACCESS) == 0);

	VERIFY((init->kbi_max_frags != 0) &&
	    (init->kbi_max_frags <= UINT16_MAX));
	max_frags = (uint16_t)init->kbi_max_frags;
	if (md_type == NEXUS_META_TYPE_QUANTUM && max_frags > 1) {
		err = EINVAL;
		goto done;
	}
	if ((max_frags > 1) && !(init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) {
		err = EINVAL;
		goto done;
	}

	/* pick the right md and buf region based on direction */
	bzero(&srp, sizeof(srp));
	srp[SKMEM_REGION_UMD] = *skmem_get_default(SKMEM_REGION_UMD);
	umd_srp = &srp[SKMEM_REGION_UMD];

	if (init->kbi_flags & KBIF_BUFFER_ON_DEMAND) {
		srp[SKMEM_REGION_KBFT] = *skmem_get_default(SKMEM_REGION_KBFT);
		kbft_srp = &srp[SKMEM_REGION_KBFT];
	}
	if ((kbft_srp != NULL) && (init->kbi_flags & KBIF_USER_ACCESS)) {
		srp[SKMEM_REGION_UBFT] = *skmem_get_default(SKMEM_REGION_UBFT);
		ubft_srp = &srp[SKMEM_REGION_UBFT];
	}

	switch (init->kbi_flags & (KBIF_IODIR_IN | KBIF_IODIR_OUT)) {
	case KBIF_IODIR_IN:
		srp[SKMEM_REGION_RXBUF] = *skmem_get_default(SKMEM_REGION_RXBUF);
		srp[SKMEM_REGION_RXKMD] = *skmem_get_default(SKMEM_REGION_RXKMD);
		buf_srp = &srp[SKMEM_REGION_RXBUF];
		kmd_srp = &srp[SKMEM_REGION_RXKMD];
		tx_pool = false;
		break;
	case KBIF_IODIR_OUT:
		srp[SKMEM_REGION_TXBUF] = *skmem_get_default(SKMEM_REGION_TXBUF);
		srp[SKMEM_REGION_TXKMD] = *skmem_get_default(SKMEM_REGION_TXKMD);
		buf_srp = &srp[SKMEM_REGION_TXBUF];
		kmd_srp = &srp[SKMEM_REGION_TXKMD];
		break;
	case (KBIF_IODIR_IN | KBIF_IODIR_OUT):
	default:
		srp[SKMEM_REGION_BUF] = *skmem_get_default(SKMEM_REGION_BUF);
		srp[SKMEM_REGION_KMD] = *skmem_get_default(SKMEM_REGION_KMD);
		buf_srp = &srp[SKMEM_REGION_BUF];
		kmd_srp = &srp[SKMEM_REGION_KMD];
		break;
	}

	if (init->kbi_flags & KBIF_KERNEL_READONLY) {
		buf_srp->srp_cflags |= SKMEM_REGION_CR_KREADONLY;
	}

	/*
	 * Disable/enable magazine layer for metadata.
	 */
	if (init->kbi_flags & KBIF_NO_MAGAZINES) {
		umd_srp->srp_cflags |= SKMEM_REGION_CR_NOMAGAZINES;
		kmd_srp->srp_cflags |= SKMEM_REGION_CR_NOMAGAZINES;
		if (kbft_srp != NULL) {
			kbft_srp->srp_cflags |= SKMEM_REGION_CR_NOMAGAZINES;
		}
		if (ubft_srp != NULL) {
			ubft_srp->srp_cflags |= SKMEM_REGION_CR_NOMAGAZINES;
		}
	} else {
		umd_srp->srp_cflags &= ~SKMEM_REGION_CR_NOMAGAZINES;
		kmd_srp->srp_cflags &= ~SKMEM_REGION_CR_NOMAGAZINES;
		if (kbft_srp != NULL) {
			kbft_srp->srp_cflags &= ~SKMEM_REGION_CR_NOMAGAZINES;
		}
		if (ubft_srp != NULL) {
			ubft_srp->srp_cflags &= ~SKMEM_REGION_CR_NOMAGAZINES;
		}
	}
	umd_srp->srp_cflags |= SKMEM_REGION_CR_PERSISTENT;
	kmd_srp->srp_cflags |= SKMEM_REGION_CR_PERSISTENT;
	if (kbft_srp != NULL) {
		kbft_srp->srp_cflags |= SKMEM_REGION_CR_PERSISTENT;
	}
	if (ubft_srp != NULL) {
		ubft_srp->srp_cflags |= SKMEM_REGION_CR_PERSISTENT;
	}

	pkt_cnt = init->kbi_packets;
	/*
	 * For TCP to be able to send a 4MB window worth of data, the packet
	 * pool must have at least 4MB/MTU packets.  On devices that are not
	 * memory constrained, we can increase the pool to at least
	 * 4K packets.
	 */
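	/*
	 * For example, assuming a typical 1500-byte MTU, 4MB / 1500 is
	 * roughly 2800 packets, so a 4K-packet floor comfortably covers
	 * a full window in flight.
	 */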
	if (tx_pool && !SKMEM_MEM_CONSTRAINED_DEVICE &&
#if (DEVELOPMENT || DEBUG)
	    !skmem_test_enabled() &&
#endif /* (DEVELOPMENT || DEBUG) */
	    !(init->kbi_flags & KBIF_MONOLITHIC) &&
	    !(init->kbi_flags & KBIF_VIRTUAL_DEVICE) &&
	    !(init->kbi_flags & KBIF_PHYS_CONTIGUOUS) &&
	    !(init->kbi_flags & KBIF_KERNEL_READONLY) &&
	    !(init->kbi_flags & KBIF_QUANTUM)) {
		pkt_cnt = MAX((4 * 1024), pkt_cnt);
	}
#if (DEVELOPMENT || DEBUG)
	if (sk_min_pool_size != 0) {
		pkt_cnt = MAX(pkt_cnt, sk_min_pool_size);
	}
#endif /* (DEVELOPMENT || DEBUG) */
	/* make sure # of buffers is >= # of packets */
	buf_cnt = MAX(pkt_cnt, init->kbi_buflets);

	/* adjust region params; we may override below */
	pp_regions_params_adjust(buf_srp, kmd_srp, umd_srp, kbft_srp,
	    ubft_srp, md_type, md_subtype, pkt_cnt, max_frags,
	    init->kbi_bufsize, buf_cnt);

	/*
	 * Apply same logic as in nxprov_create_common().
	 */
	if (init->kbi_flags &
	    (KBIF_PERSISTENT | KBIF_MONOLITHIC | KBIF_INHIBIT_CACHE |
	    KBIF_PHYS_CONTIGUOUS)) {
		if (init->kbi_flags & KBIF_PERSISTENT) {
			buf_srp->srp_cflags |= SKMEM_REGION_CR_PERSISTENT;
		} else {
			buf_srp->srp_cflags &= ~SKMEM_REGION_CR_PERSISTENT;
		}

		/*
		 * Set SKMEM_REGION_CR_MONOLITHIC if the provider does
		 * not want more than a single segment for the entire region.
		 */
		if (init->kbi_flags & KBIF_MONOLITHIC) {
			buf_srp->srp_cflags |= SKMEM_REGION_CR_MONOLITHIC;
		} else {
			buf_srp->srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;
		}

		if (init->kbi_flags & KBIF_INHIBIT_CACHE) {
			buf_srp->srp_cflags |= SKMEM_REGION_CR_NOCACHE;
		} else {
			buf_srp->srp_cflags &= ~SKMEM_REGION_CR_NOCACHE;
		}
		if (init->kbi_flags & KBIF_PHYS_CONTIGUOUS) {
			buf_srp->srp_cflags |= SKMEM_REGION_CR_SEGPHYSCONTIG;
		} else {
			buf_srp->srp_cflags &= ~SKMEM_REGION_CR_SEGPHYSCONTIG;
		}
	}

	buf_srp->srp_r_seg_size = init->kbi_buf_seg_size;
	skmem_region_params_config(buf_srp);

	/*
	 * Create packet pool.
	 */
	ASSERT(ppcreatef & PPCREATEF_EXTERNAL);
	if (kernel_only) {
		ppcreatef |= PPCREATEF_KERNEL_ONLY;
	}
	if (init->kbi_flags & KBIF_BUFFER_ON_DEMAND) {
		ppcreatef |= PPCREATEF_ONDEMAND_BUF;
	}
	/*
	 * Enable CPU-layer magazine resizing if this is a long-lived
	 * pbufpool, e.g. one that's allocated by a device driver.
	 */
	if (!(init->kbi_flags & KBIF_VIRTUAL_DEVICE)) {
		ppcreatef |= PPCREATEF_DYNAMIC;
	}
	if ((pp = pp_create((const char *)init->kbi_name, buf_srp, kmd_srp,
	    umd_srp, &srp[SKMEM_REGION_KBFT], &srp[SKMEM_REGION_UBFT],
	    init->kbi_buf_seg_ctor, init->kbi_buf_seg_dtor,
	    init->kbi_ctx, init->kbi_ctx_retain, init->kbi_ctx_release,
	    ppcreatef)) == NULL) {
		err = ENOMEM;
		goto done;
	}

	*ppp = pp;

	if (pp_info != NULL) {
		err = kern_pbufpool_get_memory_info(pp, pp_info);
		VERIFY(err == 0);
	}

done:
	if (err != 0 && pp != NULL) {
		/* callee drops reference */
		pp_close(pp);
		pp = NULL;
	}

	return err;
}
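
/*
 * A minimal creation sketch (hypothetical values; a real driver would
 * size these to its ring depths and MTU):
 *
 *	struct kern_pbufpool_init init = {
 *		.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION,
 *		.kbi_packets = 2048,
 *		.kbi_bufsize = 2048,
 *		.kbi_max_frags = 1,
 *		.kbi_flags = KBIF_IODIR_OUT,
 *	};
 *	kern_pbufpool_t pool = NULL;
 *
 *	if (kern_pbufpool_create(&init, &pool, NULL) == 0) {
 *		... allocate/free packets against pool ...
 *		kern_pbufpool_destroy(pool);
 *	}
 */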
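
/*
 * Return the client-supplied context of an externally created pool, or
 * NULL for a pool that is not externally created.  The context is
 * retained via the client's retain callback before being returned,
 * presumably to be balanced by the client's release callback.
 */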
void *
kern_pbufpool_get_context(const kern_pbufpool_t pp)
{
	void *ctx = (pp->pp_flags & PPF_EXTERNAL) ? pp->pp_ctx : NULL;
	if (ctx != NULL) {
		pp->pp_ctx_retain(ctx);
	}
	return ctx;
}

errno_t
kern_pbufpool_get_memory_info(const kern_pbufpool_t pp,
    struct kern_pbufpool_memory_info *pp_info)
{
	if (pp_info == NULL) {
		return EINVAL;
	}

	bzero(pp_info, sizeof(*pp_info));
	if (pp->pp_flags & PPF_EXTERNAL) {
		pp_info->kpm_flags |= KPMF_EXTERNAL;
	}
	pp_info->kpm_packets = pp->pp_kmd_region->skr_c_obj_cnt;
	pp_info->kpm_max_frags = pp->pp_max_frags;
	pp_info->kpm_buflets = pp->pp_buf_region->skr_c_obj_cnt;
	pp_info->kpm_bufsize = pp->pp_buflet_size;
	pp_info->kpm_bufsegs = pp->pp_buf_region->skr_seg_max_cnt;
	pp_info->kpm_buf_seg_size = pp->pp_buf_region->skr_seg_size;

	return 0;
}

kern_segment_idx_t
kern_segment_get_index(const kern_segment_t seg)
{
	return seg->sg_index;
}

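/*
 * Common allocation path.  A caller may request fewer than pp_max_frags
 * buffers only on a pool created with buffer-on-demand; otherwise the
 * buffer count must match pp_max_frags exactly.
 */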
static errno_t
kern_pbufpool_alloc_common(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph, uint32_t skmflag)
{
	struct __kern_quantum *kqum;

	*pph = 0;

	if (__improbable(bufcnt > pp->pp_max_frags)) {
		return EINVAL;
	}

	if (__improbable((bufcnt != pp->pp_max_frags) &&
	    !PP_HAS_BUFFER_ON_DEMAND(pp))) {
		return EINVAL;
	}

	kqum = SK_PTR_ADDR_KQUM(pp_alloc_packet(pp, (uint16_t)bufcnt, skmflag));
	if (__probable(kqum != NULL)) {
		*pph = SK_PTR_ENCODE(kqum, METADATA_TYPE(kqum),
		    METADATA_SUBTYPE(kqum));
	}

	return (kqum != NULL) ? 0 : ENOMEM;
}

errno_t
kern_pbufpool_alloc(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph)
{
	return kern_pbufpool_alloc_common(pp, bufcnt, pph, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_nosleep(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph)
{
	return kern_pbufpool_alloc_common(pp, bufcnt, pph, SKMEM_NOSLEEP);
}

static errno_t
kern_pbufpool_alloc_batch_common(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *array, uint32_t *size,
    alloc_cb_func_t cb, const void *ctx, uint32_t skmflag)
{
	if (__improbable(array == NULL || size == NULL || *size == 0 ||
	    bufcnt > pp->pp_max_frags || (cb == NULL && ctx != NULL))) {
		return EINVAL;
	}

	if (__improbable((bufcnt != pp->pp_max_frags) &&
	    !PP_HAS_BUFFER_ON_DEMAND(pp))) {
		return EINVAL;
	}

	return pp_alloc_packet_batch(pp, (uint16_t)bufcnt, array, size, TRUE,
	    cb, ctx, skmflag);
}

errno_t
kern_pbufpool_alloc_batch(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *array, uint32_t *size)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, NULL, NULL, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_batch_callback(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *array, uint32_t *size,
    alloc_cb_func_t cb, const void *ctx)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, cb, ctx, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_batch_nosleep(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *array, uint32_t *size)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, NULL, NULL, SKMEM_NOSLEEP);
}

errno_t
kern_pbufpool_alloc_batch_nosleep_callback(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *array, uint32_t *size,
    alloc_cb_func_t cb, const void *ctx)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, cb, ctx, SKMEM_NOSLEEP);
}

void
kern_pbufpool_free(const kern_pbufpool_t pp, kern_packet_t ph)
{
	pp_free_packet(pp, SK_PTR_ADDR(ph));
}

void
kern_pbufpool_free_batch(const kern_pbufpool_t pp, kern_packet_t *array,
    uint32_t size)
{
	if (__improbable(array == NULL || size == 0)) {
		return;
	}

	pp_free_packet_batch(pp, array, size);
}
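
/*
 * A minimal batch-usage sketch (hypothetical counts, error handling
 * elided; a bufcnt of 1 assumes a pool with pp_max_frags of 1 or one
 * created with buffer-on-demand).  `size' is passed by reference and
 * is expected to carry the requested count in and the allocated count
 * out.
 *
 *	kern_packet_t pkts[32];
 *	uint32_t size = 32;
 *
 *	if (kern_pbufpool_alloc_batch_nosleep(pp, 1, pkts, &size) == 0) {
 *		... use pkts[0 .. size - 1] ...
 *		kern_pbufpool_free_batch(pp, pkts, size);
 *	}
 */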

void
kern_pbufpool_free_chain(const kern_pbufpool_t pp, kern_packet_t chain)
{
	struct __kern_packet *pkt_chain = SK_PTR_ADDR_KPKT(chain);

	VERIFY(pp == pkt_chain->pkt_qum.qum_pp);
	pp_free_packet_chain(pkt_chain, NULL);
}

errno_t
kern_pbufpool_alloc_buffer(const kern_pbufpool_t pp, mach_vm_address_t *buf,
    kern_segment_t *sg, kern_obj_idx_seg_t *sg_idx)
{
	return pp_alloc_buffer(pp, buf, sg, sg_idx, 0);
}

errno_t
kern_pbufpool_alloc_buffer_nosleep(const kern_pbufpool_t pp,
    mach_vm_address_t *buf, kern_segment_t *sg, kern_obj_idx_seg_t *sg_idx)
{
	return pp_alloc_buffer(pp, buf, sg, sg_idx, SKMEM_NOSLEEP);
}

void
kern_pbufpool_free_buffer(const kern_pbufpool_t pp, mach_vm_address_t baddr)
{
	pp_free_buffer(pp, baddr);
}

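/*
 * Only pools created through kern_pbufpool_create() carry PPF_EXTERNAL
 * and may be destroyed here; pp_close() drops the caller's reference.
 */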
void
kern_pbufpool_destroy(kern_pbufpool_t pp)
{
	VERIFY(pp->pp_flags & PPF_EXTERNAL);
	pp_close(pp);
}

errno_t
kern_pbufpool_alloc_buflet(const kern_pbufpool_t pp, kern_buflet_t *pbuf)
{
	return pp_alloc_buflet(pp, pbuf, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_buflet_nosleep(const kern_pbufpool_t pp,
    kern_buflet_t *pbuf)
{
	return pp_alloc_buflet(pp, pbuf, SKMEM_NOSLEEP);
}