1 /*
2 * Copyright (c) 2018-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #if (DEVELOPMENT || DEBUG) /* XXX make this whole file a config option? */
30
31 #include <skywalk/os_skywalk_private.h>
32
33 #define SKMEM_TEST_BUFSIZE 2048
34
35 #if CONFIG_ARROW
36 #define TEST_OPTION_INHIBIT_CACHE 0
37 #else /* !CONFIG_ARROW */
38 #define TEST_OPTION_INHIBIT_CACHE KBIF_INHIBIT_CACHE
39 #endif /* CONFIG_ARROW */
40
41 static void skmem_test_start(void *, wait_result_t);
42 static void skmem_test_stop(void *, wait_result_t);
43 static void skmem_test_func(void *v, wait_result_t w);
44 static void skmem_test_mbfreecb(caddr_t cl, uint32_t size, caddr_t arg);
45 static void skmem_test_alloccb(kern_packet_t, uint32_t, const void *);
46
47 extern unsigned int ml_wait_max_cpus(void);
48 extern kern_return_t thread_terminate(thread_t);
49
50 static int skmt_enabled;
51 static int skmt_busy;
52 static int skmt_mbcnt;
53
54 decl_lck_mtx_data(static, skmt_lock);
55
56 struct skmt_alloc_ctx {
57 uint32_t stc_req; /* # of objects requested */
58 uint32_t stc_idx; /* expected index */
59 };
60
61 static struct skmt_alloc_ctx skmt_alloccb_ctx;
62
63 struct skmt_thread_info {
64 kern_packet_t sti_mph; /* master packet */
65 kern_packet_t sti_mpc; /* cloned packet */
66 thread_t sti_thread; /* thread instance */
67 boolean_t sti_nosleep; /* non-sleeping allocation */
68 } __attribute__((aligned(CHANNEL_CACHE_ALIGN_MAX)));
69
70 static struct skmt_thread_info *skmth_info;
71 static uint32_t skmth_info_size;
72 static int32_t skmth_cnt;
73 static boolean_t skmth_run;
74 static kern_pbufpool_t skmth_pp;
75
76 void
skmem_test_init(void)77 skmem_test_init(void)
78 {
79 lck_mtx_init(&skmt_lock, &sk_lock_group, &sk_lock_attr);
80 }
81
82 void
skmem_test_fini(void)83 skmem_test_fini(void)
84 {
85 lck_mtx_destroy(&skmt_lock, &sk_lock_group);
86 }
87
88 bool
skmem_test_enabled(void)89 skmem_test_enabled(void)
90 {
91 bool enabled;
92 lck_mtx_lock(&skmt_lock);
93 enabled = (skmt_busy != 0);
94 lck_mtx_unlock(&skmt_lock);
95 return enabled;
96 }
97
98 typedef union {
99 char c[2];
100 uint16_t s;
101 } short_union_t;
102
103 typedef union {
104 uint16_t s[2];
105 long l;
106 } long_union_t;
107
108 static void
_reduce(int * sum)109 _reduce(int *sum)
110 {
111 long_union_t l_util;
112
113 l_util.l = *sum;
114 *sum = l_util.s[0] + l_util.s[1];
115 if (*sum > 65535) {
116 *sum -= 65535;
117 }
118 }
119
120 static uint16_t
skmem_reference_sum(void * buffer,int len,int sum0)121 skmem_reference_sum(void *buffer, int len, int sum0)
122 {
123 uint16_t *w;
124 int sum = sum0;
125
126 w = (uint16_t *)buffer;
127 while ((len -= 32) >= 0) {
128 sum += w[0]; sum += w[1];
129 sum += w[2]; sum += w[3];
130 sum += w[4]; sum += w[5];
131 sum += w[6]; sum += w[7];
132 sum += w[8]; sum += w[9];
133 sum += w[10]; sum += w[11];
134 sum += w[12]; sum += w[13];
135 sum += w[14]; sum += w[15];
136 w += 16;
137 }
138 len += 32;
139 while ((len -= 8) >= 0) {
140 sum += w[0]; sum += w[1];
141 sum += w[2]; sum += w[3];
142 w += 4;
143 }
144 len += 8;
145 if (len) {
146 _reduce(&sum);
147 while ((len -= 2) >= 0) {
148 sum += *w++;
149 }
150 }
151 if (len == -1) { /* odd-length packet */
152 short_union_t s_util;
153
154 s_util.s = 0;
155 s_util.c[0] = *((char *)w);
156 s_util.c[1] = 0;
157 sum += s_util.s;
158 }
159 _reduce(&sum);
160 return sum & 0xffff;
161 }
162
163 /*
164 * At present, the number of objects created in the pool will be
165 * higher than the requested amount, if the pool is allowed to use
166 * the magazines layer. Round up a bit to accomodate any rounding
167 * ups done by the pool allocator.
168 */
169 #define MAX_PH_ARY P2ROUNDUP(skmem_cache_magazine_max(1) + 129, 256)
170
171 struct skmem_pp_ctx_s {
172 os_refcnt_t skmem_pp_ctx_refcnt;
173 };
174
175 static struct skmem_pp_ctx_s skmem_pp_ctx;
176
177 static uint32_t
skmem_pp_ctx_refcnt(void * ctx)178 skmem_pp_ctx_refcnt(void *ctx)
179 {
180 struct skmem_pp_ctx_s *pp_ctx = ctx;
181 VERIFY(pp_ctx == &skmem_pp_ctx);
182 return os_ref_get_count(&pp_ctx->skmem_pp_ctx_refcnt);
183 }
184
185 static void
skmem_pp_ctx_retain(void * ctx)186 skmem_pp_ctx_retain(void *ctx)
187 {
188 struct skmem_pp_ctx_s *pp_ctx = ctx;
189 VERIFY(pp_ctx == &skmem_pp_ctx);
190 os_ref_retain(&pp_ctx->skmem_pp_ctx_refcnt);
191 }
192
193 static void
skmem_pp_ctx_release(void * ctx)194 skmem_pp_ctx_release(void *ctx)
195 {
196 struct skmem_pp_ctx_s *pp_ctx = ctx;
197 VERIFY(pp_ctx == &skmem_pp_ctx);
198 (void)os_ref_release(&pp_ctx->skmem_pp_ctx_refcnt);
199 }
200
201 #define BUFLEN 2048
202
203 static void
skmem_buflet_tests(uint32_t flags)204 skmem_buflet_tests(uint32_t flags)
205 {
206 struct kern_pbufpool_init pp_init;
207 struct kern_pbufpool_memory_info pp_mem_info;
208 kern_pbufpool_t pp = NULL;
209 struct kern_pbufpool_init pp_init_mb;
210 kern_pbufpool_t pp_mb = NULL;
211 mach_vm_address_t baddr = 0;
212 kern_obj_idx_seg_t sg_idx;
213 kern_segment_t sg;
214 kern_packet_t *phary = NULL;
215 kern_packet_t *phary2 = NULL;
216 kern_packet_t *pharyc = NULL;
217 struct mbuf **mbary = NULL;
218 uint32_t mbcnt = 0;
219 uint32_t phcnt = 0, maxphcnt = 0;
220 uint32_t phcloned = 0;
221 size_t mblen = BUFLEN;
222 kern_packet_t ph, ph_mb;
223 uint32_t i;
224 errno_t err;
225
226 /* packets only */
227 VERIFY(!(flags & KBIF_QUANTUM));
228
229 SK_ERR("flags 0x%x", flags);
230
231 phary = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
232 Z_WAITOK | Z_ZERO);
233 phary2 = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
234 Z_WAITOK | Z_ZERO);
235 pharyc = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
236 Z_WAITOK | Z_ZERO);
237 mbary = _MALLOC(sizeof(struct mbuf *) * MAX_PH_ARY, M_TEMP,
238 M_WAITOK | M_ZERO);
239
240 os_ref_init(&skmem_pp_ctx.skmem_pp_ctx_refcnt, NULL);
241 bzero(&pp_init, sizeof(pp_init));
242 pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
243 pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
244 (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
245 "%s", "skmem_buflet_tests");
246 pp_init.kbi_flags = flags;
247 pp_init.kbi_ctx = &skmem_pp_ctx;
248 pp_init.kbi_ctx_retain = skmem_pp_ctx_retain;
249 pp_init.kbi_ctx_release = skmem_pp_ctx_release;
250
251 /* must fail if packets is 0 */
252 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
253 pp_init.kbi_packets = 64;
254 /* must fail if bufsize is 0 */
255 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
256 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
257 /* must fail if max_frags is 0 */
258 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
259
260 pp_init.kbi_max_frags = 1;
261 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
262 VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 2);
263 void *ctx = kern_pbufpool_get_context(pp);
264 VERIFY(ctx == &skmem_pp_ctx);
265 VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 3);
266 skmem_pp_ctx_release(ctx);
267 VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 2);
268 bzero(&pp_mem_info, sizeof(pp_mem_info));
269 VERIFY(kern_pbufpool_get_memory_info(pp, NULL) == EINVAL);
270 VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
271 VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
272 VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
273 VERIFY(pp_mem_info.kpm_packets >= 64);
274 VERIFY(pp_mem_info.kpm_packets <= MAX_PH_ARY);
275 VERIFY(pp_mem_info.kpm_max_frags == 1);
276 VERIFY(pp_mem_info.kpm_buflets >= 64);
277 VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
278 VERIFY(kern_pbufpool_alloc(pp, 0, &ph) == EINVAL ||
279 (flags & KBIF_BUFFER_ON_DEMAND));
280 if (ph != 0) {
281 kern_packet_t phc = 0;
282 kern_buflet_t buflet;
283
284 VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
285 VERIFY((buflet = kern_packet_get_next_buflet(ph, NULL)) == NULL);
286 VERIFY(kern_packet_clone(ph, &phc, KPKT_COPY_LIGHT) == EINVAL);
287 VERIFY(kern_packet_clone(ph, &phc, KPKT_COPY_HEAVY) == EINVAL);
288 kern_pbufpool_free(pp, ph);
289 ph = 0;
290 }
291 maxphcnt = 32;
292 VERIFY(kern_pbufpool_alloc(pp, 5, &ph) == EINVAL);
293 if (flags & KBIF_BUFFER_ON_DEMAND) {
294 /* allocate and free one at a time (no buflet) */
295 for (i = 0, phcnt = 0; i < maxphcnt; i++) {
296 boolean_t stop = FALSE;
297 /*
298 * This may fail if skmem_region_mtbf is set, or if
299 * the system is short on memory. Perform retries at
300 * this layer to get at least 32 packets.
301 */
302 while ((err = kern_pbufpool_alloc_nosleep(pp, 0, &ph)) != 0) {
303 VERIFY(err == ENOMEM);
304 if (phcnt < 32) {
305 SK_ERR("[a] retrying alloc for packet %u",
306 phcnt);
307 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
308 continue;
309 }
310 stop = TRUE;
311 break;
312 }
313 if (stop) {
314 break;
315 }
316 VERIFY(ph != 0);
317 VERIFY(kern_packet_get_data_length(ph) == 0);
318 VERIFY(kern_packet_get_buflet_count(ph) == 0);
319 phary[phcnt++] = ph;
320 }
321 VERIFY(phcnt >= 32);
322 for (i = 0; i < phcnt; i++) {
323 kern_pbufpool_free(pp, phary[i]);
324 phary[i] = 0;
325 }
326 }
327 /* allocate and free one at a time (1 buflet) */
328 for (i = 0, phcnt = 0; i < maxphcnt; i++) {
329 boolean_t stop = FALSE;
330 /*
331 * This may fail if skmem_region_mtbf is set, or if
332 * the system is short on memory. Perform retries at
333 * this layer to get at least 32 packets.
334 */
335 while ((err = kern_pbufpool_alloc_nosleep(pp, 1, &ph)) != 0) {
336 VERIFY(err == ENOMEM);
337 if (phcnt < 32) {
338 SK_ERR("[a] retrying alloc for packet %u",
339 phcnt);
340 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
341 continue;
342 }
343 stop = TRUE;
344 break;
345 }
346 if (stop) {
347 break;
348 }
349 VERIFY(ph != 0);
350 VERIFY(kern_packet_get_data_length(ph) == 0);
351 VERIFY(kern_packet_get_buflet_count(ph) == 1);
352 phary[phcnt++] = ph;
353 }
354 VERIFY(phcnt >= 32);
355 for (i = 0; i < phcnt; i++) {
356 kern_pbufpool_free(pp, phary[i]);
357 phary[i] = 0;
358 }
359 /* allocate and free in batch */
360 phcnt = maxphcnt;
361 for (;;) {
362 err = kern_pbufpool_alloc_batch_nosleep(pp, 1, phary, &phcnt);
363 VERIFY(err != EINVAL);
364 if (err == ENOMEM) {
365 phcnt = maxphcnt;
366 SK_ERR("retrying batch alloc for %u packets", phcnt);
367 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
368 } else if (err == EAGAIN) {
369 SK_ERR("batch alloc for %u packets only returned %u",
370 maxphcnt, phcnt);
371 break;
372 } else {
373 VERIFY(err == 0);
374 break;
375 }
376 }
377 VERIFY(phcnt > 0);
378 for (i = 0; i < phcnt; i++) {
379 VERIFY(phary[i] != 0);
380 VERIFY(kern_packet_get_data_length(phary[i]) == 0);
381 VERIFY(kern_packet_get_buflet_count(phary[i]) == 1);
382 }
383 kern_pbufpool_free_batch(pp, phary, phcnt);
384 /* allocate and free one at a time (blocking) */
385 for (i = 0, phcnt = 0; i < maxphcnt; i++) {
386 VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
387 VERIFY(ph != 0);
388 VERIFY(kern_packet_get_data_length(ph) == 0);
389 VERIFY(kern_packet_get_buflet_count(ph) == 1);
390 phary[phcnt++] = ph;
391 }
392 VERIFY(phcnt >= 32);
393 for (i = 0; i < phcnt; i++) {
394 kern_pbufpool_free(pp, phary[i]);
395 phary[i] = 0;
396 }
397 /* allocate with callback */
398 bzero(&skmt_alloccb_ctx, sizeof(skmt_alloccb_ctx));
399 skmt_alloccb_ctx.stc_req = phcnt;
400 VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
401 NULL, &skmt_alloccb_ctx) == EINVAL);
402 VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
403 skmem_test_alloccb, &skmt_alloccb_ctx) == 0);
404 VERIFY(skmt_alloccb_ctx.stc_idx == phcnt);
405 kern_pbufpool_free_batch(pp, phary, phcnt);
406
407 /*
408 * Allocate and free test
409 * Case 1: Packet has an mbuf attached
410 */
411 mbcnt = phcnt;
412 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
413 /* clone packets (lightweight, without mbufs) */
414 for (i = 0; i < phcnt; i++) {
415 kern_buflet_t buflet, buflet2;
416 kern_obj_idx_seg_t buf_idx_seg, buf2_idx_seg;
417
418 VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
419 NULL)) != NULL);
420 VERIFY(kern_buflet_set_data_length(buflet, BUFLEN) == 0);
421 VERIFY(__packet_finalize(phary[i]) == 0);
422 VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
423 (void) memset(kern_buflet_get_data_address(buflet), i, BUFLEN);
424 kern_packet_set_trace_id(phary[i], i);
425 VERIFY(kern_packet_get_trace_id(phary[i]) == i);
426 VERIFY(kern_packet_clone(phary[i], &pharyc[i],
427 KPKT_COPY_LIGHT) == 0 || !(flags & KBIF_BUFFER_ON_DEMAND));
428 if (pharyc[i] != 0) {
429 struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(pharyc[i]);
430 /*
431 * Source packet was allocated with 1 buffer, so
432 * validate that the clone packet points to that
433 * same buffer, and that the buffer's usecnt is 2.
434 */
435 VERIFY(!(QUM_ADDR(pharyc[i])->qum_qflags & QUM_F_FINALIZED));
436 VERIFY(kpkt2->pkt_mbuf == NULL);
437 VERIFY(!(kpkt2->pkt_pflags & PKT_F_MBUF_MASK));
438 VERIFY((buflet2 = kern_packet_get_next_buflet(pharyc[i],
439 NULL)) != NULL);
440 VERIFY(kern_buflet_get_object_address(buflet) ==
441 kern_buflet_get_object_address(buflet2));
442 VERIFY(kern_buflet_get_data_address(buflet) ==
443 kern_buflet_get_data_address(buflet2));
444 VERIFY(kern_buflet_get_data_limit(buflet) ==
445 kern_buflet_get_data_limit(buflet2));
446 VERIFY(kern_buflet_get_data_offset(buflet) ==
447 kern_buflet_get_data_offset(buflet2));
448 VERIFY(kern_buflet_get_data_length(buflet) ==
449 kern_buflet_get_data_length(buflet2));
450 VERIFY(kern_buflet_set_data_limit(buflet2,
451 (uint16_t)kern_buflet_get_object_limit(buflet2) + 1)
452 == ERANGE);
453 VERIFY(kern_buflet_set_data_limit(buflet2,
454 (uint16_t)kern_buflet_get_object_limit(buflet2) - 16)
455 == 0);
456 VERIFY(kern_buflet_set_data_address(buflet2,
457 (const void *)((uintptr_t)kern_buflet_get_object_address(buflet2) - 1))
458 == ERANGE);
459 VERIFY(kern_buflet_set_data_address(buflet2,
460 (const void *)((uintptr_t)kern_buflet_get_object_address(buflet2) + 16))
461 == 0);
462 VERIFY(kern_buflet_set_data_length(buflet2,
463 kern_buflet_get_data_length(buflet2) - 32) == 0);
464 VERIFY(kern_buflet_get_object_segment(buflet,
465 &buf_idx_seg) ==
466 kern_buflet_get_object_segment(buflet2,
467 &buf2_idx_seg));
468 VERIFY(buf_idx_seg == buf2_idx_seg);
469 VERIFY(buflet->buf_ctl == buflet2->buf_ctl);
470 VERIFY(buflet->buf_ctl->bc_usecnt == 2);
471 ++phcloned;
472 VERIFY(__packet_finalize(pharyc[i]) == 0);
473 /* verify trace id isn't reused */
474 VERIFY(kern_packet_get_trace_id(pharyc[i]) == 0);
475 kern_packet_set_trace_id(pharyc[i], phcnt - i);
476 VERIFY(kern_packet_get_trace_id(pharyc[i]) == (phcnt - i));
477 VERIFY(kern_packet_get_trace_id(phary[i]) == i);
478 }
479 }
480 VERIFY(phcloned == phcnt || phcloned == 0);
481 if (phcloned != 0) {
482 kern_pbufpool_free_batch(pp, pharyc, phcloned);
483 phcloned = 0;
484 }
485 kern_pbufpool_free_batch(pp, phary, phcnt);
486 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
487 VERIFY(phcnt == mbcnt);
488 VERIFY(skmt_mbcnt == 0);
489 for (i = 0; i < mbcnt; i++) {
490 struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
491 kern_buflet_t buflet;
492
493 VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
494 NULL)) != NULL);
495 VERIFY(kern_buflet_set_data_length(buflet, BUFLEN) == 0);
496 (void) memset(kern_buflet_get_data_address(buflet), i, BUFLEN);
497 /* attach mbuf to packets and initialize packets */
498 mblen = BUFLEN;
499 VERIFY(mbuf_ring_cluster_alloc(MBUF_WAITOK, MBUF_TYPE_HEADER,
500 &mbary[i], skmem_test_mbfreecb, &mblen) == 0);
501 VERIFY(mblen == BUFLEN);
502 VERIFY(mbary[i] != NULL);
503 VERIFY(mbary[i]->m_nextpkt == NULL);
504 mbuf_setlen(mbary[i], mblen);
505 mbuf_pkthdr_setlen(mbary[i], mblen);
506 VERIFY((size_t)m_pktlen(mbary[i]) == mblen);
507 (void) memset(mbuf_data(mbary[i]), i, mblen);
508 kpkt->pkt_mbuf = mbary[i];
509 kpkt->pkt_pflags |= PKT_F_MBUF_DATA;
510 VERIFY(__packet_finalize_with_mbuf(kpkt) == 0);
511 VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
512 VERIFY(mbuf_ring_cluster_activate(kpkt->pkt_mbuf) == 0);
513 }
514 /* clone packets (heavyweight) */
515 for (i = 0; i < phcnt; i++) {
516 VERIFY(kern_packet_clone(phary[i], &pharyc[i],
517 KPKT_COPY_HEAVY) == 0);
518 struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
519 struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(pharyc[i]);
520 kern_buflet_t buflet, buflet2;
521 /*
522 * Source packet was allocated with 1 buffer, so
523 * validate that the clone packet points to different
524 * buffer, and that the clone's attached mbuf is also
525 * different than the source's.
526 */
527 VERIFY(!(QUM_ADDR(pharyc[i])->qum_qflags & QUM_F_FINALIZED));
528 VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
529 NULL)) != NULL);
530 VERIFY((buflet2 = kern_packet_get_next_buflet(pharyc[i],
531 NULL)) != NULL);
532 VERIFY(kern_buflet_get_object_address(buflet) !=
533 kern_buflet_get_object_address(buflet2));
534 VERIFY(kern_buflet_get_data_address(buflet) !=
535 kern_buflet_get_data_address(buflet2));
536 VERIFY(kern_buflet_get_data_limit(buflet) ==
537 kern_buflet_get_data_limit(buflet2));
538 VERIFY(kern_buflet_get_data_offset(buflet) ==
539 kern_buflet_get_data_offset(buflet2));
540 VERIFY(kern_buflet_get_data_length(buflet) == BUFLEN);
541 VERIFY(kern_buflet_get_data_length(buflet) ==
542 kern_buflet_get_data_length(buflet2));
543 VERIFY(kpkt->pkt_pflags & PKT_F_MBUF_DATA);
544 VERIFY(kpkt2->pkt_pflags & PKT_F_MBUF_DATA);
545 VERIFY(m_pktlen(kpkt2->pkt_mbuf) == m_pktlen(kpkt->pkt_mbuf));
546 VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
547 VERIFY(kern_packet_get_data_length(phary[i]) ==
548 kern_packet_get_data_length(pharyc[i]));
549 VERIFY(buflet->buf_ctl != buflet2->buf_ctl);
550 VERIFY(buflet->buf_ctl->bc_usecnt == 1);
551 VERIFY(buflet2->buf_ctl->bc_usecnt == 1);
552 VERIFY(memcmp(kern_buflet_get_data_address(buflet),
553 kern_buflet_get_data_address(buflet2),
554 kern_buflet_get_data_length(buflet)) == 0);
555 VERIFY(kpkt->pkt_mbuf != NULL);
556 VERIFY(kpkt2->pkt_mbuf != NULL);
557 VERIFY(mbuf_data(kpkt->pkt_mbuf) != mbuf_data(kpkt2->pkt_mbuf));
558 VERIFY(mbuf_len(kpkt->pkt_mbuf) == mbuf_len(kpkt2->pkt_mbuf));
559 /* mbuf contents must have been copied */
560 VERIFY(memcmp(mbuf_data(kpkt->pkt_mbuf),
561 mbuf_data(kpkt2->pkt_mbuf), mbuf_len(kpkt->pkt_mbuf)) == 0);
562 VERIFY(__packet_finalize(pharyc[i]) == 0);
563 ++phcloned;
564 }
565 VERIFY(phcloned == phcnt);
566 kern_pbufpool_free_batch(pp, pharyc, phcloned);
567 phcloned = 0;
568 skmt_mbcnt = mbcnt;
569 kern_pbufpool_free_batch(pp, phary, phcnt);
570 /* skmem_test_mbfreecb() should have been called for all mbufs by now */
571 VERIFY(skmt_mbcnt == 0);
572 for (i = 0; i < mbcnt; i++) {
573 VERIFY(mbary[i] != NULL);
574 m_freem(mbary[i]);
575 mbary[i] = NULL;
576 }
577 mbcnt = 0;
578
579 /*
580 * Allocate and free test
581 * Case 2: Packet has a packet attached
582 */
583 VERIFY(pp_mem_info.kpm_packets >= 64);
584 phcnt = 32;
585 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
586 VERIFY(phcnt == 32);
587 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary2, &phcnt) == 0);
588 VERIFY(phcnt == 32);
589 /* attach each packet to a packet */
590 for (i = 0; i < phcnt; i++) {
591 struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
592 struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(phary2[i]);
593
594 kpkt->pkt_pkt = kpkt2;
595 kpkt->pkt_pflags |= PKT_F_PKT_DATA;
596 }
597 /* free the batch of packets (also free the attached packets) */
598 kern_pbufpool_free_batch(pp, phary, phcnt);
599
600 /*
601 * Allocate and free test
602 * Case 3: Packet has a packet attached. The attached packet itself has
603 * an mbuf attached.
604 */
605 VERIFY(pp_mem_info.kpm_packets >= 64);
606 phcnt = 32;
607 mbcnt = 32;
608 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
609 VERIFY(phcnt == 32);
610 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary2, &phcnt) == 0);
611 VERIFY(phcnt == 32);
612 VERIFY(skmt_mbcnt == 0);
613 for (i = 0; i < mbcnt; i++) {
614 mblen = BUFLEN;
615 VERIFY(mbuf_ring_cluster_alloc(MBUF_WAITOK, MBUF_TYPE_HEADER,
616 &mbary[i], skmem_test_mbfreecb, &mblen) == 0);
617 VERIFY(mbary[i] != NULL);
618 VERIFY(mbary[i]->m_nextpkt == NULL);
619 }
620 /* attach each packet to a packet */
621 for (i = 0; i < phcnt; i++) {
622 struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
623 struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(phary2[i]);
624
625 VERIFY(mbary[i] != NULL);
626 VERIFY(__packet_initialize_with_mbuf(kpkt2,
627 mbary[i], 0, 0) == 0);
628 VERIFY(mbuf_ring_cluster_activate(kpkt2->pkt_mbuf) == 0);
629 kpkt->pkt_pkt = kpkt2;
630 kpkt->pkt_pflags |= PKT_F_PKT_DATA;
631 }
632 skmt_mbcnt = mbcnt;
633 /* free the batch of packets (also free the attached packets) */
634 kern_pbufpool_free_batch(pp, phary, phcnt);
635 /* skmem_test_mbfreecb() should have been called for all mbufs by now */
636 VERIFY(skmt_mbcnt == 0);
637 for (i = 0; i < mbcnt; i++) {
638 VERIFY(mbary[i] != NULL);
639 m_freem(mbary[i]);
640 mbary[i] = NULL;
641 }
642 mbcnt = 0;
643
644 kern_pbufpool_destroy(pp);
645 pp = NULL;
646 /* check that ctx_release has been called */
647 VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 1);
648
649 pp_init.kbi_ctx = NULL;
650 pp_init.kbi_ctx_retain = NULL;
651 pp_init.kbi_ctx_release = NULL;
652 pp_init.kbi_buflets = 1;
653 /* must fail if buflets is non-zero and less than packets */
654 if (!(flags & KBIF_BUFFER_ON_DEMAND)) {
655 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
656 } else {
657 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
658 kern_pbufpool_destroy(pp);
659 pp = NULL;
660 }
661 pp_init.kbi_buflets = (64 * 2);
662 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
663 bzero(&pp_mem_info, sizeof(pp_mem_info));
664 VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
665 VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
666 VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
667 VERIFY(pp_mem_info.kpm_packets >= 64);
668 VERIFY(pp_mem_info.kpm_max_frags == 1);
669 VERIFY(pp_mem_info.kpm_buflets >= (64 * 2));
670 VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
671 VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
672 VERIFY(kern_packet_get_buflet_count(ph) == 1);
673 kern_pbufpool_free(pp, ph);
674 ph = 0;
675 phcnt = 4;
676 VERIFY(kern_pbufpool_alloc_batch(pp, 4, phary, &phcnt) == EINVAL);
677 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
678 VERIFY(kern_packet_get_buflet_count(phary[0]) == 1);
679 VERIFY(kern_packet_get_buflet_count(phary[1]) == 1);
680 VERIFY(kern_packet_get_buflet_count(phary[2]) == 1);
681 VERIFY(kern_packet_get_buflet_count(phary[3]) == 1);
682 kern_pbufpool_free_batch(pp, phary, phcnt);
683 kern_pbufpool_destroy(pp);
684 pp = NULL;
685
686 /* check multi-buflet KPIs */
687 bzero(&pp_init_mb, sizeof(pp_init_mb));
688 pp_init_mb.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
689 pp_init_mb.kbi_buf_seg_size = skmem_usr_buf_seg_size;
690 (void) snprintf((char *)pp_init_mb.kbi_name,
691 sizeof(pp_init_mb.kbi_name), "%s", "skmem_buflet_tests_mb");
692 pp_init_mb.kbi_flags = flags;
693 pp_init_mb.kbi_max_frags = 4;
694 pp_init_mb.kbi_packets = 64;
695 pp_init_mb.kbi_bufsize = 512;
696 pp_init_mb.kbi_buflets =
697 pp_init_mb.kbi_packets * pp_init_mb.kbi_max_frags;
698
699 VERIFY((kern_pbufpool_create(&pp_init_mb, &pp_mb, NULL) == EINVAL) ||
700 (flags & KBIF_BUFFER_ON_DEMAND));
701
702 if (pp_mb != NULL) {
703 bzero(&pp_mem_info, sizeof(pp_mem_info));
704 VERIFY(kern_pbufpool_get_memory_info(pp_mb, &pp_mem_info) == 0);
705 VERIFY(kern_pbufpool_alloc(pp_mb, 0, &ph_mb) == 0 ||
706 !(flags & KBIF_BUFFER_ON_DEMAND));
707 if (ph_mb != 0) {
708 VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
709 kern_pbufpool_free(pp_mb, ph_mb);
710 ph_mb = 0;
711 }
712 VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, &sg,
713 &sg_idx) == 0 || !(flags & KBIF_BUFFER_ON_DEMAND));
714 if (baddr != 0) {
715 VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
716 kern_pbufpool_free_buffer(pp_mb, baddr);
717 baddr = 0;
718 }
719 kern_pbufpool_destroy(pp_mb);
720 pp_mb = NULL;
721 }
722
723 _FREE(mbary, M_TEMP);
724 mbary = NULL;
725
726 kfree_data(phary, sizeof(kern_packet_t) * MAX_PH_ARY);
727 phary = NULL;
728
729 kfree_data(phary2, sizeof(kern_packet_t) * MAX_PH_ARY);
730 phary2 = NULL;
731
732 kfree_data(pharyc, sizeof(kern_packet_t) * MAX_PH_ARY);
733 pharyc = NULL;
734 }
735
736 static void
skmem_test_mbfreecb(caddr_t cl,uint32_t size,caddr_t arg)737 skmem_test_mbfreecb(caddr_t cl, uint32_t size, caddr_t arg)
738 {
739 #pragma unused(cl, size)
740 struct mbuf *m = (void *)arg;
741
742 VERIFY(!mbuf_ring_cluster_is_active(m));
743 VERIFY(skmt_mbcnt > 0);
744 atomic_add_32(&skmt_mbcnt, -1);
745 }
746
747 static void
skmem_test_alloccb(kern_packet_t ph,uint32_t idx,const void * ctx)748 skmem_test_alloccb(kern_packet_t ph, uint32_t idx, const void *ctx)
749 {
750 VERIFY(ph != 0);
751 VERIFY(ctx == &skmt_alloccb_ctx);
752 VERIFY(idx < skmt_alloccb_ctx.stc_req);
753 VERIFY(idx == atomic_add_32_ov(&skmt_alloccb_ctx.stc_idx, 1));
754 }
755 static void
skmem_packet_tests(uint32_t flags)756 skmem_packet_tests(uint32_t flags)
757 {
758 struct kern_pbufpool_memory_info pp_mb_mem_info;
759 struct kern_pbufpool_memory_info pp_mem_info;
760 struct kern_pbufpool_init pp_init;
761 kern_pbufpool_t pp = NULL;
762 struct kern_pbufpool_init pp_init_mb;
763 kern_pbufpool_t pp_mb = NULL;
764 mach_vm_address_t baddr = 0;
765 uint8_t *buffer, *ref_buffer;
766 kern_obj_idx_seg_t sg_idx;
767 kern_buflet_t buflet;
768 kern_segment_t sg;
769 kern_packet_t ph = 0, ph_mb = 0;
770 struct mbuf *m = NULL;
771 uint16_t len;
772 uint32_t i;
773 uint32_t csum_eee_ref, csum_eeo_ref, csum_eoe_ref, csum_eoo_ref;
774 uint32_t csum_oee_ref, csum_oeo_ref, csum_ooe_ref, csum_ooo_ref, csum;
775 boolean_t test_unaligned;
776 kern_buflet_t bft0, bft1;
777
778 SK_ERR("flags 0x%x", flags);
779
780 /*
781 * XXX: Skip packet tests involving unaligned addresses when
782 * KBIF_INHIBIT_CACHE is set, as the copy-and-checksum routine
783 * currently assumes normal memory, rather than device memory.
784 */
785 test_unaligned = !(flags & KBIF_INHIBIT_CACHE);
786
787 /* allocate separately in case pool is setup for device memory */
788 ref_buffer = (uint8_t *) kalloc_data(SKMEM_TEST_BUFSIZE,
789 Z_WAITOK | Z_ZERO);
790
791 bzero(&pp_init_mb, sizeof(pp_init_mb));
792 pp_init_mb.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
793 pp_init_mb.kbi_buf_seg_size = skmem_usr_buf_seg_size;
794 (void) snprintf((char *)pp_init_mb.kbi_name,
795 sizeof(pp_init_mb.kbi_name), "%s", "skmem_packet_tests_mb");
796 pp_init_mb.kbi_flags = flags | KBIF_BUFFER_ON_DEMAND;
797 pp_init_mb.kbi_max_frags = 4;
798 pp_init_mb.kbi_packets = 64;
799 pp_init_mb.kbi_bufsize = 512;
800 pp_init_mb.kbi_buflets =
801 pp_init_mb.kbi_packets * pp_init_mb.kbi_max_frags;
802 pp_init_mb.kbi_ctx = NULL;
803 pp_init_mb.kbi_ctx_retain = NULL;
804 pp_init_mb.kbi_ctx_release = NULL;
805
806 VERIFY(kern_pbufpool_create(&pp_init_mb, &pp_mb, &pp_mb_mem_info) == 0);
807 VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, NULL, NULL) == 0);
808 kern_pbufpool_free_buffer(pp_mb, baddr);
809 VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, &sg, &sg_idx) == 0);
810 VERIFY(sg != NULL);
811 VERIFY(sg->sg_region != NULL);
812 VERIFY(sg->sg_md != NULL);
813 VERIFY(sg->sg_start != 0);
814 VERIFY(sg->sg_end != 0);
815 VERIFY(sg->sg_type == SKSEG_TYPE_ALLOC);
816 kern_pbufpool_free_buffer(pp_mb, baddr);
817 baddr = 0;
818
819 /* add buflet to a packet with buf count 1 */
820 VERIFY(kern_pbufpool_alloc(pp_mb, 1, &ph_mb) == 0);
821 VERIFY(kern_pbufpool_alloc_buflet(pp_mb, &bft1) == 0);
822 VERIFY(bft1 != NULL);
823 VERIFY(kern_buflet_get_data_address(bft1) != NULL);
824 VERIFY(kern_buflet_get_object_address(bft1) != NULL);
825 VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) != NULL);
826 VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
827 VERIFY(kern_packet_get_buflet_count(ph_mb) == 2);
828 VERIFY(kern_packet_get_next_buflet(ph_mb, NULL) == bft0);
829 VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
830 VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
831 VERIFY(kern_packet_finalize(ph_mb) == 0);
832 kern_pbufpool_free(pp_mb, ph_mb);
833 ph_mb = 0;
834
835 /* add buflet to a packet with buf count 0 */
836 VERIFY(kern_pbufpool_alloc(pp_mb, 0, &ph_mb) == 0);
837 VERIFY(kern_packet_get_buflet_count(ph_mb) == 0);
838 VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) == NULL);
839 VERIFY(kern_pbufpool_alloc_buflet(pp_mb, &bft1) == 0);
840 VERIFY(bft1 != NULL);
841 VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
842 VERIFY(kern_packet_get_buflet_count(ph_mb) == 1);
843 VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
844 VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
845 VERIFY(kern_buflet_get_data_address(bft1) != NULL);
846 VERIFY(kern_buflet_get_object_address(bft1) != NULL);
847 VERIFY(kern_buflet_get_data_limit(bft1) != 0);
848 VERIFY(kern_buflet_get_data_length(bft1) == 0);
849 VERIFY(kern_packet_finalize(ph_mb) == 0);
850 kern_pbufpool_free(pp_mb, ph_mb);
851 ph_mb = 0;
852
853 bzero(&pp_init, sizeof(pp_init));
854 pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
855 pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
856 (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
857 "%s", "skmem_packet_tests");
858 pp_init.kbi_flags = flags;
859 pp_init.kbi_packets = 64;
860 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
861 pp_init.kbi_max_frags = 1;
862 pp_init.kbi_buflets = (64 * 2);
863 pp_init.kbi_ctx = NULL;
864 pp_init.kbi_ctx_retain = NULL;
865 pp_init.kbi_ctx_release = NULL;
866
867 /* validate multi-buflet packet checksum/copy+checksum routines */
868 VERIFY(kern_pbufpool_create(&pp_init, &pp, &pp_mem_info) == 0);
869 VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
870 VERIFY(kern_packet_get_buflet_count(ph) == 1);
871
872 VERIFY((buflet = kern_packet_get_next_buflet(ph, NULL)) != NULL);
873 VERIFY((buffer = kern_buflet_get_data_address(buflet)) != NULL);
874 len = SKMEM_TEST_BUFSIZE;
875 for (i = 0; i < len; i++) {
876 ref_buffer[i] = (i & 0xff);
877 }
878 /* enforce load/store byte for device memory case */
879 volatile uint8_t *bufp = buffer;
880 for (i = 0; i < len; i++) {
881 bufp[i] = ref_buffer[i];
882 }
883 VERIFY(kern_buflet_set_data_length(buflet, len) == 0);
884 VERIFY(__packet_finalize(ph) == 0);
885
886 /* calculate and validate reference value */
887 csum_eee_ref = __packet_cksum(buffer, len, 0);
888 VERIFY(skmem_reference_sum(ref_buffer, len, 0) == csum_eee_ref);
889 csum_eoe_ref = __packet_cksum(buffer, len - 2, 0);
890 VERIFY(skmem_reference_sum(ref_buffer, len - 2, 0) == csum_eoe_ref);
891 csum_eoo_ref = csum_eeo_ref = __packet_cksum(buffer, len - 1, 0);
892 VERIFY(skmem_reference_sum(ref_buffer, len - 1, 0) == csum_eoo_ref);
893 csum_oeo_ref = csum_ooo_ref = __packet_cksum(buffer + 1, len - 1, 0);
894 VERIFY(skmem_reference_sum(ref_buffer + 1, len - 1, 0) == csum_oeo_ref);
895 csum_ooe_ref = csum_oee_ref = __packet_cksum(buffer + 1, len - 2, 0);
896 VERIFY(skmem_reference_sum(ref_buffer + 1, len - 2, 0) == csum_ooe_ref);
897
898 /* sanity tests */
899 VERIFY(skmem_reference_sum(ref_buffer + 2, len - 2, 0) ==
900 __packet_cksum(buffer + 2, len - 2, 0));
901 VERIFY(skmem_reference_sum(ref_buffer + 3, len - 3, 0) ==
902 __packet_cksum(buffer + 3, len - 3, 0));
903 VERIFY(skmem_reference_sum(ref_buffer + 4, len - 4, 0) ==
904 __packet_cksum(buffer + 4, len - 4, 0));
905 VERIFY(skmem_reference_sum(ref_buffer + 5, len - 5, 0) ==
906 __packet_cksum(buffer + 5, len - 5, 0));
907 VERIFY(skmem_reference_sum(ref_buffer + 6, len - 6, 0) ==
908 __packet_cksum(buffer + 6, len - 6, 0));
909 VERIFY(skmem_reference_sum(ref_buffer + 7, len - 7, 0) ==
910 __packet_cksum(buffer + 7, len - 7, 0));
911
912 VERIFY(mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_HEADER, &m) == 0);
913 VERIFY(mbuf_copyback(m, 0, len, buffer, MBUF_WAITOK) == 0);
914
915 /* verify copy-checksum between packets */
916 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
917 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
918 pkt_copypkt_sum(ph, 0, ph_mb, 0, len - 1, &csum, TRUE);
919 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
920 VERIFY(__packet_finalize(ph_mb) == 0);
921 if (csum_eeo_ref != csum) {
922 SK_ERR("pkt_copypkt_sum: csum_eeo_mismatch 0x%x, "
923 "0x%x, 0x%llx", csum_eeo_ref, csum,
924 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
925 }
926 VERIFY(csum_eeo_ref == csum);
927 kern_pbufpool_free(pp_mb, ph_mb);
928 ph_mb = 0;
929
930 if (test_unaligned) {
931 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
932 pkt_copypkt_sum(ph, 0, ph_mb, 1, len - 2, &csum, TRUE);
933 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
934 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
935 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
936 VERIFY(__packet_finalize(ph_mb) == 0);
937 if (csum_eoe_ref != csum) {
938 SK_ERR("pkt_copypkt_sum: csum_eoe_mismatch 0x%x, "
939 "0x%x, 0x%llx", csum_eoe_ref, csum,
940 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
941 }
942 VERIFY(csum_eoe_ref == csum);
943 kern_pbufpool_free(pp_mb, ph_mb);
944 ph_mb = 0;
945
946 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
947 pkt_copypkt_sum(ph, 0, ph_mb, 1, len - 1, &csum, TRUE);
948 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
949 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
950 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
951 VERIFY(__packet_finalize(ph_mb) == 0);
952 if (csum_eoo_ref != csum) {
953 SK_ERR("pkt_copypkt_sum: csum_eoo_mismatch 0x%x, "
954 "0x%x, 0x%llx", csum_eoo_ref, csum,
955 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
956 }
957 VERIFY(csum_eoo_ref == csum);
958 kern_pbufpool_free(pp_mb, ph_mb);
959 ph_mb = 0;
960
961 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
962 pkt_copypkt_sum(ph, 1, ph_mb, 0, len - 1, &csum, TRUE);
963 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
964 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
965 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
966 VERIFY(__packet_finalize(ph_mb) == 0);
967 if (csum_oeo_ref != csum) {
968 SK_ERR("pkt_copypkt_sum: csum_oeo_mismatch 0x%x, "
969 "0x%x, 0x%llx", csum_oeo_ref, csum,
970 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
971 }
972 VERIFY(csum_oeo_ref == csum);
973 kern_pbufpool_free(pp_mb, ph_mb);
974 ph_mb = 0;
975
976 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
977 pkt_copypkt_sum(ph, 1, ph_mb, 1, len - 1, &csum, TRUE);
978 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
979 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
980 VERIFY(__packet_finalize(ph_mb) == 0);
981 if (csum_ooo_ref != csum) {
982 SK_ERR("pkt_copypkt_sum: csum_ooo_mismatch 0x%x, "
983 "0x%x, 0x%llx", csum_ooo_ref, csum,
984 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
985 }
986 VERIFY(csum_ooo_ref == csum);
987 kern_pbufpool_free(pp_mb, ph_mb);
988 ph_mb = 0;
989
990 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
991 pkt_copypkt_sum(ph, 1, ph_mb, 1, len - 2, &csum, TRUE);
992 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
993 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
994 VERIFY(__packet_finalize(ph_mb) == 0);
995 if (csum_ooe_ref != csum) {
996 SK_ERR("pkt_copypkt_sum: csum_ooe_mismatch 0x%x, "
997 "0x%x, 0x%llx", csum_ooe_ref, csum,
998 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
999 }
1000 VERIFY(csum_ooe_ref == csum);
1001 kern_pbufpool_free(pp_mb, ph_mb);
1002 ph_mb = 0;
1003
1004 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1005 pkt_copypkt_sum(ph, 1, ph_mb, 0, len - 2, &csum, TRUE);
1006 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1007 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1008 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1009 VERIFY(__packet_finalize(ph_mb) == 0);
1010 if (csum_ooe_ref != csum) {
1011 SK_ERR("pkt_copypkt_sum: csum_oee_mismatch 0x%x, "
1012 "0x%x, 0x%llx", csum_oee_ref, csum,
1013 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1014 }
1015 VERIFY(csum_oee_ref == csum);
1016 kern_pbufpool_free(pp_mb, ph_mb);
1017 ph_mb = 0;
1018 }
1019
1020 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1021 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1022 pkt_copypkt_sum(ph, 0, ph_mb, 0, len, &csum, TRUE);
1023 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1024 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1025 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1026 VERIFY(__packet_finalize(ph_mb) == 0);
1027 if (csum_eee_ref != csum) {
1028 SK_ERR("pkt_copypkt_sum: csum_eee_mismatch 0x%x, "
1029 "0x%x, 0x%llx", csum_eee_ref, csum,
1030 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1031 }
1032 VERIFY(csum_eee_ref == csum);
1033
1034 /* verify copy-checksum from packet to buffer */
1035 csum = pkt_copyaddr_sum(ph_mb, 0, buffer, len - 1, TRUE, 0, NULL);
1036 if (csum_eeo_ref != csum) {
1037 SK_ERR("pkt_copyaddr_sum: csum_eeo_mismatch "
1038 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eeo_ref,
1039 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1040 SK_KVA(buffer));
1041 }
1042 VERIFY(csum_eeo_ref == csum);
1043
1044 if (test_unaligned) {
1045 csum = pkt_copyaddr_sum(ph_mb, 0, buffer + 1, len - 1, TRUE, 0, NULL);
1046 if (csum_eoo_ref != csum) {
1047 SK_ERR("pkt_copyaddr_sum: csum_eoo_mismatch "
1048 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoo_ref,
1049 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1050 SK_KVA(buffer));
1051 }
1052 VERIFY(csum_eoo_ref == csum);
1053
1054 csum = pkt_copyaddr_sum(ph_mb, 0, buffer + 1, len - 2, TRUE, 0, NULL);
1055 if (csum_eoe_ref != csum) {
1056 SK_ERR("pkt_copyaddr_sum: csum_eoe_mismatch "
1057 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoe_ref,
1058 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1059 SK_KVA(buffer));
1060 }
1061 VERIFY(csum_eoe_ref == csum);
1062
1063 csum = pkt_copyaddr_sum(ph_mb, 1, buffer + 1, len - 2, TRUE, 0, NULL);
1064 if (csum_ooe_ref != csum) {
1065 SK_ERR("pkt_copyaddr_sum: csum_ooe_mismatch "
1066 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooe_ref,
1067 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1068 SK_KVA(buffer));
1069 }
1070 VERIFY(csum_ooe_ref == csum);
1071
1072 csum = pkt_copyaddr_sum(ph_mb, 1, buffer, len - 2, TRUE, 0, NULL);
1073 if (csum_oee_ref != csum) {
1074 SK_ERR("pkt_copyaddr_sum: csum_oee_mismatch "
1075 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oee_ref,
1076 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1077 SK_KVA(buffer));
1078 }
1079 VERIFY(csum_oee_ref == csum);
1080
1081 csum = pkt_copyaddr_sum(ph_mb, 1, buffer, len - 1, TRUE, 0, NULL);
1082 if (csum_oeo_ref != csum) {
1083 SK_ERR("pkt_copyaddr_sum: csum_oeo_mismatch "
1084 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oeo_ref,
1085 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1086 SK_KVA(buffer));
1087 }
1088 VERIFY(csum_oeo_ref == csum);
1089
1090 csum = pkt_copyaddr_sum(ph_mb, 1, buffer + 1, len - 1, TRUE, 0, NULL);
1091 if (csum_ooo_ref != csum) {
1092 SK_ERR("pkt_copyaddr_sum: csum_ooo_mismatch "
1093 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooo_ref,
1094 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1095 SK_KVA(buffer));
1096 }
1097 VERIFY(csum_ooo_ref == csum);
1098 }
1099
1100 csum = pkt_copyaddr_sum(ph_mb, 0, buffer, len, TRUE, 0, NULL);
1101 if (csum_eee_ref != csum) {
1102 SK_ERR("pkt_copyaddr_sum: csum_eee_mismatch "
1103 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eee_ref,
1104 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1105 SK_KVA(buffer));
1106 }
1107 VERIFY(csum_eee_ref == csum);
1108
1109 for (i = 0; i < len; i++) {
1110 VERIFY(buffer[i] == (i & 0xff));
1111 }
1112 kern_pbufpool_free(pp_mb, ph_mb);
1113 ph_mb = 0;
1114
1115 if (test_unaligned) {
1116 /* verify copy-checksum from mbuf to packet */
1117 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1118 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1119 csum = pkt_mcopypkt_sum(m, 0, ph_mb, 0, len, TRUE);
1120 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1121 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1122 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1123 VERIFY(__packet_finalize(ph_mb) == 0);
1124 if (csum_eee_ref != csum) {
1125 SK_ERR("pkt_mcopypkt_sum: csum_eee_mismatch "
1126 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eee_ref,
1127 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1128 SK_KVA(m));
1129 }
1130 VERIFY(csum_eee_ref == csum);
1131 kern_pbufpool_free(pp_mb, ph_mb);
1132 ph_mb = 0;
1133
1134 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1135 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1136 csum = pkt_mcopypkt_sum(m, 0, ph_mb, 1, len - 2, TRUE);
1137 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1138 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1139 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1140 VERIFY(__packet_finalize(ph_mb) == 0);
1141 if (csum_eoe_ref != csum) {
1142 SK_ERR("pkt_mcopypkt_sum: csum_eoe_mismatch "
1143 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoe_ref,
1144 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1145 SK_KVA(m));
1146 }
1147 VERIFY(csum_eoe_ref == csum);
1148 kern_pbufpool_free(pp_mb, ph_mb);
1149 ph_mb = 0;
1150
1151 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1152 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1153 csum = pkt_mcopypkt_sum(m, 0, ph_mb, 1, len - 1, TRUE);
1154 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1155 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1156 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1157 VERIFY(__packet_finalize(ph_mb) == 0);
1158 if (csum_eoo_ref != csum) {
1159 SK_ERR("pkt_mcopypkt_sum: csum_eoo_mismatch "
1160 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoo_ref,
1161 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1162 SK_KVA(m));
1163 }
1164 VERIFY(csum_eoo_ref == csum);
1165 kern_pbufpool_free(pp_mb, ph_mb);
1166 ph_mb = 0;
1167 }
1168
1169 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1170 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1171 csum = pkt_mcopypkt_sum(m, 0, ph_mb, 0, len - 1, TRUE);
1172 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1173 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1174 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1175 VERIFY(__packet_finalize(ph_mb) == 0);
1176 if (csum_eeo_ref != csum) {
1177 SK_ERR("pkt_mcopypkt_sum: csum_eeo_mismatch "
1178 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eeo_ref,
1179 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1180 SK_KVA(m));
1181 }
1182 VERIFY(csum_eeo_ref == csum);
1183 kern_pbufpool_free(pp_mb, ph_mb);
1184 ph_mb = 0;
1185
1186 if (test_unaligned) {
1187 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1188 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1189 csum = pkt_mcopypkt_sum(m, 1, ph_mb, 0, len - 1, TRUE);
1190 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1191 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1192 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1193 VERIFY(__packet_finalize(ph_mb) == 0);
1194 if (csum_oeo_ref != csum) {
1195 SK_ERR("pkt_mcopypkt_sum: csum_oeo_mismatch "
1196 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oeo_ref,
1197 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1198 SK_KVA(m));
1199 }
1200 VERIFY(csum_oeo_ref == csum);
1201 kern_pbufpool_free(pp_mb, ph_mb);
1202 ph_mb = 0;
1203
1204 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1205 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1206 csum = pkt_mcopypkt_sum(m, 1, ph_mb, 0, len - 2, TRUE);
1207 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1208 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1209 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1210 VERIFY(__packet_finalize(ph_mb) == 0);
1211 if (csum_oee_ref != csum) {
1212 SK_ERR("pkt_mcopypkt_sum: csum_oee_mismatch "
1213 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oee_ref,
1214 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1215 SK_KVA(m));
1216 }
1217 VERIFY(csum_oee_ref == csum);
1218 kern_pbufpool_free(pp_mb, ph_mb);
1219 ph_mb = 0;
1220
1221 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1222 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1223 csum = pkt_mcopypkt_sum(m, 1, ph_mb, 1, len - 2, TRUE);
1224 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1225 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1226 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1227 VERIFY(__packet_finalize(ph_mb) == 0);
1228 if (csum_ooe_ref != csum) {
1229 SK_ERR("pkt_mcopypkt_sum: csum_ooe_mismatch "
1230 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooe_ref,
1231 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1232 SK_KVA(m));
1233 }
1234 VERIFY(csum_ooe_ref == csum);
1235 kern_pbufpool_free(pp_mb, ph_mb);
1236 ph_mb = 0;
1237
1238 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1239 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1240 csum = pkt_mcopypkt_sum(m, 1, ph_mb, 1, len - 1, TRUE);
1241 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1242 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1243 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1244 VERIFY(__packet_finalize(ph_mb) == 0);
1245 if (csum_ooo_ref != csum) {
1246 SK_ERR("pkt_mcopypkt_sum: csum_ooo_mismatch "
1247 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooo_ref,
1248 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1249 SK_KVA(m));
1250 }
1251 VERIFY(csum_ooo_ref == csum);
1252 kern_pbufpool_free(pp_mb, ph_mb);
1253 ph_mb = 0;
1254 }
1255
1256 kern_pbufpool_free(pp, ph);
1257 ph = 0;
1258 m_freem(m);
1259 m = NULL;
1260 kern_pbufpool_destroy(pp_mb);
1261 pp_mb = NULL;
1262 kern_pbufpool_destroy(pp);
1263 pp = NULL;
1264
1265 kfree_data(ref_buffer, SKMEM_TEST_BUFSIZE);
1266 ref_buffer = NULL;
1267 }
1268
1269 static void
skmem_quantum_tests(uint32_t flags)1270 skmem_quantum_tests(uint32_t flags)
1271 {
1272 struct kern_pbufpool_init pp_init;
1273 struct kern_pbufpool_memory_info pp_mem_info;
1274 kern_pbufpool_t pp = NULL;
1275 kern_packet_t *phary = NULL;
1276 uint32_t phcnt = 0;
1277 kern_packet_t ph = 0;
1278 uint32_t i;
1279 errno_t err;
1280
1281 flags |= KBIF_QUANTUM;
1282
1283 SK_ERR("flags 0x%x", flags);
1284
1285 phary = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
1286 Z_WAITOK | Z_ZERO);
1287
1288 bzero(&pp_init, sizeof(pp_init));
1289 pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
1290 pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
1291 (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
1292 "%s", "skmem_quantum_tests");
1293 pp_init.kbi_flags = (KBIF_QUANTUM | flags);
1294 pp_init.kbi_packets = 64;
1295 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1296 pp_init.kbi_buflets = (64 * 2);
1297 pp_init.kbi_ctx = NULL;
1298 pp_init.kbi_ctx_retain = NULL;
1299 pp_init.kbi_ctx_release = NULL;
1300
1301 pp_init.kbi_max_frags = 4;
1302 /* max_frags must be 1 for quantum type */
1303 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
1304 pp_init.kbi_max_frags = 1;
1305 if ((flags & KBIF_QUANTUM) && (flags & KBIF_BUFFER_ON_DEMAND)) {
1306 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
1307 goto done;
1308 }
1309 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
1310 bzero(&pp_mem_info, sizeof(pp_mem_info));
1311 VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
1312 VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
1313 VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
1314 VERIFY(pp_mem_info.kpm_packets >= 64);
1315 VERIFY(pp_mem_info.kpm_packets <= MAX_PH_ARY);
1316 VERIFY(pp_mem_info.kpm_max_frags == 1);
1317 VERIFY(pp_mem_info.kpm_buflets >= 64);
1318 VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
1319 VERIFY(kern_pbufpool_alloc(pp, 4, &ph) == EINVAL);
1320 /* allocate and free one at a time */
1321 for (i = 0, phcnt = 0; i < pp_mem_info.kpm_packets; i++) {
1322 boolean_t stop = FALSE;
1323 /*
1324 * This may fail if skmem_region_mtbf is set, or if
1325 * the system is short on memory. Perform retries
1326 * at this layer to get at least 64 packets.
1327 */
1328 while ((err = kern_pbufpool_alloc_nosleep(pp, 1, &ph)) != 0) {
1329 VERIFY(err == ENOMEM);
1330 if (phcnt < 64) {
1331 SK_ERR("retrying alloc for quantum %u", phcnt);
1332 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
1333 continue;
1334 }
1335 stop = TRUE;
1336 break;
1337 }
1338 if (stop) {
1339 break;
1340 }
1341 VERIFY(ph != 0);
1342 VERIFY(kern_packet_get_data_length(ph) == 0);
1343 VERIFY(kern_packet_get_buflet_count(ph) == 1);
1344 phary[phcnt++] = ph;
1345 }
1346 VERIFY(phcnt >= 64);
1347 for (i = 0; i < phcnt; i++) {
1348 kern_pbufpool_free(pp, phary[i]);
1349 phary[i] = 0;
1350 }
1351 /* allocate and free in batch */
1352 phcnt = pp_mem_info.kpm_packets;
1353 for (;;) {
1354 err = kern_pbufpool_alloc_batch_nosleep(pp, 1, phary, &phcnt);
1355 VERIFY(err != EINVAL && err != ENOTSUP);
1356 if (err == ENOMEM) {
1357 phcnt = pp_mem_info.kpm_packets;
1358 SK_ERR("retrying batch alloc for %u quantums", phcnt);
1359 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
1360 } else if (err == EAGAIN) {
1361 SK_ERR("batch alloc for %u quantums only returned %u",
1362 pp_mem_info.kpm_packets, phcnt);
1363 break;
1364 } else {
1365 VERIFY(err == 0);
1366 break;
1367 }
1368 }
1369 VERIFY(phcnt > 0);
1370 for (i = 0; i < phcnt; i++) {
1371 VERIFY(phary[i] != 0);
1372 VERIFY(kern_packet_get_data_length(phary[i]) == 0);
1373 VERIFY(kern_packet_get_buflet_count(phary[i]) == 1);
1374 }
1375 kern_pbufpool_free_batch(pp, phary, phcnt);
1376 /* allocate and free one at a time (blocking) */
1377 for (i = 0, phcnt = 0; i < pp_mem_info.kpm_packets; i++) {
1378 VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
1379 VERIFY(ph != 0);
1380 VERIFY(kern_packet_get_data_length(ph) == 0);
1381 VERIFY(kern_packet_get_buflet_count(ph) == 1);
1382 phary[phcnt++] = ph;
1383 }
1384 VERIFY(phcnt >= 64);
1385 for (i = 0; i < phcnt; i++) {
1386 kern_pbufpool_free(pp, phary[i]);
1387 phary[i] = 0;
1388 }
1389 /* allocate and free in batch (blocking) */
1390 bzero(&skmt_alloccb_ctx, sizeof(skmt_alloccb_ctx));
1391 skmt_alloccb_ctx.stc_req = phcnt;
1392 VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
1393 skmem_test_alloccb, &skmt_alloccb_ctx) == 0);
1394 VERIFY(skmt_alloccb_ctx.stc_idx == phcnt);
1395 kern_pbufpool_free_batch(pp, phary, phcnt);
1396 kern_pbufpool_destroy(pp);
1397 pp = NULL;
1398 done:
1399 kfree_data(phary, sizeof(kern_packet_t) * MAX_PH_ARY);
1400 phary = NULL;
1401 }
1402
1403 static void
skmem_basic_tests(void)1404 skmem_basic_tests(void)
1405 {
1406 /* basic sanity (alloc/free) tests on packet buflet KPIs */
1407 skmem_buflet_tests(0);
1408 skmem_buflet_tests(KBIF_PERSISTENT);
1409 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1410 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1411 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS |
1412 KBIF_USER_ACCESS);
1413 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1414 KBIF_USER_ACCESS);
1415 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1416 skmem_buflet_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1417 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1418 KBIF_BUFFER_ON_DEMAND);
1419 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1420 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1421 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1422 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1423 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1424 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1425 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1426 KBIF_NO_MAGAZINES);
1427 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS |
1428 KBIF_USER_ACCESS);
1429 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1430 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1431 TEST_OPTION_INHIBIT_CACHE);
1432 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1433 TEST_OPTION_INHIBIT_CACHE);
1434 skmem_buflet_tests(KBIF_BUFFER_ON_DEMAND | KBIF_NO_MAGAZINES);
1435 skmem_buflet_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1436
1437 /* basic sanity (alloc/free) tests on packet buflet KPIs (vdev) */
1438 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE);
1439 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1440 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1441 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1442 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1443 KBIF_PHYS_CONTIGUOUS);
1444 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1445 KBIF_MONOLITHIC | KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1446 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1447 KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1448 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1449 KBIF_BUFFER_ON_DEMAND);
1450 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1451 TEST_OPTION_INHIBIT_CACHE);
1452 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1453 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1454 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1455 KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1456 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1457 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1458 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1459 KBIF_USER_ACCESS);
1460 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1461 KBIF_PHYS_CONTIGUOUS);
1462 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1463 KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1464 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1465 KBIF_BUFFER_ON_DEMAND);
1466 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1467 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1468 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1469 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1470 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1471 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1472 TEST_OPTION_INHIBIT_CACHE);
1473
1474 /* check packet KPIs (also touches data) */
1475 skmem_packet_tests(0);
1476 skmem_packet_tests(KBIF_PHYS_CONTIGUOUS);
1477 skmem_packet_tests(KBIF_PERSISTENT);
1478 skmem_packet_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1479 skmem_packet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1480 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1481 KBIF_PHYS_CONTIGUOUS | KBIF_USER_ACCESS);
1482 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1483 KBIF_USER_ACCESS);
1484 skmem_packet_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1485 skmem_packet_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1486 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1487 KBIF_BUFFER_ON_DEMAND);
1488 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1489 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1490 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1491 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1492 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1493 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1494 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1495 KBIF_NO_MAGAZINES);
1496 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1497 KBIF_PHYS_CONTIGUOUS);
1498 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1499 #if 0
1500 /* XXX: commented out failed tests on ARM64e platforms */
1501 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1502 TEST_OPTION_INHIBIT_CACHE);
1503 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1504 TEST_OPTION_INHIBIT_CACHE);
1505 skmem_packet_tests(KBIF_BUFFER_ON_DEMAND);
1506 skmem_packet_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1507 #endif
1508
1509 /* check packet KPIs (also touches data) (vdev) */
1510 skmem_packet_tests(KBIF_VIRTUAL_DEVICE);
1511 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1512 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1513 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1514 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1515 KBIF_PHYS_CONTIGUOUS);
1516 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1517 KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1518 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1519 KBIF_BUFFER_ON_DEMAND);
1520 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1521 TEST_OPTION_INHIBIT_CACHE);
1522 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1523 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1524 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1525 KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1526 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1527 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1528 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1529 KBIF_PHYS_CONTIGUOUS);
1530 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1531 KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1532 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1533 KBIF_USER_ACCESS);
1534 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1535 KBIF_BUFFER_ON_DEMAND);
1536 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1537 KBIF_BUFFER_ON_DEMAND | KBIF_PHYS_CONTIGUOUS);
1538 #if 0
1539 	/* XXX: the following tests fail on ARM64e platforms and are disabled */
1540 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1541 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1542 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1543 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1544 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1545 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1546 TEST_OPTION_INHIBIT_CACHE);
1547 #endif
1548
1549 /* check quantum KPIs */
1550 skmem_quantum_tests(0);
1551 skmem_quantum_tests(KBIF_PHYS_CONTIGUOUS);
1552 skmem_quantum_tests(KBIF_PERSISTENT);
1553 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1554 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1555 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1556 KBIF_USER_ACCESS);
1557 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1558 skmem_quantum_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1559 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1560 KBIF_BUFFER_ON_DEMAND);
1561 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1562 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1563 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1564 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1565 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1566 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1567 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1568 KBIF_PHYS_CONTIGUOUS);
1569 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1570 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1571 TEST_OPTION_INHIBIT_CACHE);
1572 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1573 TEST_OPTION_INHIBIT_CACHE);
1574 skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND);
1575 skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | KBIF_NO_MAGAZINES);
1576 skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | KBIF_PHYS_CONTIGUOUS);
1577 skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1578
1579 /* check quantum KPIs (vdev) */
1580 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE);
1581 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1582 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1583 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1584 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1585 KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1586 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1587 KBIF_BUFFER_ON_DEMAND);
1588 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1589 TEST_OPTION_INHIBIT_CACHE);
1590 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1591 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1592 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1593 KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1594 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1595 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1596 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1597 KBIF_PHYS_CONTIGUOUS);
1598 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1599 KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1600 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1601 KBIF_USER_ACCESS);
1602 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1603 KBIF_BUFFER_ON_DEMAND);
1604 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1605 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1606 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1607 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1608 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1609 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1610 KBIF_PHYS_CONTIGUOUS);
1611 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1612 TEST_OPTION_INHIBIT_CACHE);
1613 }
1614
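/*
 * Multi-threaded allocator exercise.  "n" is used only for logging the
 * iteration; "th_max" is the number of worker threads to spawn (one per
 * CPU when invoked from skmem_test_start()); "mode" selects one of the
 * pool configurations set up in the switch below (0-7 are single-buflet
 * pools with varying KBIF_MONOLITHIC, KBIF_USER_ACCESS, KBIF_PERSISTENT
 * and KBIF_VIRTUAL_DEVICE combinations, 8 is a KBIF_BUFFER_ON_DEMAND
 * pool used for the clone tests); "nosleep" makes the workers use the
 * non-blocking *_nosleep allocation variants; "flags" is OR'ed into
 * kbi_flags (e.g. KBIF_NO_MAGAZINES).
 */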
1615 static void
1616 skmem_advanced_tests(int n, int32_t th_max, uint32_t mode, boolean_t nosleep,
1617 uint32_t flags)
1618 {
1619 struct kern_pbufpool_init pp_init;
1620 kern_packet_t mph = 0;
1621 kern_buflet_t buflet = 0;
1622 int i;
1623
1624 VERIFY(skmth_pp == NULL);
1625 VERIFY(skmth_cnt == 0);
1626
1627 bzero(&pp_init, sizeof(pp_init));
1628 pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
1629 pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
1630 pp_init.kbi_flags |= flags;
1631 (void) snprintf((char *)pp_init.kbi_name,
1632 sizeof(pp_init.kbi_name), "%s", "skmem_advanced");
1633
1634 /* prepare */
1635 switch (mode) {
1636 case 0:
1637 pp_init.kbi_packets = th_max;
1638 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1639 pp_init.kbi_max_frags = 1;
1640 pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS;
1641 VERIFY(kern_pbufpool_create(&pp_init,
1642 &skmth_pp, NULL) == 0);
1643 break;
1644
1645 case 1:
1646 pp_init.kbi_packets = th_max;
1647 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1648 pp_init.kbi_max_frags = 1;
1649 pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1650 KBIF_VIRTUAL_DEVICE;
1651 VERIFY(kern_pbufpool_create(&pp_init,
1652 &skmth_pp, NULL) == 0);
1653 break;
1654
1655 case 2:
1656 pp_init.kbi_packets = th_max;
1657 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1658 pp_init.kbi_max_frags = 1;
1659 pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1660 KBIF_PERSISTENT;
1661 VERIFY(kern_pbufpool_create(&pp_init,
1662 &skmth_pp, NULL) == 0);
1663 break;
1664
1665 case 3:
1666 pp_init.kbi_packets = th_max;
1667 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1668 pp_init.kbi_max_frags = 1;
1669 pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1670 KBIF_PERSISTENT | KBIF_VIRTUAL_DEVICE;
1671 VERIFY(kern_pbufpool_create(&pp_init,
1672 &skmth_pp, NULL) == 0);
1673 break;
1674
1675 case 4:
1676 pp_init.kbi_packets = th_max;
1677 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1678 pp_init.kbi_max_frags = 1;
1679 pp_init.kbi_flags |= KBIF_PERSISTENT | KBIF_USER_ACCESS;
1680 VERIFY(kern_pbufpool_create(&pp_init,
1681 &skmth_pp, NULL) == 0);
1682 break;
1683
1684 case 5:
1685 pp_init.kbi_packets = th_max;
1686 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1687 pp_init.kbi_max_frags = 1;
1688 pp_init.kbi_flags |= KBIF_PERSISTENT | KBIF_VIRTUAL_DEVICE;
1689 VERIFY(kern_pbufpool_create(&pp_init,
1690 &skmth_pp, NULL) == 0);
1691 break;
1692
1693 case 6:
1694 pp_init.kbi_packets = th_max;
1695 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1696 pp_init.kbi_max_frags = 1;
1697 pp_init.kbi_flags |= 0;
1698 VERIFY(kern_pbufpool_create(&pp_init,
1699 &skmth_pp, NULL) == 0);
1700 break;
1701
1702 case 7:
1703 pp_init.kbi_packets = th_max;
1704 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1705 pp_init.kbi_max_frags = 1;
1706 pp_init.kbi_flags |= KBIF_VIRTUAL_DEVICE;
1707 VERIFY(kern_pbufpool_create(&pp_init,
1708 &skmth_pp, NULL) == 0);
1709 break;
1710
1711 case 8:
1712 pp_init.kbi_packets = (th_max * 2) + 1;
1713 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1714 pp_init.kbi_max_frags = 1;
1715 pp_init.kbi_flags |= KBIF_BUFFER_ON_DEMAND;
1716 VERIFY(kern_pbufpool_create(&pp_init,
1717 &skmth_pp, NULL) == 0);
1718 break;
1719
1720 default:
1721 VERIFY(0);
1722 /* NOTREACHED */
1723 __builtin_unreachable();
1724 }
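	/*
	 * Note: mode 8 sizes the pool at (th_max * 2) + 1 packets,
	 * presumably so that the shared master packet plus one packet
	 * allocation and one clone per worker can be outstanding at
	 * the same time without exhausting the pool.
	 */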
1725
1726 SK_ERR("%d: th_max %d mode %u nosleep %u nomagazines %u",
1727 n, th_max, mode, nosleep, !!(flags & KBIF_NO_MAGAZINES));
1728
1729 if (pp_init.kbi_flags & KBIF_BUFFER_ON_DEMAND) {
1730 /* create 1 master packet to clone */
1731 VERIFY(kern_pbufpool_alloc(skmth_pp, 1, &mph) == 0);
1732 VERIFY((buflet = kern_packet_get_next_buflet(mph, NULL)) != NULL);
1733 VERIFY(kern_buflet_set_data_length(buflet, SKMEM_TEST_BUFSIZE) == 0);
1734 VERIFY(__packet_finalize(mph) == 0);
1735 }
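	/*
	 * The master packet created above is handed to every worker as
	 * sti_mph; each worker clones it (see the clone tests in
	 * skmem_test_func()) and verifies that the clone shares the
	 * master's buffer.
	 */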
1736
1737 bzero(skmth_info, skmth_info_size);
1738
1739 /* spawn as many threads as there are CPUs */
1740 for (i = 0; i < th_max; i++) {
1741 skmth_info[i].sti_mph = mph;
1742 skmth_info[i].sti_nosleep = nosleep;
1743 if (kernel_thread_start(skmem_test_func, (void *)(uintptr_t)i,
1744 &skmth_info[i].sti_thread) != KERN_SUCCESS) {
1745 panic("Failed to create skmem test thread");
1746 /* NOTREACHED */
1747 __builtin_unreachable();
1748 }
1749 }
1750
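	/*
	 * Rendezvous with the workers: each one bumps skmth_cnt and then
	 * sleeps on skmth_run.  Wait until all th_max workers have
	 * checked in, release them by setting skmth_run, then wait for
	 * skmth_cnt to drop back to zero as they finish.
	 */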
1751 lck_mtx_lock(&skmt_lock);
1752 do {
1753 		struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * USEC_PER_SEC };	/* 100 ms */
1754 (void) msleep(&skmth_cnt, &skmt_lock, (PZERO - 1),
1755 "skmtstartw", &ts);
1756 } while (skmth_cnt < th_max);
1757 VERIFY(skmth_cnt == th_max);
1758 lck_mtx_unlock(&skmt_lock);
1759
1760 lck_mtx_lock(&skmt_lock);
1761 VERIFY(!skmth_run);
1762 skmth_run = TRUE;
1763 wakeup((caddr_t)&skmth_run);
1764 lck_mtx_unlock(&skmt_lock);
1765
1766 /* wait until all threads are done */
1767 lck_mtx_lock(&skmt_lock);
1768 do {
1769 		struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * USEC_PER_SEC };	/* 100 ms */
1770 (void) msleep(&skmth_cnt, &skmt_lock, (PZERO - 1),
1771 "skmtstopw", &ts);
1772 } while (skmth_cnt != 0);
1773 skmth_run = FALSE;
1774 lck_mtx_unlock(&skmt_lock);
1775
1776 if (mph != 0) {
1777 		VERIFY((buflet = kern_packet_get_next_buflet(mph, NULL)) != NULL);
1778 VERIFY(buflet->buf_ctl->bc_usecnt == 1);
1779 kern_pbufpool_free(skmth_pp, mph);
1780 mph = 0;
1781 }
1782 kern_pbufpool_destroy(skmth_pp);
1783 skmth_pp = NULL;
1784 }
1785
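/*
 * Worker thread body.  After checking in with skmem_advanced_tests(),
 * each iteration performs a single packet alloc/free against skmth_pp
 * (blocking or non-blocking depending on sti_nosleep), a one-shot clone
 * of the shared master packet (only when the pool was created with
 * KBIF_BUFFER_ON_DEMAND), and a forced reap of the pool's caches to
 * exercise the purge paths.
 */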
1786 __attribute__((noreturn))
1787 static void
1788 skmem_test_func(void *v, wait_result_t w)
1789 {
1790 #pragma unused(w)
1791 int i = (int)(uintptr_t)v, c;
1792 kern_packet_t ph = 0;
1793
1794 /* let skmem_test_start() know we're ready */
1795 lck_mtx_lock(&skmt_lock);
1796 atomic_add_32(&skmth_cnt, 1);
1797 wakeup((caddr_t)&skmth_cnt);
1798 do {
1799 (void) msleep(&skmth_run, &skmt_lock, (PZERO - 1),
1800 "skmtfuncw", NULL);
1801 } while (!skmth_run);
1802 lck_mtx_unlock(&skmt_lock);
1803
1804 for (c = 0; c < 41; c++) {
1805 /* run alloc tests */
1806 VERIFY(skmth_pp != NULL);
1807 if (skmth_info[i].sti_nosleep) {
1808 errno_t err = kern_pbufpool_alloc_nosleep(skmth_pp,
1809 1, &ph);
1810 VERIFY(ph != 0 || err != 0);
1811 } else {
1812 VERIFY(kern_pbufpool_alloc(skmth_pp, 1, &ph) == 0);
1813 }
1814
1815 if (ph != 0) {
1816 kern_pbufpool_free(skmth_pp, ph);
1817 ph = 0;
1818 }
1819
1820 /* run clone tests */
1821 if (skmth_info[i].sti_mph != 0) {
1822 kern_buflet_t buflet, buflet2;
1823 kern_obj_idx_seg_t buf_idx_seg, buf2_idx_seg;
1824
1825 if (skmth_info[i].sti_nosleep) {
1826 errno_t err;
1827 err = kern_packet_clone_nosleep(skmth_info[i].sti_mph,
1828 &skmth_info[i].sti_mpc, KPKT_COPY_LIGHT);
1829 VERIFY(skmth_info[i].sti_mpc != 0 || err != 0);
1830 } else {
1831 VERIFY(kern_packet_clone(skmth_info[i].sti_mph,
1832 &skmth_info[i].sti_mpc, KPKT_COPY_LIGHT) == 0);
1833 }
1834 if (skmth_info[i].sti_mpc != 0) {
1835 VERIFY(!(QUM_ADDR(skmth_info[i].sti_mpc)->qum_qflags & QUM_F_FINALIZED));
1836 VERIFY((buflet = kern_packet_get_next_buflet(
1837 skmth_info[i].sti_mph, NULL)) != NULL);
1838 VERIFY((buflet2 = kern_packet_get_next_buflet(
1839 skmth_info[i].sti_mpc, NULL)) != NULL);
1840 VERIFY(kern_buflet_get_object_address(buflet) ==
1841 kern_buflet_get_object_address(buflet2));
1842 VERIFY(kern_buflet_get_data_address(buflet) ==
1843 kern_buflet_get_data_address(buflet2));
1844 VERIFY(kern_buflet_get_data_limit(buflet) ==
1845 kern_buflet_get_data_limit(buflet2));
1846 VERIFY(kern_buflet_get_data_offset(buflet) ==
1847 kern_buflet_get_data_offset(buflet2));
1848 VERIFY(kern_buflet_get_data_length(buflet) ==
1849 kern_buflet_get_data_length(buflet2));
1850 VERIFY(kern_buflet_get_object_segment(buflet,
1851 &buf_idx_seg) ==
1852 kern_buflet_get_object_segment(buflet2,
1853 &buf2_idx_seg));
1854 VERIFY(buf_idx_seg == buf2_idx_seg);
1855 VERIFY(buflet->buf_ctl == buflet2->buf_ctl);
1856 VERIFY(__packet_finalize(skmth_info[i].sti_mpc) == 0);
1857 kern_pbufpool_free(skmth_pp, skmth_info[i].sti_mpc);
1858 skmth_info[i].sti_mpc = 0;
1859 }
1860 skmth_info[i].sti_mph = 0;
1861 }
1862
1863 /* force cache purges to exercise related code paths */
1864 if (skmth_pp->pp_kmd_cache != NULL) {
1865 skmem_cache_reap_now(skmth_pp->pp_kmd_cache, TRUE);
1866 }
1867 if (skmth_pp->pp_buf_cache != NULL) {
1868 skmem_cache_reap_now(skmth_pp->pp_buf_cache, TRUE);
1869 }
1870 if (skmth_pp->pp_kbft_cache != NULL) {
1871 skmem_cache_reap_now(skmth_pp->pp_kbft_cache, TRUE);
1872 }
1873 }
1874
1875 /* let skmem_test_start() know we're finished */
1876 lck_mtx_lock(&skmt_lock);
1877 VERIFY(atomic_add_32_ov(&skmth_cnt, -1) != 0);
1878 wakeup((caddr_t)&skmth_cnt);
1879 lck_mtx_unlock(&skmt_lock);
1880
1881 /* for the extra refcnt from kernel_thread_start() */
1882 thread_deallocate(current_thread());
1883
1884 thread_terminate(current_thread());
1885 	/* NOTREACHED */
1886 	__builtin_unreachable();
1887 }
1888
1889 static int skmem_test_objs;
1890
1891 struct skmem_test_obj {
1892 uint64_t sto_val[2];
1893 };
1894
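/*
 * The constructor tags each object by storing, in every sto_val slot,
 * the object address XOR'ed with the address of that slot; the
 * destructor recomputes the XOR and verifies that it yields the object
 * address again, catching objects that were corrupted or never
 * constructed, i.e. for each slot i:
 *
 *	(sto->sto_val[i] ^ (uint64_t)(void *)&sto->sto_val[i]) == (uint64_t)(void *)sto
 */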
1895 static int
1896 skmem_test_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
1897 void *arg, uint32_t skmflag)
1898 {
1899 #pragma unused(skmflag)
1900 struct skmem_test_obj *sto = SKMEM_OBJ_ADDR(oi);
1901
1902 VERIFY(oim == NULL);
1903 VERIFY(arg == &skmem_test_init);
1904 VERIFY(SKMEM_OBJ_SIZE(oi) >= sizeof(struct skmem_test_obj));
1905 sto->sto_val[0] = (uint64_t)(void *)sto ^
1906 (uint64_t)(void *)&sto->sto_val[0];
1907 sto->sto_val[1] = (uint64_t)(void *)sto ^
1908 (uint64_t)(void *)&sto->sto_val[1];
1909 atomic_add_32(&skmem_test_objs, 1);
1910
1911 return 0;
1912 }
1913
1914 static void
1915 skmem_test_dtor(void *addr, void *arg)
1916 {
1917 struct skmem_test_obj *sto = addr;
1918
1919 VERIFY(arg == &skmem_test_init);
1920 VERIFY((sto->sto_val[0] ^ (uint64_t)(void *)&sto->sto_val[0]) ==
1921 (uint64_t)(void *)sto);
1922 VERIFY((sto->sto_val[1] ^ (uint64_t)(void *)&sto->sto_val[1]) ==
1923 (uint64_t)(void *)sto);
1924 VERIFY(skmem_test_objs > 0);
1925 atomic_add_32(&skmem_test_objs, -1);
1926 }
1927
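/*
 * skmem_cache allocation test: create a cache of small tagged objects
 * at the requested alignment, allocate MAX_PH_ARY of them, verify each
 * is properly aligned, free them all and destroy the cache, using
 * skmem_test_objs to check that every constructed object was also
 * destructed.
 */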
1928 static void
1929 skmem_tests(uint32_t align)
1930 {
1931 struct skmem_cache *skm;
1932 uint32_t bufsize = sizeof(struct skmem_test_obj);
1933
1934 uint32_t objary_max = (uint32_t)MAX_PH_ARY;
1935 void **objary = NULL;
1936 char name[64];
1937
1938 VERIFY(align != 0);
1939
1940 SK_ERR("bufsize %u align %u", bufsize, align);
1941
1942 objary = _MALLOC(sizeof(void *) * objary_max, M_TEMP,
1943 M_WAITOK | M_ZERO);
1944
1945 (void) snprintf(name, sizeof(name), "skmem_test.%u.%u", bufsize, align);
1946
1947 skm = skmem_cache_create(name, bufsize, align, skmem_test_ctor,
1948 skmem_test_dtor, NULL, &skmem_test_init, NULL, 0);
1949
1950 VERIFY(skmem_test_objs == 0);
1951 for (int i = 0; i < objary_max; i++) {
1952 objary[i] = skmem_cache_alloc(skm, SKMEM_SLEEP);
1953 VERIFY(objary[i] != NULL);
1954 VERIFY(IS_P2ALIGNED(objary[i], align));
1955 }
1956 for (int i = 0; i < objary_max; i++) {
1957 VERIFY(objary[i] != NULL);
1958 skmem_cache_free(skm, objary[i]);
1959 objary[i] = NULL;
1960 }
1961 skmem_cache_destroy(skm);
1962 VERIFY(skmem_test_objs == 0);
1963
1964 _FREE(objary, M_TEMP);
1965 objary = NULL;
1966 }
1967
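/*
 * Main test driver, run on its own kernel thread from the sysctl
 * handler below.  It performs the magazine sizing sanity checks, the
 * skmem_cache alignment tests, the basic packet buffer pool sanity
 * tests (skmem_basic_tests()), and then the multi-threaded advanced
 * tests for each pool mode in both blocking and non-blocking flavors,
 * before signalling completion via skmt_enabled.
 */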
1968 static void
1969 skmem_test_start(void *v, wait_result_t w)
1970 {
1971 int32_t ncpus = ml_wait_max_cpus();
1972 int error = 0, n;
1973 uint32_t flags;
1974 uint64_t mtbf_saved;
1975
1976 lck_mtx_lock(&skmt_lock);
1977 VERIFY(!skmt_busy);
1978 skmt_busy = 1;
1979 skmem_cache_test_start(1); /* 1 second update interval */
1980 lck_mtx_unlock(&skmt_lock);
1981
1982 VERIFY(skmth_info == NULL);
1983 skmth_info_size = sizeof(struct skmt_thread_info) * ncpus;
1984 skmth_info = (struct skmt_thread_info *) kalloc_data(skmth_info_size,
1985 Z_WAITOK | Z_ZERO);
1986
1987 /*
1988 * Sanity tests.
1989 */
1990 (void) skmem_cache_magazine_max(1);
1991 (void) skmem_cache_magazine_max(32);
1992 (void) skmem_cache_magazine_max(64);
1993 (void) skmem_cache_magazine_max(128);
1994 (void) skmem_cache_magazine_max(256);
1995 (void) skmem_cache_magazine_max(512);
1996 (void) skmem_cache_magazine_max(1024);
1997 (void) skmem_cache_magazine_max(2048);
1998 (void) skmem_cache_magazine_max(4096);
1999 (void) skmem_cache_magazine_max(8192);
2000 (void) skmem_cache_magazine_max(16384);
2001 (void) skmem_cache_magazine_max(32768);
2002 (void) skmem_cache_magazine_max(65536);
2003
2004 /*
2005 * skmem allocator tests
2006 */
2007 skmem_tests(8);
2008 skmem_tests(16);
2009 skmem_tests(32);
2010 skmem_tests(64);
2011 skmem_tests(128);
2012
2013 /*
2014 * Basic packet buffer pool sanity tests
2015 */
2016 skmem_basic_tests();
2017
2018 /*
2019 * Multi-threaded alloc and free tests (blocking).
2020 */
2021 for (n = 0; n < 7; n++) {
2022 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2023 skmem_advanced_tests(n, ncpus, 0, FALSE, flags);
2024 }
2025 for (n = 0; n < 7; n++) {
2026 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2027 skmem_advanced_tests(n, ncpus, 0, TRUE, flags);
2028 }
2029 for (n = 0; n < 7; n++) {
2030 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2031 skmem_advanced_tests(n, ncpus, 1, FALSE, flags);
2032 }
2033 for (n = 0; n < 7; n++) {
2034 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2035 skmem_advanced_tests(n, ncpus, 1, TRUE, flags);
2036 }
2037 for (n = 0; n < 7; n++) {
2038 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2039 skmem_advanced_tests(n, ncpus, 2, FALSE, flags);
2040 }
2041 for (n = 0; n < 7; n++) {
2042 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2043 skmem_advanced_tests(n, ncpus, 2, TRUE, flags);
2044 }
2045 for (n = 0; n < 7; n++) {
2046 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2047 skmem_advanced_tests(n, ncpus, 3, FALSE, flags);
2048 }
2049 for (n = 0; n < 7; n++) {
2050 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2051 skmem_advanced_tests(n, ncpus, 3, TRUE, flags);
2052 }
2053 for (n = 0; n < 7; n++) {
2054 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2055 skmem_advanced_tests(n, ncpus, 4, FALSE, flags);
2056 }
2057 for (n = 0; n < 7; n++) {
2058 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2059 skmem_advanced_tests(n, ncpus, 5, FALSE, flags);
2060 }
2061
2062 /*
2063 	 * Modes 4-5 deal with persistent/mirrored regions; to maximize
2064 	 * the chance of exercising the allocation-failure handling
2065 	 * paths, we lower the MTBF (if set) to the minimum possible
2066 	 * and restore it to the saved value later.
2067 */
2068 mtbf_saved = skmem_region_get_mtbf();
2069 if (mtbf_saved != 0) {
2070 skmem_region_set_mtbf(SKMEM_REGION_MTBF_MIN);
2071 }
2072
2073 /*
2074 * Multi-threaded alloc and free tests (non-blocking).
2075 */
2076
2077 for (n = 0; n < 7; n++) {
2078 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2079 skmem_advanced_tests(n, ncpus, 4, TRUE, flags);
2080 }
2081 for (n = 0; n < 7; n++) {
2082 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2083 skmem_advanced_tests(n, ncpus, 5, TRUE, flags);
2084 }
2085
2086 /*
2087 	 * Restore the MTBF to its previously set value.
2088 */
2089 if (mtbf_saved != 0) {
2090 skmem_region_set_mtbf(mtbf_saved);
2091 }
2092
2093 for (n = 0; n < 7; n++) {
2094 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2095 skmem_advanced_tests(n, ncpus, 6, FALSE, flags);
2096 }
2097 for (n = 0; n < 7; n++) {
2098 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2099 skmem_advanced_tests(n, ncpus, 6, TRUE, flags);
2100 }
2101 for (n = 0; n < 7; n++) {
2102 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2103 skmem_advanced_tests(n, ncpus, 7, FALSE, flags);
2104 }
2105 for (n = 0; n < 7; n++) {
2106 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2107 skmem_advanced_tests(n, ncpus, 7, TRUE, flags);
2108 }
2109 for (n = 0; n < 7; n++) {
2110 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2111 skmem_advanced_tests(n, ncpus, 8, FALSE, flags);
2112 }
2113 for (n = 0; n < 7; n++) {
2114 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2115 skmem_advanced_tests(n, ncpus, 8, TRUE, flags);
2116 }
2117
2118 lck_mtx_lock(&skmt_lock);
2119 skmt_enabled = 1;
2120 wakeup((caddr_t)&skmt_enabled);
2121 lck_mtx_unlock(&skmt_lock);
2122
2123 if (error != 0) {
2124 skmem_test_stop(v, w);
2125 }
2126 }
2127
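/*
 * Teardown path: release the per-thread info array and clear the
 * busy/enabled state, waking up any sysctl waiter blocked in
 * sysctl_skmem_test().
 */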
2128 static void
2129 skmem_test_stop(void *v, wait_result_t w)
2130 {
2131 #pragma unused(v, w)
2132
2133 if (skmth_info != NULL) {
2134 kfree_data(skmth_info, skmth_info_size);
2135 skmth_info = NULL;
2136 }
2137
2138 lck_mtx_lock(&skmt_lock);
2139 skmem_cache_test_stop();
2140 VERIFY(skmt_busy);
2141 skmt_busy = 0;
2142 skmt_enabled = 0;
2143 wakeup((caddr_t)&skmt_enabled);
2144 lck_mtx_unlock(&skmt_lock);
2145 }
2146
2147 static int
2148 sysctl_skmem_test(__unused struct sysctl_oid *oidp,
2149 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2150 {
2151 int error, newvalue, changed;
2152
2153 lck_mtx_lock(&skmt_lock);
2154 if ((error = sysctl_io_number(req, skmt_enabled, sizeof(int),
2155 &newvalue, &changed)) != 0) {
2156 goto done;
2157 }
2158
2159 if (changed && skmt_enabled != newvalue) {
2160 thread_t th;
2161 thread_continue_t func;
2162
2163 if (newvalue && skmt_busy) {
2164 SK_ERR("Older skmem test instance is still active");
2165 error = EBUSY;
2166 goto done;
2167 }
2168
2169 if (newvalue) {
2170 func = skmem_test_start;
2171 } else {
2172 func = skmem_test_stop;
2173 }
2174
2175 if (kernel_thread_start(func, NULL, &th) != KERN_SUCCESS) {
2176 SK_ERR("Failed to create skmem test action thread");
2177 error = EBUSY;
2178 goto done;
2179 }
2180 do {
2181 SK_DF(SK_VERB_MEM, "Waiting for %s to complete",
2182 newvalue ? "startup" : "shutdown");
2183 error = msleep(&skmt_enabled, &skmt_lock,
2184 PWAIT | PCATCH, "skmtw", NULL);
2185 /* BEGIN CSTYLED */
2186 /*
2187 * Loop exit conditions:
2188 * - we were interrupted
2189 * OR
2190 * - we are starting up and are enabled
2191 * (Startup complete)
2192 * OR
2193 * - we are starting up and are not busy
2194 * (Failed startup)
2195 * OR
2196 * - we are shutting down and are not busy
2197 * (Shutdown complete)
2198 */
2199 /* END CSTYLED */
2200 } while (!((error == EINTR) || (newvalue && skmt_enabled) ||
2201 (newvalue && !skmt_busy) || (!newvalue && !skmt_busy)));
2202
2203 thread_deallocate(th);
2204 }
2205
2206 done:
2207 lck_mtx_unlock(&skmt_lock);
2208 return error;
2209 }
2210
2211 SYSCTL_PROC(_kern_skywalk_mem, OID_AUTO, test,
2212 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
2213 sysctl_skmem_test, "I", "Start Skywalk memory test");
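/*
 * On DEVELOPMENT/DEBUG kernels the test can presumably be driven from
 * user space through this node, e.g.:
 *
 *	sysctl kern.skywalk.mem.test=1	# start; returns once startup completes
 *	sysctl kern.skywalk.mem.test=0	# stop
 *
 * (exact node path assumed from the _kern_skywalk_mem parent above).
 */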
2214
2215 #endif /* DEVELOPMENT || DEBUG */
2216