1 /*
2 * Copyright (c) 2018-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #if (DEVELOPMENT || DEBUG) /* XXX make this whole file a config option? */
30
31 #include <skywalk/os_skywalk_private.h>
32
33 #define SKMEM_TEST_BUFSIZE 2048
34
35 #if CONFIG_ARROW
36 #define TEST_OPTION_INHIBIT_CACHE 0
37 #else /* !CONFIG_ARROW */
38 #define TEST_OPTION_INHIBIT_CACHE KBIF_INHIBIT_CACHE
39 #endif /* CONFIG_ARROW */
40
41 static void skmem_test_start(void *, wait_result_t);
42 static void skmem_test_stop(void *, wait_result_t);
43 static void skmem_test_func(void *v, wait_result_t w);
44 static void skmem_test_mbfreecb(caddr_t cl, uint32_t size, caddr_t arg);
45 static void skmem_test_alloccb(kern_packet_t, uint32_t, const void *);
46
47 extern unsigned int ml_wait_max_cpus(void);
48 extern kern_return_t thread_terminate(thread_t);
49
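/*
 * Test state: skmt_busy is nonzero while a test run is in progress
 * (see skmem_test_enabled()), and skmt_mbcnt tracks the number of test
 * mbufs whose free callback has not yet fired.
 */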
50 static int skmt_enabled;
51 static int skmt_busy;
52 static int skmt_mbcnt;
53
54 decl_lck_mtx_data(static, skmt_lock);
55
56 struct skmt_alloc_ctx {
57 uint32_t stc_req; /* # of objects requested */
58 uint32_t stc_idx; /* expected index */
59 };
60
61 static struct skmt_alloc_ctx skmt_alloccb_ctx;
62
63 struct skmt_thread_info {
64 kern_packet_t sti_mph; /* master packet */
65 kern_packet_t sti_mpc; /* cloned packet */
66 thread_t sti_thread; /* thread instance */
67 boolean_t sti_nosleep; /* non-sleeping allocation */
68 } __attribute__((aligned(CHANNEL_CACHE_ALIGN_MAX)));
69
70 static struct skmt_thread_info *skmth_info;
71 static uint32_t skmth_info_size;
72 static int32_t skmth_cnt;
73 static boolean_t skmth_run;
74 static kern_pbufpool_t skmth_pp;
75
76 void
skmem_test_init(void)
78 {
79 lck_mtx_init(&skmt_lock, &sk_lock_group, &sk_lock_attr);
80 }
81
82 void
skmem_test_fini(void)
84 {
85 lck_mtx_destroy(&skmt_lock, &sk_lock_group);
86 }
87
88 bool
skmem_test_enabled(void)
90 {
91 bool enabled;
92 lck_mtx_lock(&skmt_lock);
93 enabled = (skmt_busy != 0);
94 lck_mtx_unlock(&skmt_lock);
95 return enabled;
96 }
97
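/* helper unions used by the reference checksum code below */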
98 typedef union {
99 char c[2];
100 uint16_t s;
101 } short_union_t;
102
103 typedef union {
104 uint16_t s[2];
105 long l;
106 } long_union_t;
107
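/* fold a 32-bit partial sum into 16 bits (one's-complement reduction) */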
108 static void
_reduce(int *sum)
110 {
111 long_union_t l_util;
112
113 l_util.l = *sum;
114 *sum = l_util.s[0] + l_util.s[1];
115 if (*sum > 65535) {
116 *sum -= 65535;
117 }
118 }
119
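/*
 * Reference 16-bit one's-complement sum (Internet checksum style) over a
 * buffer; used to validate the optimized __packet_cksum() and the
 * copy-and-checksum routines exercised below.
 */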
120 static uint16_t
skmem_reference_sum(void *buffer, int len, int sum0)
122 {
123 uint16_t *w;
124 int sum = sum0;
125
126 w = (uint16_t *)buffer;
127 while ((len -= 32) >= 0) {
128 sum += w[0]; sum += w[1];
129 sum += w[2]; sum += w[3];
130 sum += w[4]; sum += w[5];
131 sum += w[6]; sum += w[7];
132 sum += w[8]; sum += w[9];
133 sum += w[10]; sum += w[11];
134 sum += w[12]; sum += w[13];
135 sum += w[14]; sum += w[15];
136 w += 16;
137 }
138 len += 32;
139 while ((len -= 8) >= 0) {
140 sum += w[0]; sum += w[1];
141 sum += w[2]; sum += w[3];
142 w += 4;
143 }
144 len += 8;
145 if (len) {
146 _reduce(&sum);
147 while ((len -= 2) >= 0) {
148 sum += *w++;
149 }
150 }
151 if (len == -1) { /* odd-length packet */
152 short_union_t s_util;
153
154 s_util.s = 0;
155 s_util.c[0] = *((char *)w);
156 s_util.c[1] = 0;
157 sum += s_util.s;
158 }
159 _reduce(&sum);
160 return sum & 0xffff;
161 }
162
163 /*
164 * At present, the number of objects created in the pool will be
165 * higher than the requested amount, if the pool is allowed to use
 * the magazines layer. Round up a bit to accommodate any rounding
 * up done by the pool allocator.
168 */
169 #define MAX_PH_ARY P2ROUNDUP(skmem_cache_magazine_max(1) + 129, 256)
170
171 struct skmem_pp_ctx_s {
172 os_refcnt_t skmem_pp_ctx_refcnt;
173 };
174
175 static struct skmem_pp_ctx_s skmem_pp_ctx;
176
177 static uint32_t
skmem_pp_ctx_refcnt(void *ctx)
179 {
180 struct skmem_pp_ctx_s *pp_ctx = ctx;
181 VERIFY(pp_ctx == &skmem_pp_ctx);
182 return os_ref_get_count(&pp_ctx->skmem_pp_ctx_refcnt);
183 }
184
185 static void
skmem_pp_ctx_retain(void *ctx)
187 {
188 struct skmem_pp_ctx_s *pp_ctx = ctx;
189 VERIFY(pp_ctx == &skmem_pp_ctx);
190 os_ref_retain(&pp_ctx->skmem_pp_ctx_refcnt);
191 }
192
193 static void
skmem_pp_ctx_release(void *ctx)
195 {
196 struct skmem_pp_ctx_s *pp_ctx = ctx;
197 VERIFY(pp_ctx == &skmem_pp_ctx);
198 (void)os_ref_release(&pp_ctx->skmem_pp_ctx_refcnt);
199 }
200
201 #define BUFLEN 2048
202
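/*
 * Exercise the kern_pbufpool/buflet allocation and free KPIs (single and
 * batch, blocking and non-blocking), packet cloning, and mbuf/packet
 * attachment, using a pool created with the given KBIF_* flags.
 */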
203 static void
skmem_buflet_tests(uint32_t flags)
205 {
206 struct kern_pbufpool_init pp_init;
207 struct kern_pbufpool_memory_info pp_mem_info;
208 kern_pbufpool_t pp = NULL;
209 struct kern_pbufpool_init pp_init_mb;
210 kern_pbufpool_t pp_mb = NULL;
211 mach_vm_address_t baddr = 0;
212 kern_obj_idx_seg_t sg_idx;
213 kern_segment_t sg;
214 kern_packet_t *phary = NULL;
215 kern_packet_t *phary2 = NULL;
216 kern_packet_t *pharyc = NULL;
217 struct mbuf **mbary = NULL;
218 uint32_t mbcnt = 0;
219 uint32_t phcnt = 0, maxphcnt = 0;
220 uint32_t phcloned = 0;
221 size_t mblen = BUFLEN;
	kern_packet_t ph = 0, ph_mb = 0;
223 uint32_t i;
224 errno_t err;
225
226 /* packets only */
227 VERIFY(!(flags & KBIF_QUANTUM));
228
229 SK_ERR("flags 0x%x", flags);
230
231 phary = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
232 Z_WAITOK | Z_ZERO);
233 phary2 = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
234 Z_WAITOK | Z_ZERO);
235 pharyc = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
236 Z_WAITOK | Z_ZERO);
237 mbary = kalloc_type(struct mbuf *, MAX_PH_ARY, Z_WAITOK | Z_ZERO);
238
239 os_ref_init(&skmem_pp_ctx.skmem_pp_ctx_refcnt, NULL);
240 bzero(&pp_init, sizeof(pp_init));
241 pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
242 pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
243 (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
244 "%s", "skmem_buflet_tests");
245 pp_init.kbi_flags = flags;
246 pp_init.kbi_ctx = &skmem_pp_ctx;
247 pp_init.kbi_ctx_retain = skmem_pp_ctx_retain;
248 pp_init.kbi_ctx_release = skmem_pp_ctx_release;
249
250 /* must fail if packets is 0 */
251 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
252 pp_init.kbi_packets = 64;
253 /* must fail if bufsize is 0 */
254 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
255 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
256 /* must fail if max_frags is 0 */
257 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
258
259 pp_init.kbi_max_frags = 1;
260 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
261 VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 2);
262 void *ctx = kern_pbufpool_get_context(pp);
263 VERIFY(ctx == &skmem_pp_ctx);
264 VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 3);
265 skmem_pp_ctx_release(ctx);
266 VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 2);
267 bzero(&pp_mem_info, sizeof(pp_mem_info));
268 VERIFY(kern_pbufpool_get_memory_info(pp, NULL) == EINVAL);
269 VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
270 VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
271 VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
272 VERIFY(pp_mem_info.kpm_packets >= 64);
273 VERIFY(pp_mem_info.kpm_packets <= MAX_PH_ARY);
274 VERIFY(pp_mem_info.kpm_max_frags == 1);
275 VERIFY(pp_mem_info.kpm_buflets >= 64);
276 VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
277 VERIFY(kern_pbufpool_alloc(pp, 0, &ph) == EINVAL ||
278 (flags & KBIF_BUFFER_ON_DEMAND));
279 if (ph != 0) {
280 kern_packet_t phc = 0;
281 kern_buflet_t buflet;
282
283 VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
284 VERIFY((buflet = kern_packet_get_next_buflet(ph, NULL)) == NULL);
285 VERIFY(kern_packet_clone(ph, &phc, KPKT_COPY_LIGHT) == EINVAL);
286 VERIFY(kern_packet_clone(ph, &phc, KPKT_COPY_HEAVY) == EINVAL);
287 kern_pbufpool_free(pp, ph);
288 ph = 0;
289 }
290 maxphcnt = 32;
291 VERIFY(kern_pbufpool_alloc(pp, 5, &ph) == EINVAL);
292 if (flags & KBIF_BUFFER_ON_DEMAND) {
293 /* allocate and free one at a time (no buflet) */
294 for (i = 0, phcnt = 0; i < maxphcnt; i++) {
295 boolean_t stop = FALSE;
296 /*
297 * This may fail if skmem_region_mtbf is set, or if
298 * the system is short on memory. Perform retries at
299 * this layer to get at least 32 packets.
300 */
301 while ((err = kern_pbufpool_alloc_nosleep(pp, 0, &ph)) != 0) {
302 VERIFY(err == ENOMEM);
303 if (phcnt < 32) {
304 SK_ERR("[a] retrying alloc for packet %u",
305 phcnt);
306 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
307 continue;
308 }
309 stop = TRUE;
310 break;
311 }
312 if (stop) {
313 break;
314 }
315 VERIFY(ph != 0);
316 VERIFY(kern_packet_get_data_length(ph) == 0);
317 VERIFY(kern_packet_get_buflet_count(ph) == 0);
318 phary[phcnt++] = ph;
319 }
320 VERIFY(phcnt >= 32);
321 for (i = 0; i < phcnt; i++) {
322 kern_pbufpool_free(pp, phary[i]);
323 phary[i] = 0;
324 }
325 }
326 /* allocate and free one at a time (1 buflet) */
327 for (i = 0, phcnt = 0; i < maxphcnt; i++) {
328 boolean_t stop = FALSE;
329 /*
330 * This may fail if skmem_region_mtbf is set, or if
331 * the system is short on memory. Perform retries at
332 * this layer to get at least 32 packets.
333 */
334 while ((err = kern_pbufpool_alloc_nosleep(pp, 1, &ph)) != 0) {
335 VERIFY(err == ENOMEM);
336 if (phcnt < 32) {
				SK_ERR("[b] retrying alloc for packet %u",
338 phcnt);
339 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
340 continue;
341 }
342 stop = TRUE;
343 break;
344 }
345 if (stop) {
346 break;
347 }
348 VERIFY(ph != 0);
349 VERIFY(kern_packet_get_data_length(ph) == 0);
350 VERIFY(kern_packet_get_buflet_count(ph) == 1);
351 phary[phcnt++] = ph;
352 }
353 VERIFY(phcnt >= 32);
354 for (i = 0; i < phcnt; i++) {
355 kern_pbufpool_free(pp, phary[i]);
356 phary[i] = 0;
357 }
358 /* allocate and free in batch */
359 phcnt = maxphcnt;
360 for (;;) {
361 err = kern_pbufpool_alloc_batch_nosleep(pp, 1, phary, &phcnt);
362 VERIFY(err != EINVAL);
363 if (err == ENOMEM) {
364 phcnt = maxphcnt;
365 SK_ERR("retrying batch alloc for %u packets", phcnt);
366 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
367 } else if (err == EAGAIN) {
368 SK_ERR("batch alloc for %u packets only returned %u",
369 maxphcnt, phcnt);
370 break;
371 } else {
372 VERIFY(err == 0);
373 break;
374 }
375 }
376 VERIFY(phcnt > 0);
377 for (i = 0; i < phcnt; i++) {
378 VERIFY(phary[i] != 0);
379 VERIFY(kern_packet_get_data_length(phary[i]) == 0);
380 VERIFY(kern_packet_get_buflet_count(phary[i]) == 1);
381 }
382 kern_pbufpool_free_batch(pp, phary, phcnt);
383 /* allocate and free one at a time (blocking) */
384 for (i = 0, phcnt = 0; i < maxphcnt; i++) {
385 VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
386 VERIFY(ph != 0);
387 VERIFY(kern_packet_get_data_length(ph) == 0);
388 VERIFY(kern_packet_get_buflet_count(ph) == 1);
389 phary[phcnt++] = ph;
390 }
391 VERIFY(phcnt >= 32);
392 for (i = 0; i < phcnt; i++) {
393 kern_pbufpool_free(pp, phary[i]);
394 phary[i] = 0;
395 }
396 /* allocate with callback */
397 bzero(&skmt_alloccb_ctx, sizeof(skmt_alloccb_ctx));
398 skmt_alloccb_ctx.stc_req = phcnt;
399 VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
400 NULL, &skmt_alloccb_ctx) == EINVAL);
401 VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
402 skmem_test_alloccb, &skmt_alloccb_ctx) == 0);
403 VERIFY(skmt_alloccb_ctx.stc_idx == phcnt);
404 kern_pbufpool_free_batch(pp, phary, phcnt);
405
406 /*
407 * Allocate and free test
408 * Case 1: Packet has an mbuf attached
409 */
410 mbcnt = phcnt;
411 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
412 /* clone packets (lightweight, without mbufs) */
413 for (i = 0; i < phcnt; i++) {
414 kern_buflet_t buflet, buflet2;
415 kern_obj_idx_seg_t buf_idx_seg, buf2_idx_seg;
416
417 VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
418 NULL)) != NULL);
419 VERIFY(kern_buflet_set_data_length(buflet, BUFLEN) == 0);
420 VERIFY(__packet_finalize(phary[i]) == 0);
421 VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
422 (void) memset(kern_buflet_get_data_address(buflet), i, BUFLEN);
423 kern_packet_set_trace_id(phary[i], i);
424 VERIFY(kern_packet_get_trace_id(phary[i]) == i);
425 VERIFY(kern_packet_clone(phary[i], &pharyc[i],
426 KPKT_COPY_LIGHT) == 0 || !(flags & KBIF_BUFFER_ON_DEMAND));
427 if (pharyc[i] != 0) {
428 struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(pharyc[i]);
429 /*
430 * Source packet was allocated with 1 buffer, so
431 * validate that the clone packet points to that
432 * same buffer, and that the buffer's usecnt is 2.
433 */
434 VERIFY(!(QUM_ADDR(pharyc[i])->qum_qflags & QUM_F_FINALIZED));
435 VERIFY(kpkt2->pkt_mbuf == NULL);
436 VERIFY(!(kpkt2->pkt_pflags & PKT_F_MBUF_MASK));
437 VERIFY((buflet2 = kern_packet_get_next_buflet(pharyc[i],
438 NULL)) != NULL);
439 VERIFY(kern_buflet_get_object_address(buflet) ==
440 kern_buflet_get_object_address(buflet2));
441 VERIFY(kern_buflet_get_data_address(buflet) ==
442 kern_buflet_get_data_address(buflet2));
443 VERIFY(kern_buflet_get_data_limit(buflet) ==
444 kern_buflet_get_data_limit(buflet2));
445 VERIFY(kern_buflet_get_data_offset(buflet) ==
446 kern_buflet_get_data_offset(buflet2));
447 VERIFY(kern_buflet_get_data_length(buflet) ==
448 kern_buflet_get_data_length(buflet2));
449 VERIFY(kern_buflet_set_data_limit(buflet2,
450 (uint16_t)kern_buflet_get_object_limit(buflet2) + 1)
451 == ERANGE);
452 VERIFY(kern_buflet_set_data_limit(buflet2,
453 (uint16_t)kern_buflet_get_object_limit(buflet2) - 16)
454 == 0);
455 VERIFY(kern_buflet_set_data_address(buflet2,
456 (const void *)((uintptr_t)kern_buflet_get_object_address(buflet2) - 1))
457 == ERANGE);
458 VERIFY(kern_buflet_set_data_address(buflet2,
459 (const void *)((uintptr_t)kern_buflet_get_object_address(buflet2) + 16))
460 == 0);
461 VERIFY(kern_buflet_set_data_length(buflet2,
462 kern_buflet_get_data_length(buflet2) - 32) == 0);
463 VERIFY(kern_buflet_get_object_segment(buflet,
464 &buf_idx_seg) ==
465 kern_buflet_get_object_segment(buflet2,
466 &buf2_idx_seg));
467 VERIFY(buf_idx_seg == buf2_idx_seg);
468 VERIFY(buflet->buf_ctl == buflet2->buf_ctl);
469 VERIFY(buflet->buf_ctl->bc_usecnt == 2);
470 ++phcloned;
471 VERIFY(__packet_finalize(pharyc[i]) == 0);
472 /* verify trace id isn't reused */
473 VERIFY(kern_packet_get_trace_id(pharyc[i]) == 0);
474 kern_packet_set_trace_id(pharyc[i], phcnt - i);
475 VERIFY(kern_packet_get_trace_id(pharyc[i]) == (phcnt - i));
476 VERIFY(kern_packet_get_trace_id(phary[i]) == i);
477 }
478 }
479 VERIFY(phcloned == phcnt || phcloned == 0);
480 if (phcloned != 0) {
481 kern_pbufpool_free_batch(pp, pharyc, phcloned);
482 phcloned = 0;
483 }
484 kern_pbufpool_free_batch(pp, phary, phcnt);
485 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
486 VERIFY(phcnt == mbcnt);
487 VERIFY(skmt_mbcnt == 0);
488 for (i = 0; i < mbcnt; i++) {
489 struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
490 kern_buflet_t buflet;
491
492 VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
493 NULL)) != NULL);
494 VERIFY(kern_buflet_set_data_length(buflet, BUFLEN) == 0);
495 (void) memset(kern_buflet_get_data_address(buflet), i, BUFLEN);
496 /* attach mbuf to packets and initialize packets */
497 mblen = BUFLEN;
498 VERIFY(mbuf_ring_cluster_alloc(MBUF_WAITOK, MBUF_TYPE_HEADER,
499 &mbary[i], skmem_test_mbfreecb, &mblen) == 0);
500 VERIFY(mblen == BUFLEN);
501 VERIFY(mbary[i] != NULL);
502 VERIFY(mbary[i]->m_nextpkt == NULL);
503 mbuf_setlen(mbary[i], mblen);
504 mbuf_pkthdr_setlen(mbary[i], mblen);
505 VERIFY((size_t)m_pktlen(mbary[i]) == mblen);
506 (void) memset(mbuf_data(mbary[i]), i, mblen);
507 kpkt->pkt_mbuf = mbary[i];
508 kpkt->pkt_pflags |= PKT_F_MBUF_DATA;
509 VERIFY(__packet_finalize_with_mbuf(kpkt) == 0);
510 VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
511 VERIFY(mbuf_ring_cluster_activate(kpkt->pkt_mbuf) == 0);
512 }
513 /* clone packets (heavyweight) */
514 for (i = 0; i < phcnt; i++) {
515 VERIFY(kern_packet_clone(phary[i], &pharyc[i],
516 KPKT_COPY_HEAVY) == 0);
517 struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
518 struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(pharyc[i]);
519 kern_buflet_t buflet, buflet2;
520 /*
521 * Source packet was allocated with 1 buffer, so
522 * validate that the clone packet points to different
523 * buffer, and that the clone's attached mbuf is also
524 * different than the source's.
525 */
526 VERIFY(!(QUM_ADDR(pharyc[i])->qum_qflags & QUM_F_FINALIZED));
527 VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
528 NULL)) != NULL);
529 VERIFY((buflet2 = kern_packet_get_next_buflet(pharyc[i],
530 NULL)) != NULL);
531 VERIFY(kern_buflet_get_object_address(buflet) !=
532 kern_buflet_get_object_address(buflet2));
533 VERIFY(kern_buflet_get_data_address(buflet) !=
534 kern_buflet_get_data_address(buflet2));
535 VERIFY(kern_buflet_get_data_limit(buflet) ==
536 kern_buflet_get_data_limit(buflet2));
537 VERIFY(kern_buflet_get_data_offset(buflet) ==
538 kern_buflet_get_data_offset(buflet2));
539 VERIFY(kern_buflet_get_data_length(buflet) == BUFLEN);
540 VERIFY(kern_buflet_get_data_length(buflet) ==
541 kern_buflet_get_data_length(buflet2));
542 VERIFY(kpkt->pkt_pflags & PKT_F_MBUF_DATA);
543 VERIFY(kpkt2->pkt_pflags & PKT_F_MBUF_DATA);
544 VERIFY(m_pktlen(kpkt2->pkt_mbuf) == m_pktlen(kpkt->pkt_mbuf));
545 VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
546 VERIFY(kern_packet_get_data_length(phary[i]) ==
547 kern_packet_get_data_length(pharyc[i]));
548 VERIFY(buflet->buf_ctl != buflet2->buf_ctl);
549 VERIFY(buflet->buf_ctl->bc_usecnt == 1);
550 VERIFY(buflet2->buf_ctl->bc_usecnt == 1);
551 VERIFY(memcmp(kern_buflet_get_data_address(buflet),
552 kern_buflet_get_data_address(buflet2),
553 kern_buflet_get_data_length(buflet)) == 0);
554 VERIFY(kpkt->pkt_mbuf != NULL);
555 VERIFY(kpkt2->pkt_mbuf != NULL);
556 VERIFY(mbuf_data(kpkt->pkt_mbuf) != mbuf_data(kpkt2->pkt_mbuf));
557 VERIFY(mbuf_len(kpkt->pkt_mbuf) == mbuf_len(kpkt2->pkt_mbuf));
558 /* mbuf contents must have been copied */
559 VERIFY(memcmp(mbuf_data(kpkt->pkt_mbuf),
560 mbuf_data(kpkt2->pkt_mbuf), mbuf_len(kpkt->pkt_mbuf)) == 0);
561 VERIFY(__packet_finalize(pharyc[i]) == 0);
562 ++phcloned;
563 }
564 VERIFY(phcloned == phcnt);
565 kern_pbufpool_free_batch(pp, pharyc, phcloned);
566 phcloned = 0;
567 skmt_mbcnt = mbcnt;
568 kern_pbufpool_free_batch(pp, phary, phcnt);
569 /* skmem_test_mbfreecb() should have been called for all mbufs by now */
570 VERIFY(skmt_mbcnt == 0);
571 for (i = 0; i < mbcnt; i++) {
572 VERIFY(mbary[i] != NULL);
573 m_freem(mbary[i]);
574 mbary[i] = NULL;
575 }
576 mbcnt = 0;
577
578 /*
579 * Allocate and free test
580 * Case 2: Packet has a packet attached
581 */
582 VERIFY(pp_mem_info.kpm_packets >= 64);
583 phcnt = 32;
584 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
585 VERIFY(phcnt == 32);
586 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary2, &phcnt) == 0);
587 VERIFY(phcnt == 32);
588 /* attach each packet to a packet */
589 for (i = 0; i < phcnt; i++) {
590 struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
591 struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(phary2[i]);
592
593 kpkt->pkt_pkt = kpkt2;
594 kpkt->pkt_pflags |= PKT_F_PKT_DATA;
595 }
596 /* free the batch of packets (also free the attached packets) */
597 kern_pbufpool_free_batch(pp, phary, phcnt);
598
599 /*
600 * Allocate and free test
601 * Case 3: Packet has a packet attached. The attached packet itself has
602 * an mbuf attached.
603 */
604 VERIFY(pp_mem_info.kpm_packets >= 64);
605 phcnt = 32;
606 mbcnt = 32;
607 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
608 VERIFY(phcnt == 32);
609 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary2, &phcnt) == 0);
610 VERIFY(phcnt == 32);
611 VERIFY(skmt_mbcnt == 0);
612 for (i = 0; i < mbcnt; i++) {
613 mblen = BUFLEN;
614 VERIFY(mbuf_ring_cluster_alloc(MBUF_WAITOK, MBUF_TYPE_HEADER,
615 &mbary[i], skmem_test_mbfreecb, &mblen) == 0);
616 VERIFY(mbary[i] != NULL);
617 VERIFY(mbary[i]->m_nextpkt == NULL);
618 }
619 /* attach each packet to a packet */
620 for (i = 0; i < phcnt; i++) {
621 struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
622 struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(phary2[i]);
623
624 VERIFY(mbary[i] != NULL);
625 VERIFY(__packet_initialize_with_mbuf(kpkt2,
626 mbary[i], 0, 0) == 0);
627 VERIFY(mbuf_ring_cluster_activate(kpkt2->pkt_mbuf) == 0);
628 kpkt->pkt_pkt = kpkt2;
629 kpkt->pkt_pflags |= PKT_F_PKT_DATA;
630 }
631 skmt_mbcnt = mbcnt;
632 /* free the batch of packets (also free the attached packets) */
633 kern_pbufpool_free_batch(pp, phary, phcnt);
634 /* skmem_test_mbfreecb() should have been called for all mbufs by now */
635 VERIFY(skmt_mbcnt == 0);
636 for (i = 0; i < mbcnt; i++) {
637 VERIFY(mbary[i] != NULL);
638 m_freem(mbary[i]);
639 mbary[i] = NULL;
640 }
641 mbcnt = 0;
642
643 kern_pbufpool_destroy(pp);
644 pp = NULL;
645 /* check that ctx_release has been called */
646 VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 1);
647
648 pp_init.kbi_ctx = NULL;
649 pp_init.kbi_ctx_retain = NULL;
650 pp_init.kbi_ctx_release = NULL;
651 pp_init.kbi_buflets = 1;
652 /* must fail if buflets is non-zero and less than packets */
653 if (!(flags & KBIF_BUFFER_ON_DEMAND)) {
654 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
655 } else {
656 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
657 kern_pbufpool_destroy(pp);
658 pp = NULL;
659 }
660 pp_init.kbi_buflets = (64 * 2);
661 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
662 bzero(&pp_mem_info, sizeof(pp_mem_info));
663 VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
664 VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
665 VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
666 VERIFY(pp_mem_info.kpm_packets >= 64);
667 VERIFY(pp_mem_info.kpm_max_frags == 1);
668 VERIFY(pp_mem_info.kpm_buflets >= (64 * 2));
669 VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
670 VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
671 VERIFY(kern_packet_get_buflet_count(ph) == 1);
672 kern_pbufpool_free(pp, ph);
673 ph = 0;
674 phcnt = 4;
675 VERIFY(kern_pbufpool_alloc_batch(pp, 4, phary, &phcnt) == EINVAL);
676 VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
677 VERIFY(kern_packet_get_buflet_count(phary[0]) == 1);
678 VERIFY(kern_packet_get_buflet_count(phary[1]) == 1);
679 VERIFY(kern_packet_get_buflet_count(phary[2]) == 1);
680 VERIFY(kern_packet_get_buflet_count(phary[3]) == 1);
681 kern_pbufpool_free_batch(pp, phary, phcnt);
682 kern_pbufpool_destroy(pp);
683 pp = NULL;
684
685 /* check multi-buflet KPIs */
686 bzero(&pp_init_mb, sizeof(pp_init_mb));
687 pp_init_mb.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
688 pp_init_mb.kbi_buf_seg_size = skmem_usr_buf_seg_size;
689 (void) snprintf((char *)pp_init_mb.kbi_name,
690 sizeof(pp_init_mb.kbi_name), "%s", "skmem_buflet_tests_mb");
691 pp_init_mb.kbi_flags = flags;
692 pp_init_mb.kbi_max_frags = 4;
693 pp_init_mb.kbi_packets = 64;
694 pp_init_mb.kbi_bufsize = 512;
695 pp_init_mb.kbi_buflets =
696 pp_init_mb.kbi_packets * pp_init_mb.kbi_max_frags;
697
698 VERIFY((kern_pbufpool_create(&pp_init_mb, &pp_mb, NULL) == EINVAL) ||
699 (flags & KBIF_BUFFER_ON_DEMAND));
700
701 if (pp_mb != NULL) {
702 bzero(&pp_mem_info, sizeof(pp_mem_info));
703 VERIFY(kern_pbufpool_get_memory_info(pp_mb, &pp_mem_info) == 0);
704 VERIFY(kern_pbufpool_alloc(pp_mb, 0, &ph_mb) == 0 ||
705 !(flags & KBIF_BUFFER_ON_DEMAND));
706 if (ph_mb != 0) {
707 VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
708 kern_pbufpool_free(pp_mb, ph_mb);
709 ph_mb = 0;
710 }
711 VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, &sg,
712 &sg_idx) == 0 || !(flags & KBIF_BUFFER_ON_DEMAND));
713 if (baddr != 0) {
714 VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
715 kern_pbufpool_free_buffer(pp_mb, baddr);
716 baddr = 0;
717 }
718 kern_pbufpool_destroy(pp_mb);
719 pp_mb = NULL;
720 }
721
722 kfree_type(struct mbuf *, MAX_PH_ARY, mbary);
723 mbary = NULL;
724
725 kfree_data(phary, sizeof(kern_packet_t) * MAX_PH_ARY);
726 phary = NULL;
727
728 kfree_data(phary2, sizeof(kern_packet_t) * MAX_PH_ARY);
729 phary2 = NULL;
730
731 kfree_data(pharyc, sizeof(kern_packet_t) * MAX_PH_ARY);
732 pharyc = NULL;
733 }
734
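/*
 * Free callback for the test mbuf ring clusters; decrements the count of
 * outstanding test mbufs so the callers can verify it reaches zero.
 */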
735 static void
skmem_test_mbfreecb(caddr_t cl, uint32_t size, caddr_t arg)
737 {
738 #pragma unused(cl, size)
739 struct mbuf *m = (void *)arg;
740
741 VERIFY(!mbuf_ring_cluster_is_active(m));
742 VERIFY(skmt_mbcnt > 0);
743 atomic_add_32(&skmt_mbcnt, -1);
744 }
745
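/* batch-alloc callback: verifies the packet handle and its allocation order */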
746 static void
skmem_test_alloccb(kern_packet_t ph, uint32_t idx, const void *ctx)
748 {
749 VERIFY(ph != 0);
750 VERIFY(ctx == &skmt_alloccb_ctx);
751 VERIFY(idx < skmt_alloccb_ctx.stc_req);
752 VERIFY(idx == atomic_add_32_ov(&skmt_alloccb_ctx.stc_idx, 1));
753 }
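
/*
 * Validate packet data KPIs: buffer/buflet allocation, multi-buflet packets,
 * and the checksum and copy-and-checksum routines against the reference sum,
 * for aligned and (where supported) unaligned source/destination offsets.
 */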
754 static void
skmem_packet_tests(uint32_t flags)
756 {
757 struct kern_pbufpool_memory_info pp_mb_mem_info;
758 struct kern_pbufpool_memory_info pp_mem_info;
759 struct kern_pbufpool_init pp_init;
760 kern_pbufpool_t pp = NULL;
761 struct kern_pbufpool_init pp_init_mb;
762 kern_pbufpool_t pp_mb = NULL;
763 mach_vm_address_t baddr = 0;
764 uint8_t *buffer, *ref_buffer;
765 kern_obj_idx_seg_t sg_idx;
766 kern_buflet_t buflet;
767 kern_segment_t sg;
768 kern_packet_t ph = 0, ph_mb = 0;
769 struct mbuf *m = NULL;
770 uint16_t len;
771 uint32_t i;
772 uint32_t csum_eee_ref, csum_eeo_ref, csum_eoe_ref, csum_eoo_ref;
773 uint32_t csum_oee_ref, csum_oeo_ref, csum_ooe_ref, csum_ooo_ref, csum;
774 boolean_t test_unaligned;
775 kern_buflet_t bft0, bft1;
776
777 SK_ERR("flags 0x%x", flags);
778
779 /*
780 * XXX: Skip packet tests involving unaligned addresses when
781 * KBIF_INHIBIT_CACHE is set, as the copy-and-checksum routine
782 * currently assumes normal memory, rather than device memory.
783 */
784 test_unaligned = !(flags & KBIF_INHIBIT_CACHE);
785
786 /* allocate separately in case pool is setup for device memory */
787 ref_buffer = (uint8_t *) kalloc_data(SKMEM_TEST_BUFSIZE,
788 Z_WAITOK | Z_ZERO);
789
790 bzero(&pp_init_mb, sizeof(pp_init_mb));
791 pp_init_mb.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
792 pp_init_mb.kbi_buf_seg_size = skmem_usr_buf_seg_size;
793 (void) snprintf((char *)pp_init_mb.kbi_name,
794 sizeof(pp_init_mb.kbi_name), "%s", "skmem_packet_tests_mb");
795 pp_init_mb.kbi_flags = flags | KBIF_BUFFER_ON_DEMAND;
796 pp_init_mb.kbi_max_frags = 4;
797 pp_init_mb.kbi_packets = 64;
798 pp_init_mb.kbi_bufsize = 512;
799 pp_init_mb.kbi_buflets =
800 pp_init_mb.kbi_packets * pp_init_mb.kbi_max_frags;
801 pp_init_mb.kbi_ctx = NULL;
802 pp_init_mb.kbi_ctx_retain = NULL;
803 pp_init_mb.kbi_ctx_release = NULL;
804
805 VERIFY(kern_pbufpool_create(&pp_init_mb, &pp_mb, &pp_mb_mem_info) == 0);
806 VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, NULL, NULL) == 0);
807 kern_pbufpool_free_buffer(pp_mb, baddr);
808 VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, &sg, &sg_idx) == 0);
809 VERIFY(sg != NULL);
810 VERIFY(sg->sg_region != NULL);
811 VERIFY(sg->sg_md != NULL);
812 VERIFY(sg->sg_start != 0);
813 VERIFY(sg->sg_end != 0);
814 VERIFY(sg->sg_type == SKSEG_TYPE_ALLOC);
815 kern_pbufpool_free_buffer(pp_mb, baddr);
816 baddr = 0;
817
818 /* add buflet to a packet with buf count 1 */
819 VERIFY(kern_pbufpool_alloc(pp_mb, 1, &ph_mb) == 0);
820 VERIFY(kern_pbufpool_alloc_buflet(pp_mb, &bft1) == 0);
821 VERIFY(bft1 != NULL);
822 VERIFY(kern_buflet_get_data_address(bft1) != NULL);
823 VERIFY(kern_buflet_get_object_address(bft1) != NULL);
824 VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) != NULL);
825 VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
826 VERIFY(kern_packet_get_buflet_count(ph_mb) == 2);
827 VERIFY(kern_packet_get_next_buflet(ph_mb, NULL) == bft0);
828 VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
829 VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
830 VERIFY(kern_packet_finalize(ph_mb) == 0);
831 kern_pbufpool_free(pp_mb, ph_mb);
832 ph_mb = 0;
833
834 /* add buflet to a packet with buf count 0 */
835 VERIFY(kern_pbufpool_alloc(pp_mb, 0, &ph_mb) == 0);
836 VERIFY(kern_packet_get_buflet_count(ph_mb) == 0);
837 VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) == NULL);
838 VERIFY(kern_pbufpool_alloc_buflet(pp_mb, &bft1) == 0);
839 VERIFY(bft1 != NULL);
840 VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
841 VERIFY(kern_packet_get_buflet_count(ph_mb) == 1);
842 VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
843 VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
844 VERIFY(kern_buflet_get_data_address(bft1) != NULL);
845 VERIFY(kern_buflet_get_object_address(bft1) != NULL);
846 VERIFY(kern_buflet_get_data_limit(bft1) != 0);
847 VERIFY(kern_buflet_get_data_length(bft1) == 0);
848 VERIFY(kern_packet_finalize(ph_mb) == 0);
849 kern_pbufpool_free(pp_mb, ph_mb);
850 ph_mb = 0;
851
852 bzero(&pp_init, sizeof(pp_init));
853 pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
854 pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
855 (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
856 "%s", "skmem_packet_tests");
857 pp_init.kbi_flags = flags;
858 pp_init.kbi_packets = 64;
859 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
860 pp_init.kbi_max_frags = 1;
861 pp_init.kbi_buflets = (64 * 2);
862 pp_init.kbi_ctx = NULL;
863 pp_init.kbi_ctx_retain = NULL;
864 pp_init.kbi_ctx_release = NULL;
865
866 /* validate multi-buflet packet checksum/copy+checksum routines */
867 VERIFY(kern_pbufpool_create(&pp_init, &pp, &pp_mem_info) == 0);
868 VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
869 VERIFY(kern_packet_get_buflet_count(ph) == 1);
870
871 VERIFY((buflet = kern_packet_get_next_buflet(ph, NULL)) != NULL);
872 VERIFY((buffer = kern_buflet_get_data_address(buflet)) != NULL);
873 len = SKMEM_TEST_BUFSIZE;
874 for (i = 0; i < len; i++) {
875 ref_buffer[i] = (i & 0xff);
876 }
877 /* enforce load/store byte for device memory case */
878 volatile uint8_t *bufp = buffer;
879 for (i = 0; i < len; i++) {
880 bufp[i] = ref_buffer[i];
881 }
882 VERIFY(kern_buflet_set_data_length(buflet, len) == 0);
883 VERIFY(__packet_finalize(ph) == 0);
884
885 /* calculate and validate reference value */
886 csum_eee_ref = __packet_cksum(buffer, len, 0);
887 VERIFY(skmem_reference_sum(ref_buffer, len, 0) == csum_eee_ref);
888 csum_eoe_ref = __packet_cksum(buffer, len - 2, 0);
889 VERIFY(skmem_reference_sum(ref_buffer, len - 2, 0) == csum_eoe_ref);
890 csum_eoo_ref = csum_eeo_ref = __packet_cksum(buffer, len - 1, 0);
891 VERIFY(skmem_reference_sum(ref_buffer, len - 1, 0) == csum_eoo_ref);
892 csum_oeo_ref = csum_ooo_ref = __packet_cksum(buffer + 1, len - 1, 0);
893 VERIFY(skmem_reference_sum(ref_buffer + 1, len - 1, 0) == csum_oeo_ref);
894 csum_ooe_ref = csum_oee_ref = __packet_cksum(buffer + 1, len - 2, 0);
895 VERIFY(skmem_reference_sum(ref_buffer + 1, len - 2, 0) == csum_ooe_ref);
896
897 /* sanity tests */
898 VERIFY(skmem_reference_sum(ref_buffer + 2, len - 2, 0) ==
899 __packet_cksum(buffer + 2, len - 2, 0));
900 VERIFY(skmem_reference_sum(ref_buffer + 3, len - 3, 0) ==
901 __packet_cksum(buffer + 3, len - 3, 0));
902 VERIFY(skmem_reference_sum(ref_buffer + 4, len - 4, 0) ==
903 __packet_cksum(buffer + 4, len - 4, 0));
904 VERIFY(skmem_reference_sum(ref_buffer + 5, len - 5, 0) ==
905 __packet_cksum(buffer + 5, len - 5, 0));
906 VERIFY(skmem_reference_sum(ref_buffer + 6, len - 6, 0) ==
907 __packet_cksum(buffer + 6, len - 6, 0));
908 VERIFY(skmem_reference_sum(ref_buffer + 7, len - 7, 0) ==
909 __packet_cksum(buffer + 7, len - 7, 0));
910
911 VERIFY(mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_HEADER, &m) == 0);
912 VERIFY(mbuf_copyback(m, 0, len, buffer, MBUF_WAITOK) == 0);
913
914 /* verify copy-checksum between packets */
915 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
916 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
917 pkt_copypkt_sum(ph, 0, ph_mb, 0, len - 1, &csum, TRUE);
918 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
919 VERIFY(__packet_finalize(ph_mb) == 0);
920 if (csum_eeo_ref != csum) {
921 SK_ERR("pkt_copypkt_sum: csum_eeo_mismatch 0x%x, "
922 "0x%x, 0x%llx", csum_eeo_ref, csum,
923 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
924 }
925 VERIFY(csum_eeo_ref == csum);
926 kern_pbufpool_free(pp_mb, ph_mb);
927 ph_mb = 0;
928
929 if (test_unaligned) {
930 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
931 pkt_copypkt_sum(ph, 0, ph_mb, 1, len - 2, &csum, TRUE);
932 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
933 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
934 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
935 VERIFY(__packet_finalize(ph_mb) == 0);
936 if (csum_eoe_ref != csum) {
937 SK_ERR("pkt_copypkt_sum: csum_eoe_mismatch 0x%x, "
938 "0x%x, 0x%llx", csum_eoe_ref, csum,
939 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
940 }
941 VERIFY(csum_eoe_ref == csum);
942 kern_pbufpool_free(pp_mb, ph_mb);
943 ph_mb = 0;
944
945 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
946 pkt_copypkt_sum(ph, 0, ph_mb, 1, len - 1, &csum, TRUE);
947 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
948 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
949 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
950 VERIFY(__packet_finalize(ph_mb) == 0);
951 if (csum_eoo_ref != csum) {
952 SK_ERR("pkt_copypkt_sum: csum_eoo_mismatch 0x%x, "
953 "0x%x, 0x%llx", csum_eoo_ref, csum,
954 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
955 }
956 VERIFY(csum_eoo_ref == csum);
957 kern_pbufpool_free(pp_mb, ph_mb);
958 ph_mb = 0;
959
960 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
961 pkt_copypkt_sum(ph, 1, ph_mb, 0, len - 1, &csum, TRUE);
962 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
963 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
964 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
965 VERIFY(__packet_finalize(ph_mb) == 0);
966 if (csum_oeo_ref != csum) {
967 SK_ERR("pkt_copypkt_sum: csum_oeo_mismatch 0x%x, "
968 "0x%x, 0x%llx", csum_oeo_ref, csum,
969 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
970 }
971 VERIFY(csum_oeo_ref == csum);
972 kern_pbufpool_free(pp_mb, ph_mb);
973 ph_mb = 0;
974
975 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
976 pkt_copypkt_sum(ph, 1, ph_mb, 1, len - 1, &csum, TRUE);
977 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
978 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
979 VERIFY(__packet_finalize(ph_mb) == 0);
980 if (csum_ooo_ref != csum) {
981 SK_ERR("pkt_copypkt_sum: csum_ooo_mismatch 0x%x, "
982 "0x%x, 0x%llx", csum_ooo_ref, csum,
983 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
984 }
985 VERIFY(csum_ooo_ref == csum);
986 kern_pbufpool_free(pp_mb, ph_mb);
987 ph_mb = 0;
988
989 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
990 pkt_copypkt_sum(ph, 1, ph_mb, 1, len - 2, &csum, TRUE);
991 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
992 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
993 VERIFY(__packet_finalize(ph_mb) == 0);
994 if (csum_ooe_ref != csum) {
995 SK_ERR("pkt_copypkt_sum: csum_ooe_mismatch 0x%x, "
996 "0x%x, 0x%llx", csum_ooe_ref, csum,
997 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
998 }
999 VERIFY(csum_ooe_ref == csum);
1000 kern_pbufpool_free(pp_mb, ph_mb);
1001 ph_mb = 0;
1002
1003 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1004 pkt_copypkt_sum(ph, 1, ph_mb, 0, len - 2, &csum, TRUE);
1005 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1006 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1007 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1008 VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_oee_ref != csum) {
1010 SK_ERR("pkt_copypkt_sum: csum_oee_mismatch 0x%x, "
1011 "0x%x, 0x%llx", csum_oee_ref, csum,
1012 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1013 }
1014 VERIFY(csum_oee_ref == csum);
1015 kern_pbufpool_free(pp_mb, ph_mb);
1016 ph_mb = 0;
1017 }
1018
1019 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1020 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1021 pkt_copypkt_sum(ph, 0, ph_mb, 0, len, &csum, TRUE);
1022 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1023 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1024 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1025 VERIFY(__packet_finalize(ph_mb) == 0);
1026 if (csum_eee_ref != csum) {
1027 SK_ERR("pkt_copypkt_sum: csum_eee_mismatch 0x%x, "
1028 "0x%x, 0x%llx", csum_eee_ref, csum,
1029 SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1030 }
1031 VERIFY(csum_eee_ref == csum);
1032
1033 /* verify copy-checksum from packet to buffer */
1034 csum = pkt_copyaddr_sum(ph_mb, 0, buffer, len - 1, TRUE, 0, NULL);
1035 if (csum_eeo_ref != csum) {
1036 SK_ERR("pkt_copyaddr_sum: csum_eeo_mismatch "
1037 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eeo_ref,
1038 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1039 SK_KVA(buffer));
1040 }
1041 VERIFY(csum_eeo_ref == csum);
1042
1043 if (test_unaligned) {
1044 csum = pkt_copyaddr_sum(ph_mb, 0, buffer + 1, len - 1, TRUE, 0, NULL);
1045 if (csum_eoo_ref != csum) {
1046 SK_ERR("pkt_copyaddr_sum: csum_eoo_mismatch "
1047 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoo_ref,
1048 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1049 SK_KVA(buffer));
1050 }
1051 VERIFY(csum_eoo_ref == csum);
1052
1053 csum = pkt_copyaddr_sum(ph_mb, 0, buffer + 1, len - 2, TRUE, 0, NULL);
1054 if (csum_eoe_ref != csum) {
1055 SK_ERR("pkt_copyaddr_sum: csum_eoe_mismatch "
1056 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoe_ref,
1057 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1058 SK_KVA(buffer));
1059 }
1060 VERIFY(csum_eoe_ref == csum);
1061
1062 csum = pkt_copyaddr_sum(ph_mb, 1, buffer + 1, len - 2, TRUE, 0, NULL);
1063 if (csum_ooe_ref != csum) {
1064 SK_ERR("pkt_copyaddr_sum: csum_ooe_mismatch "
1065 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooe_ref,
1066 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1067 SK_KVA(buffer));
1068 }
1069 VERIFY(csum_ooe_ref == csum);
1070
1071 csum = pkt_copyaddr_sum(ph_mb, 1, buffer, len - 2, TRUE, 0, NULL);
1072 if (csum_oee_ref != csum) {
1073 SK_ERR("pkt_copyaddr_sum: csum_oee_mismatch "
1074 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oee_ref,
1075 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1076 SK_KVA(buffer));
1077 }
1078 VERIFY(csum_oee_ref == csum);
1079
1080 csum = pkt_copyaddr_sum(ph_mb, 1, buffer, len - 1, TRUE, 0, NULL);
1081 if (csum_oeo_ref != csum) {
1082 SK_ERR("pkt_copyaddr_sum: csum_oeo_mismatch "
1083 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oeo_ref,
1084 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1085 SK_KVA(buffer));
1086 }
1087 VERIFY(csum_oeo_ref == csum);
1088
1089 csum = pkt_copyaddr_sum(ph_mb, 1, buffer + 1, len - 1, TRUE, 0, NULL);
1090 if (csum_ooo_ref != csum) {
1091 SK_ERR("pkt_copyaddr_sum: csum_ooo_mismatch "
1092 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooo_ref,
1093 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1094 SK_KVA(buffer));
1095 }
1096 VERIFY(csum_ooo_ref == csum);
1097 }
1098
1099 csum = pkt_copyaddr_sum(ph_mb, 0, buffer, len, TRUE, 0, NULL);
1100 if (csum_eee_ref != csum) {
1101 SK_ERR("pkt_copyaddr_sum: csum_eee_mismatch "
1102 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eee_ref,
1103 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1104 SK_KVA(buffer));
1105 }
1106 VERIFY(csum_eee_ref == csum);
1107
1108 for (i = 0; i < len; i++) {
1109 VERIFY(buffer[i] == (i & 0xff));
1110 }
1111 kern_pbufpool_free(pp_mb, ph_mb);
1112 ph_mb = 0;
1113
1114 if (test_unaligned) {
1115 /* verify copy-checksum from mbuf to packet */
1116 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1117 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1118 csum = pkt_mcopypkt_sum(m, 0, ph_mb, 0, len, TRUE);
1119 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1120 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1121 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1122 VERIFY(__packet_finalize(ph_mb) == 0);
1123 if (csum_eee_ref != csum) {
1124 SK_ERR("pkt_mcopypkt_sum: csum_eee_mismatch "
1125 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eee_ref,
1126 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1127 SK_KVA(m));
1128 }
1129 VERIFY(csum_eee_ref == csum);
1130 kern_pbufpool_free(pp_mb, ph_mb);
1131 ph_mb = 0;
1132
1133 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1134 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1135 csum = pkt_mcopypkt_sum(m, 0, ph_mb, 1, len - 2, TRUE);
1136 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1137 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1138 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1139 VERIFY(__packet_finalize(ph_mb) == 0);
1140 if (csum_eoe_ref != csum) {
1141 SK_ERR("pkt_mcopypkt_sum: csum_eoe_mismatch "
1142 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoe_ref,
1143 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1144 SK_KVA(m));
1145 }
1146 VERIFY(csum_eoe_ref == csum);
1147 kern_pbufpool_free(pp_mb, ph_mb);
1148 ph_mb = 0;
1149
1150 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1151 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1152 csum = pkt_mcopypkt_sum(m, 0, ph_mb, 1, len - 1, TRUE);
1153 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1154 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1155 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1156 VERIFY(__packet_finalize(ph_mb) == 0);
1157 if (csum_eoo_ref != csum) {
1158 SK_ERR("pkt_mcopypkt_sum: csum_eoo_mismatch "
1159 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoo_ref,
1160 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1161 SK_KVA(m));
1162 }
1163 VERIFY(csum_eoo_ref == csum);
1164 kern_pbufpool_free(pp_mb, ph_mb);
1165 ph_mb = 0;
1166 }
1167
1168 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1169 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1170 csum = pkt_mcopypkt_sum(m, 0, ph_mb, 0, len - 1, TRUE);
1171 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1172 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1173 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1174 VERIFY(__packet_finalize(ph_mb) == 0);
1175 if (csum_eeo_ref != csum) {
1176 SK_ERR("pkt_mcopypkt_sum: csum_eeo_mismatch "
1177 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eeo_ref,
1178 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1179 SK_KVA(m));
1180 }
1181 VERIFY(csum_eeo_ref == csum);
1182 kern_pbufpool_free(pp_mb, ph_mb);
1183 ph_mb = 0;
1184
1185 if (test_unaligned) {
1186 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1187 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1188 csum = pkt_mcopypkt_sum(m, 1, ph_mb, 0, len - 1, TRUE);
1189 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1190 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1191 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1192 VERIFY(__packet_finalize(ph_mb) == 0);
1193 if (csum_oeo_ref != csum) {
1194 SK_ERR("pkt_mcopypkt_sum: csum_oeo_mismatch "
1195 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oeo_ref,
1196 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1197 SK_KVA(m));
1198 }
1199 VERIFY(csum_oeo_ref == csum);
1200 kern_pbufpool_free(pp_mb, ph_mb);
1201 ph_mb = 0;
1202
1203 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1204 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1205 csum = pkt_mcopypkt_sum(m, 1, ph_mb, 0, len - 2, TRUE);
1206 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1207 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1208 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1209 VERIFY(__packet_finalize(ph_mb) == 0);
1210 if (csum_oee_ref != csum) {
1211 SK_ERR("pkt_mcopypkt_sum: csum_oee_mismatch "
1212 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oee_ref,
1213 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1214 SK_KVA(m));
1215 }
1216 VERIFY(csum_oee_ref == csum);
1217 kern_pbufpool_free(pp_mb, ph_mb);
1218 ph_mb = 0;
1219
1220 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1221 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1222 csum = pkt_mcopypkt_sum(m, 1, ph_mb, 1, len - 2, TRUE);
1223 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1224 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1225 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1226 VERIFY(__packet_finalize(ph_mb) == 0);
1227 if (csum_ooe_ref != csum) {
1228 SK_ERR("pkt_mcopypkt_sum: csum_ooe_mismatch "
1229 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooe_ref,
1230 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1231 SK_KVA(m));
1232 }
1233 VERIFY(csum_ooe_ref == csum);
1234 kern_pbufpool_free(pp_mb, ph_mb);
1235 ph_mb = 0;
1236
1237 VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1238 VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1239 csum = pkt_mcopypkt_sum(m, 1, ph_mb, 1, len - 1, TRUE);
1240 METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1241 SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1242 SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1243 VERIFY(__packet_finalize(ph_mb) == 0);
1244 if (csum_ooo_ref != csum) {
1245 SK_ERR("pkt_mcopypkt_sum: csum_ooo_mismatch "
1246 "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooo_ref,
1247 csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1248 SK_KVA(m));
1249 }
1250 VERIFY(csum_ooo_ref == csum);
1251 kern_pbufpool_free(pp_mb, ph_mb);
1252 ph_mb = 0;
1253 }
1254
1255 kern_pbufpool_free(pp, ph);
1256 ph = 0;
1257 m_freem(m);
1258 m = NULL;
1259 kern_pbufpool_destroy(pp_mb);
1260 pp_mb = NULL;
1261 kern_pbufpool_destroy(pp);
1262 pp = NULL;
1263
1264 kfree_data(ref_buffer, SKMEM_TEST_BUFSIZE);
1265 ref_buffer = NULL;
1266 }
1267
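/*
 * Exercise quantum (single-buflet) pool creation and alloc/free KPIs;
 * quantum pools require max_frags of 1 and cannot be combined with
 * KBIF_BUFFER_ON_DEMAND.
 */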
1268 static void
skmem_quantum_tests(uint32_t flags)
1270 {
1271 struct kern_pbufpool_init pp_init;
1272 struct kern_pbufpool_memory_info pp_mem_info;
1273 kern_pbufpool_t pp = NULL;
1274 kern_packet_t *phary = NULL;
1275 uint32_t phcnt = 0;
1276 kern_packet_t ph = 0;
1277 uint32_t i;
1278 errno_t err;
1279
1280 flags |= KBIF_QUANTUM;
1281
1282 SK_ERR("flags 0x%x", flags);
1283
1284 phary = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
1285 Z_WAITOK | Z_ZERO);
1286
1287 bzero(&pp_init, sizeof(pp_init));
1288 pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
1289 pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
1290 (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
1291 "%s", "skmem_quantum_tests");
1292 pp_init.kbi_flags = (KBIF_QUANTUM | flags);
1293 pp_init.kbi_packets = 64;
1294 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1295 pp_init.kbi_buflets = (64 * 2);
1296 pp_init.kbi_ctx = NULL;
1297 pp_init.kbi_ctx_retain = NULL;
1298 pp_init.kbi_ctx_release = NULL;
1299
1300 pp_init.kbi_max_frags = 4;
1301 /* max_frags must be 1 for quantum type */
1302 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
1303 pp_init.kbi_max_frags = 1;
1304 if ((flags & KBIF_QUANTUM) && (flags & KBIF_BUFFER_ON_DEMAND)) {
1305 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
1306 goto done;
1307 }
1308 VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
1309 bzero(&pp_mem_info, sizeof(pp_mem_info));
1310 VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
1311 VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
1312 VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
1313 VERIFY(pp_mem_info.kpm_packets >= 64);
1314 VERIFY(pp_mem_info.kpm_packets <= MAX_PH_ARY);
1315 VERIFY(pp_mem_info.kpm_max_frags == 1);
1316 VERIFY(pp_mem_info.kpm_buflets >= 64);
1317 VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
1318 VERIFY(kern_pbufpool_alloc(pp, 4, &ph) == EINVAL);
1319 /* allocate and free one at a time */
1320 for (i = 0, phcnt = 0; i < pp_mem_info.kpm_packets; i++) {
1321 boolean_t stop = FALSE;
1322 /*
1323 * This may fail if skmem_region_mtbf is set, or if
1324 * the system is short on memory. Perform retries
1325 * at this layer to get at least 64 packets.
1326 */
1327 while ((err = kern_pbufpool_alloc_nosleep(pp, 1, &ph)) != 0) {
1328 VERIFY(err == ENOMEM);
1329 if (phcnt < 64) {
1330 SK_ERR("retrying alloc for quantum %u", phcnt);
1331 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
1332 continue;
1333 }
1334 stop = TRUE;
1335 break;
1336 }
1337 if (stop) {
1338 break;
1339 }
1340 VERIFY(ph != 0);
1341 VERIFY(kern_packet_get_data_length(ph) == 0);
1342 VERIFY(kern_packet_get_buflet_count(ph) == 1);
1343 phary[phcnt++] = ph;
1344 }
1345 VERIFY(phcnt >= 64);
1346 for (i = 0; i < phcnt; i++) {
1347 kern_pbufpool_free(pp, phary[i]);
1348 phary[i] = 0;
1349 }
1350 /* allocate and free in batch */
1351 phcnt = pp_mem_info.kpm_packets;
1352 for (;;) {
1353 err = kern_pbufpool_alloc_batch_nosleep(pp, 1, phary, &phcnt);
1354 VERIFY(err != EINVAL && err != ENOTSUP);
1355 if (err == ENOMEM) {
1356 phcnt = pp_mem_info.kpm_packets;
1357 SK_ERR("retrying batch alloc for %u quantums", phcnt);
1358 delay(250 * NSEC_PER_USEC); /* 1/4 sec */
1359 } else if (err == EAGAIN) {
1360 SK_ERR("batch alloc for %u quantums only returned %u",
1361 pp_mem_info.kpm_packets, phcnt);
1362 break;
1363 } else {
1364 VERIFY(err == 0);
1365 break;
1366 }
1367 }
1368 VERIFY(phcnt > 0);
1369 for (i = 0; i < phcnt; i++) {
1370 VERIFY(phary[i] != 0);
1371 VERIFY(kern_packet_get_data_length(phary[i]) == 0);
1372 VERIFY(kern_packet_get_buflet_count(phary[i]) == 1);
1373 }
1374 kern_pbufpool_free_batch(pp, phary, phcnt);
1375 /* allocate and free one at a time (blocking) */
1376 for (i = 0, phcnt = 0; i < pp_mem_info.kpm_packets; i++) {
1377 VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
1378 VERIFY(ph != 0);
1379 VERIFY(kern_packet_get_data_length(ph) == 0);
1380 VERIFY(kern_packet_get_buflet_count(ph) == 1);
1381 phary[phcnt++] = ph;
1382 }
1383 VERIFY(phcnt >= 64);
1384 for (i = 0; i < phcnt; i++) {
1385 kern_pbufpool_free(pp, phary[i]);
1386 phary[i] = 0;
1387 }
1388 /* allocate and free in batch (blocking) */
1389 bzero(&skmt_alloccb_ctx, sizeof(skmt_alloccb_ctx));
1390 skmt_alloccb_ctx.stc_req = phcnt;
1391 VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
1392 skmem_test_alloccb, &skmt_alloccb_ctx) == 0);
1393 VERIFY(skmt_alloccb_ctx.stc_idx == phcnt);
1394 kern_pbufpool_free_batch(pp, phary, phcnt);
1395 kern_pbufpool_destroy(pp);
1396 pp = NULL;
1397 done:
1398 kfree_data(phary, sizeof(kern_packet_t) * MAX_PH_ARY);
1399 phary = NULL;
1400 }
1401
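/* run the buflet, packet and quantum tests across the supported flag combinations */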
1402 static void
skmem_basic_tests(void)
1404 {
1405 /* basic sanity (alloc/free) tests on packet buflet KPIs */
1406 skmem_buflet_tests(0);
1407 skmem_buflet_tests(KBIF_PERSISTENT);
1408 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1409 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1410 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS |
1411 KBIF_USER_ACCESS);
1412 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1413 KBIF_USER_ACCESS);
1414 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1415 skmem_buflet_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1416 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1417 KBIF_BUFFER_ON_DEMAND);
1418 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1419 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1420 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1421 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1422 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1423 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1424 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1425 KBIF_NO_MAGAZINES);
1426 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS |
1427 KBIF_USER_ACCESS);
1428 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1429 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1430 TEST_OPTION_INHIBIT_CACHE);
1431 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1432 TEST_OPTION_INHIBIT_CACHE);
1433 skmem_buflet_tests(KBIF_BUFFER_ON_DEMAND | KBIF_NO_MAGAZINES);
1434 skmem_buflet_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1435
1436 /* basic sanity (alloc/free) tests on packet buflet KPIs (vdev) */
1437 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE);
1438 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1439 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1440 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1441 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1442 KBIF_PHYS_CONTIGUOUS);
1443 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1444 KBIF_MONOLITHIC | KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1445 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1446 KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1447 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1448 KBIF_BUFFER_ON_DEMAND);
1449 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1450 TEST_OPTION_INHIBIT_CACHE);
1451 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1452 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1453 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1454 KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1455 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1456 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1457 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1458 KBIF_USER_ACCESS);
1459 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1460 KBIF_PHYS_CONTIGUOUS);
1461 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1462 KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1463 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1464 KBIF_BUFFER_ON_DEMAND);
1465 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1466 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1467 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1468 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1469 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1470 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1471 TEST_OPTION_INHIBIT_CACHE);
1472
1473 /* check packet KPIs (also touches data) */
1474 skmem_packet_tests(0);
1475 skmem_packet_tests(KBIF_PHYS_CONTIGUOUS);
1476 skmem_packet_tests(KBIF_PERSISTENT);
1477 skmem_packet_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1478 skmem_packet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1479 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1480 KBIF_PHYS_CONTIGUOUS | KBIF_USER_ACCESS);
1481 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1482 KBIF_USER_ACCESS);
1483 skmem_packet_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1484 skmem_packet_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1485 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1486 KBIF_BUFFER_ON_DEMAND);
1487 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1488 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1489 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1490 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1491 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1492 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1493 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1494 KBIF_NO_MAGAZINES);
1495 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1496 KBIF_PHYS_CONTIGUOUS);
1497 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1498 #if 0
1499 /* XXX: commented out failed tests on ARM64e platforms */
1500 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1501 TEST_OPTION_INHIBIT_CACHE);
1502 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1503 TEST_OPTION_INHIBIT_CACHE);
1504 skmem_packet_tests(KBIF_BUFFER_ON_DEMAND);
1505 skmem_packet_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1506 #endif
1507
1508 /* check packet KPIs (also touches data) (vdev) */
1509 skmem_packet_tests(KBIF_VIRTUAL_DEVICE);
1510 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1511 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1512 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1513 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1514 KBIF_PHYS_CONTIGUOUS);
1515 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1516 KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1517 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1518 KBIF_BUFFER_ON_DEMAND);
1519 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1520 TEST_OPTION_INHIBIT_CACHE);
1521 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1522 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1523 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1524 KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1525 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1526 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1527 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1528 KBIF_PHYS_CONTIGUOUS);
1529 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1530 KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1531 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1532 KBIF_USER_ACCESS);
1533 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1534 KBIF_BUFFER_ON_DEMAND);
1535 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1536 KBIF_BUFFER_ON_DEMAND | KBIF_PHYS_CONTIGUOUS);
1537 #if 0
1538 	/* XXX: these tests fail on ARM64e platforms and are disabled for now */
1539 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1540 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1541 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1542 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1543 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1544 skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1545 TEST_OPTION_INHIBIT_CACHE);
1546 #endif
1547
1548 /* check quantum KPIs */
1549 skmem_quantum_tests(0);
1550 skmem_quantum_tests(KBIF_PHYS_CONTIGUOUS);
1551 skmem_quantum_tests(KBIF_PERSISTENT);
1552 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1553 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1554 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1555 KBIF_USER_ACCESS);
1556 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1557 skmem_quantum_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1558 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1559 KBIF_BUFFER_ON_DEMAND);
1560 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1561 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1562 skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1563 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1564 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1565 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1566 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1567 KBIF_PHYS_CONTIGUOUS);
1568 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1569 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1570 TEST_OPTION_INHIBIT_CACHE);
1571 skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1572 TEST_OPTION_INHIBIT_CACHE);
1573 skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND);
1574 skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | KBIF_NO_MAGAZINES);
1575 skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | KBIF_PHYS_CONTIGUOUS);
1576 skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1577
1578 /* check quantum KPIs (vdev) */
1579 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE);
1580 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1581 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1582 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1583 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1584 KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1585 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1586 KBIF_BUFFER_ON_DEMAND);
1587 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1588 TEST_OPTION_INHIBIT_CACHE);
1589 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1590 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1591 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1592 KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1593 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1594 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1595 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1596 KBIF_PHYS_CONTIGUOUS);
1597 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1598 KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1599 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1600 KBIF_USER_ACCESS);
1601 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1602 KBIF_BUFFER_ON_DEMAND);
1603 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1604 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1605 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1606 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1607 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1608 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1609 KBIF_PHYS_CONTIGUOUS);
1610 skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1611 TEST_OPTION_INHIBIT_CACHE);
1612 }
1613
1614 static void
1615 skmem_advanced_tests(int n, int32_t th_max, uint32_t mode, boolean_t nosleep,
1616 uint32_t flags)
1617 {
1618 struct kern_pbufpool_init pp_init;
1619 kern_packet_t mph = 0;
1620 kern_buflet_t buflet = 0;
1621 int i;
1622
1623 VERIFY(skmth_pp == NULL);
1624 VERIFY(skmth_cnt == 0);
1625
1626 bzero(&pp_init, sizeof(pp_init));
1627 pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
1628 pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
1629 pp_init.kbi_flags |= flags;
1630 (void) snprintf((char *)pp_init.kbi_name,
1631 sizeof(pp_init.kbi_name), "%s", "skmem_advanced");
1632
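	/*
	 * Each mode below selects a different pool configuration:
	 *   0-3: monolithic, user-accessible pools, optionally persistent
	 *        and/or backed by a virtual device;
	 *   4-5: persistent pools, one user-accessible and one backed by
	 *        a virtual device;
	 *   6-7: a default pool and a plain virtual-device pool;
	 *   8:   a buffer-on-demand pool sized with extra packets so the
	 *        clone tests below have headroom.
	 */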
1633 /* prepare */
1634 switch (mode) {
1635 case 0:
1636 pp_init.kbi_packets = th_max;
1637 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1638 pp_init.kbi_max_frags = 1;
1639 pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS;
1640 VERIFY(kern_pbufpool_create(&pp_init,
1641 &skmth_pp, NULL) == 0);
1642 break;
1643
1644 case 1:
1645 pp_init.kbi_packets = th_max;
1646 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1647 pp_init.kbi_max_frags = 1;
1648 pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1649 KBIF_VIRTUAL_DEVICE;
1650 VERIFY(kern_pbufpool_create(&pp_init,
1651 &skmth_pp, NULL) == 0);
1652 break;
1653
1654 case 2:
1655 pp_init.kbi_packets = th_max;
1656 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1657 pp_init.kbi_max_frags = 1;
1658 pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1659 KBIF_PERSISTENT;
1660 VERIFY(kern_pbufpool_create(&pp_init,
1661 &skmth_pp, NULL) == 0);
1662 break;
1663
1664 case 3:
1665 pp_init.kbi_packets = th_max;
1666 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1667 pp_init.kbi_max_frags = 1;
1668 pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1669 KBIF_PERSISTENT | KBIF_VIRTUAL_DEVICE;
1670 VERIFY(kern_pbufpool_create(&pp_init,
1671 &skmth_pp, NULL) == 0);
1672 break;
1673
1674 case 4:
1675 pp_init.kbi_packets = th_max;
1676 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1677 pp_init.kbi_max_frags = 1;
1678 pp_init.kbi_flags |= KBIF_PERSISTENT | KBIF_USER_ACCESS;
1679 VERIFY(kern_pbufpool_create(&pp_init,
1680 &skmth_pp, NULL) == 0);
1681 break;
1682
1683 case 5:
1684 pp_init.kbi_packets = th_max;
1685 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1686 pp_init.kbi_max_frags = 1;
1687 pp_init.kbi_flags |= KBIF_PERSISTENT | KBIF_VIRTUAL_DEVICE;
1688 VERIFY(kern_pbufpool_create(&pp_init,
1689 &skmth_pp, NULL) == 0);
1690 break;
1691
1692 case 6:
1693 pp_init.kbi_packets = th_max;
1694 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1695 pp_init.kbi_max_frags = 1;
1696 pp_init.kbi_flags |= 0;
1697 VERIFY(kern_pbufpool_create(&pp_init,
1698 &skmth_pp, NULL) == 0);
1699 break;
1700
1701 case 7:
1702 pp_init.kbi_packets = th_max;
1703 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1704 pp_init.kbi_max_frags = 1;
1705 pp_init.kbi_flags |= KBIF_VIRTUAL_DEVICE;
1706 VERIFY(kern_pbufpool_create(&pp_init,
1707 &skmth_pp, NULL) == 0);
1708 break;
1709
1710 case 8:
1711 pp_init.kbi_packets = (th_max * 2) + 1;
1712 pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1713 pp_init.kbi_max_frags = 1;
1714 pp_init.kbi_flags |= KBIF_BUFFER_ON_DEMAND;
1715 VERIFY(kern_pbufpool_create(&pp_init,
1716 &skmth_pp, NULL) == 0);
1717 break;
1718
1719 default:
1720 VERIFY(0);
1721 /* NOTREACHED */
1722 __builtin_unreachable();
1723 }
1724
1725 SK_ERR("%d: th_max %d mode %u nosleep %u nomagazines %u",
1726 n, th_max, mode, nosleep, !!(flags & KBIF_NO_MAGAZINES));
1727
1728 if (pp_init.kbi_flags & KBIF_BUFFER_ON_DEMAND) {
1729 /* create 1 master packet to clone */
1730 VERIFY(kern_pbufpool_alloc(skmth_pp, 1, &mph) == 0);
1731 VERIFY((buflet = kern_packet_get_next_buflet(mph, NULL)) != NULL);
1732 VERIFY(kern_buflet_set_data_length(buflet, SKMEM_TEST_BUFSIZE) == 0);
1733 VERIFY(__packet_finalize(mph) == 0);
1734 }
1735
1736 bzero(skmth_info, skmth_info_size);
1737
1738 /* spawn as many threads as there are CPUs */
1739 for (i = 0; i < th_max; i++) {
1740 skmth_info[i].sti_mph = mph;
1741 skmth_info[i].sti_nosleep = nosleep;
1742 if (kernel_thread_start(skmem_test_func, (void *)(uintptr_t)i,
1743 &skmth_info[i].sti_thread) != KERN_SUCCESS) {
1744 panic("Failed to create skmem test thread");
1745 /* NOTREACHED */
1746 __builtin_unreachable();
1747 }
1748 }
1749
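	/*
	 * Rendezvous with the workers: wait until every thread has bumped
	 * skmth_cnt, then release them all at once by setting skmth_run.
	 */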
1750 lck_mtx_lock(&skmt_lock);
1751 do {
1752 		struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * USEC_PER_SEC }; /* 100 ms */
1753 (void) msleep(&skmth_cnt, &skmt_lock, (PZERO - 1),
1754 "skmtstartw", &ts);
1755 } while (skmth_cnt < th_max);
1756 VERIFY(skmth_cnt == th_max);
1757 lck_mtx_unlock(&skmt_lock);
1758
1759 lck_mtx_lock(&skmt_lock);
1760 VERIFY(!skmth_run);
1761 skmth_run = TRUE;
1762 wakeup((caddr_t)&skmth_run);
1763 lck_mtx_unlock(&skmt_lock);
1764
1765 /* wait until all threads are done */
1766 lck_mtx_lock(&skmt_lock);
1767 do {
1768 		struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * USEC_PER_SEC }; /* 100 ms */
1769 (void) msleep(&skmth_cnt, &skmt_lock, (PZERO - 1),
1770 "skmtstopw", &ts);
1771 } while (skmth_cnt != 0);
1772 skmth_run = FALSE;
1773 lck_mtx_unlock(&skmt_lock);
1774
1775 if (mph != 0) {
1776 		VERIFY((buflet = kern_packet_get_next_buflet(mph, NULL)) != NULL);
1777 VERIFY(buflet->buf_ctl->bc_usecnt == 1);
1778 kern_pbufpool_free(skmth_pp, mph);
1779 mph = 0;
1780 }
1781 kern_pbufpool_destroy(skmth_pp);
1782 skmth_pp = NULL;
1783 }
1784
1785 __attribute__((noreturn))
1786 static void
1787 skmem_test_func(void *v, wait_result_t w)
1788 {
1789 #pragma unused(w)
1790 int i = (int)(uintptr_t)v, c;
1791 kern_packet_t ph = 0;
1792
1793 /* let skmem_test_start() know we're ready */
1794 lck_mtx_lock(&skmt_lock);
1795 atomic_add_32(&skmth_cnt, 1);
1796 wakeup((caddr_t)&skmth_cnt);
1797 do {
1798 (void) msleep(&skmth_run, &skmt_lock, (PZERO - 1),
1799 "skmtfuncw", NULL);
1800 } while (!skmth_run);
1801 lck_mtx_unlock(&skmt_lock);
1802
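	/*
	 * Each worker performs a fixed number of passes; every pass does an
	 * alloc/free cycle, a one-time clone of the shared master packet
	 * (when one was provided), and a forced reap of the pool caches.
	 */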
1803 for (c = 0; c < 41; c++) {
1804 /* run alloc tests */
1805 VERIFY(skmth_pp != NULL);
1806 if (skmth_info[i].sti_nosleep) {
1807 errno_t err = kern_pbufpool_alloc_nosleep(skmth_pp,
1808 1, &ph);
1809 VERIFY(ph != 0 || err != 0);
1810 } else {
1811 VERIFY(kern_pbufpool_alloc(skmth_pp, 1, &ph) == 0);
1812 }
1813
1814 if (ph != 0) {
1815 kern_pbufpool_free(skmth_pp, ph);
1816 ph = 0;
1817 }
1818
1819 /* run clone tests */
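		/*
		 * A KPKT_COPY_LIGHT clone shares the master packet's buffer,
		 * so every buflet attribute of the clone must match the
		 * master's, and both buflets must point at the same buffer
		 * control (buf_ctl).
		 */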
1820 if (skmth_info[i].sti_mph != 0) {
1821 kern_buflet_t buflet, buflet2;
1822 kern_obj_idx_seg_t buf_idx_seg, buf2_idx_seg;
1823
1824 if (skmth_info[i].sti_nosleep) {
1825 errno_t err;
1826 err = kern_packet_clone_nosleep(skmth_info[i].sti_mph,
1827 &skmth_info[i].sti_mpc, KPKT_COPY_LIGHT);
1828 VERIFY(skmth_info[i].sti_mpc != 0 || err != 0);
1829 } else {
1830 VERIFY(kern_packet_clone(skmth_info[i].sti_mph,
1831 &skmth_info[i].sti_mpc, KPKT_COPY_LIGHT) == 0);
1832 }
1833 if (skmth_info[i].sti_mpc != 0) {
1834 VERIFY(!(QUM_ADDR(skmth_info[i].sti_mpc)->qum_qflags & QUM_F_FINALIZED));
1835 VERIFY((buflet = kern_packet_get_next_buflet(
1836 skmth_info[i].sti_mph, NULL)) != NULL);
1837 VERIFY((buflet2 = kern_packet_get_next_buflet(
1838 skmth_info[i].sti_mpc, NULL)) != NULL);
1839 VERIFY(kern_buflet_get_object_address(buflet) ==
1840 kern_buflet_get_object_address(buflet2));
1841 VERIFY(kern_buflet_get_data_address(buflet) ==
1842 kern_buflet_get_data_address(buflet2));
1843 VERIFY(kern_buflet_get_data_limit(buflet) ==
1844 kern_buflet_get_data_limit(buflet2));
1845 VERIFY(kern_buflet_get_data_offset(buflet) ==
1846 kern_buflet_get_data_offset(buflet2));
1847 VERIFY(kern_buflet_get_data_length(buflet) ==
1848 kern_buflet_get_data_length(buflet2));
1849 VERIFY(kern_buflet_get_object_segment(buflet,
1850 &buf_idx_seg) ==
1851 kern_buflet_get_object_segment(buflet2,
1852 &buf2_idx_seg));
1853 VERIFY(buf_idx_seg == buf2_idx_seg);
1854 VERIFY(buflet->buf_ctl == buflet2->buf_ctl);
1855 VERIFY(__packet_finalize(skmth_info[i].sti_mpc) == 0);
1856 kern_pbufpool_free(skmth_pp, skmth_info[i].sti_mpc);
1857 skmth_info[i].sti_mpc = 0;
1858 }
1859 skmth_info[i].sti_mph = 0;
1860 }
1861
1862 /* force cache purges to exercise related code paths */
1863 if (skmth_pp->pp_kmd_cache != NULL) {
1864 skmem_cache_reap_now(skmth_pp->pp_kmd_cache, TRUE);
1865 }
1866 if (skmth_pp->pp_buf_cache != NULL) {
1867 skmem_cache_reap_now(skmth_pp->pp_buf_cache, TRUE);
1868 }
1869 if (skmth_pp->pp_kbft_cache != NULL) {
1870 skmem_cache_reap_now(skmth_pp->pp_kbft_cache, TRUE);
1871 }
1872 }
1873
1874 /* let skmem_test_start() know we're finished */
1875 lck_mtx_lock(&skmt_lock);
1876 VERIFY(atomic_add_32_ov(&skmth_cnt, -1) != 0);
1877 wakeup((caddr_t)&skmth_cnt);
1878 lck_mtx_unlock(&skmt_lock);
1879
1880 /* for the extra refcnt from kernel_thread_start() */
1881 thread_deallocate(current_thread());
1882
1883 thread_terminate(current_thread());
1884 	/* NOTREACHED */
1885 	__builtin_unreachable();
1886 }
1887
1888 static int skmem_test_objs;
1889
1890 struct skmem_test_obj {
1891 uint64_t sto_val[2];
1892 };
1893
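/*
 * The test constructor stamps each object with values derived from its own
 * address (object address XOR field address); the destructor re-checks the
 * stamps, which catches corruption and objects freed to the wrong cache.
 * skmem_test_objs tracks how many objects are currently constructed.
 */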
1894 static int
1895 skmem_test_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
1896 void *arg, uint32_t skmflag)
1897 {
1898 #pragma unused(skmflag)
1899 struct skmem_test_obj *sto = SKMEM_OBJ_ADDR(oi);
1900
1901 VERIFY(oim == NULL);
1902 VERIFY(arg == &skmem_test_init);
1903 VERIFY(SKMEM_OBJ_SIZE(oi) >= sizeof(struct skmem_test_obj));
1904 sto->sto_val[0] = (uint64_t)(void *)sto ^
1905 (uint64_t)(void *)&sto->sto_val[0];
1906 sto->sto_val[1] = (uint64_t)(void *)sto ^
1907 (uint64_t)(void *)&sto->sto_val[1];
1908 atomic_add_32(&skmem_test_objs, 1);
1909
1910 return 0;
1911 }
1912
1913 static void
1914 skmem_test_dtor(void *addr, void *arg)
1915 {
1916 struct skmem_test_obj *sto = addr;
1917
1918 VERIFY(arg == &skmem_test_init);
1919 VERIFY((sto->sto_val[0] ^ (uint64_t)(void *)&sto->sto_val[0]) ==
1920 (uint64_t)(void *)sto);
1921 VERIFY((sto->sto_val[1] ^ (uint64_t)(void *)&sto->sto_val[1]) ==
1922 (uint64_t)(void *)sto);
1923 VERIFY(skmem_test_objs > 0);
1924 atomic_add_32(&skmem_test_objs, -1);
1925 }
1926
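/*
 * Create a cache with the requested alignment, allocate as many objects as
 * the test array holds, verify that each one is suitably aligned, then free
 * them all and destroy the cache (which must drive the constructed-object
 * count back to zero).
 */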
1927 static void
1928 skmem_tests(uint32_t align)
1929 {
1930 struct skmem_cache *skm;
1931 uint32_t bufsize = sizeof(struct skmem_test_obj);
1932
1933 uint32_t objary_max = (uint32_t)MAX_PH_ARY;
1934 void **objary = NULL;
1935 char name[64];
1936
1937 VERIFY(align != 0);
1938
1939 SK_ERR("bufsize %u align %u", bufsize, align);
1940
1941 objary = kalloc_type(void *, objary_max, Z_WAITOK | Z_ZERO);
1942
1943 (void) snprintf(name, sizeof(name), "skmem_test.%u.%u", bufsize, align);
1944
1945 skm = skmem_cache_create(name, bufsize, align, skmem_test_ctor,
1946 skmem_test_dtor, NULL, &skmem_test_init, NULL, 0);
1947
1948 VERIFY(skmem_test_objs == 0);
1949 for (int i = 0; i < objary_max; i++) {
1950 objary[i] = skmem_cache_alloc(skm, SKMEM_SLEEP);
1951 VERIFY(objary[i] != NULL);
1952 VERIFY(IS_P2ALIGNED(objary[i], align));
1953 }
1954 for (int i = 0; i < objary_max; i++) {
1955 VERIFY(objary[i] != NULL);
1956 skmem_cache_free(skm, objary[i]);
1957 objary[i] = NULL;
1958 }
1959 skmem_cache_destroy(skm);
1960 VERIFY(skmem_test_objs == 0);
1961
1962 kfree_type(void *, objary_max, objary);
1963 objary = NULL;
1964 }
1965
1966 static void
1967 skmem_test_start(void *v, wait_result_t w)
1968 {
1969 int32_t ncpus = ml_wait_max_cpus();
1970 int error = 0, n;
1971 uint32_t flags;
1972 uint64_t mtbf_saved;
1973
1974 lck_mtx_lock(&skmt_lock);
1975 VERIFY(!skmt_busy);
1976 skmt_busy = 1;
1977 skmem_cache_test_start(1); /* 1 second update interval */
1978 lck_mtx_unlock(&skmt_lock);
1979
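	/* one per-thread state slot per CPU, used by the advanced tests below */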
1980 VERIFY(skmth_info == NULL);
1981 skmth_info_size = sizeof(struct skmt_thread_info) * ncpus;
1982 skmth_info = (struct skmt_thread_info *) kalloc_data(skmth_info_size,
1983 Z_WAITOK | Z_ZERO);
1984
1985 /*
1986 * Sanity tests.
1987 */
1988 (void) skmem_cache_magazine_max(1);
1989 (void) skmem_cache_magazine_max(32);
1990 (void) skmem_cache_magazine_max(64);
1991 (void) skmem_cache_magazine_max(128);
1992 (void) skmem_cache_magazine_max(256);
1993 (void) skmem_cache_magazine_max(512);
1994 (void) skmem_cache_magazine_max(1024);
1995 (void) skmem_cache_magazine_max(2048);
1996 (void) skmem_cache_magazine_max(4096);
1997 (void) skmem_cache_magazine_max(8192);
1998 (void) skmem_cache_magazine_max(16384);
1999 (void) skmem_cache_magazine_max(32768);
2000 (void) skmem_cache_magazine_max(65536);
2001
2002 /*
2003 * skmem allocator tests
2004 */
2005 skmem_tests(8);
2006 skmem_tests(16);
2007 skmem_tests(32);
2008 skmem_tests(64);
2009 skmem_tests(128);
2010
2011 /*
2012 * Basic packet buffer pool sanity tests
2013 */
2014 skmem_basic_tests();
2015
2016 /*
2017 * Multi-threaded alloc and free tests (blocking).
2018 */
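	/*
	 * Odd-numbered passes add KBIF_NO_MAGAZINES so that allocations
	 * skip the per-CPU magazine layer and exercise the backing caches
	 * directly.
	 */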
2019 for (n = 0; n < 7; n++) {
2020 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2021 skmem_advanced_tests(n, ncpus, 0, FALSE, flags);
2022 }
2023 for (n = 0; n < 7; n++) {
2024 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2025 skmem_advanced_tests(n, ncpus, 0, TRUE, flags);
2026 }
2027 for (n = 0; n < 7; n++) {
2028 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2029 skmem_advanced_tests(n, ncpus, 1, FALSE, flags);
2030 }
2031 for (n = 0; n < 7; n++) {
2032 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2033 skmem_advanced_tests(n, ncpus, 1, TRUE, flags);
2034 }
2035 for (n = 0; n < 7; n++) {
2036 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2037 skmem_advanced_tests(n, ncpus, 2, FALSE, flags);
2038 }
2039 for (n = 0; n < 7; n++) {
2040 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2041 skmem_advanced_tests(n, ncpus, 2, TRUE, flags);
2042 }
2043 for (n = 0; n < 7; n++) {
2044 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2045 skmem_advanced_tests(n, ncpus, 3, FALSE, flags);
2046 }
2047 for (n = 0; n < 7; n++) {
2048 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2049 skmem_advanced_tests(n, ncpus, 3, TRUE, flags);
2050 }
2051 for (n = 0; n < 7; n++) {
2052 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2053 skmem_advanced_tests(n, ncpus, 4, FALSE, flags);
2054 }
2055 for (n = 0; n < 7; n++) {
2056 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2057 skmem_advanced_tests(n, ncpus, 5, FALSE, flags);
2058 }
2059
2060 /*
2061 	 * Modes 4-5 exercise persistent/mirrored regions.  To maximize
2062 	 * the chance of hitting the allocation-failure handling paths,
2063 	 * lower the MTBF (if set) to the minimum possible here, then
2064 	 * restore the saved value once these tests complete.
2065 */
2066 mtbf_saved = skmem_region_get_mtbf();
2067 if (mtbf_saved != 0) {
2068 skmem_region_set_mtbf(SKMEM_REGION_MTBF_MIN);
2069 }
2070
2071 /*
2072 * Multi-threaded alloc and free tests (non-blocking).
2073 */
2074
2075 for (n = 0; n < 7; n++) {
2076 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2077 skmem_advanced_tests(n, ncpus, 4, TRUE, flags);
2078 }
2079 for (n = 0; n < 7; n++) {
2080 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2081 skmem_advanced_tests(n, ncpus, 5, TRUE, flags);
2082 }
2083
2084 /*
2085 	 * Restore the MTBF to its previously saved value.
2086 */
2087 if (mtbf_saved != 0) {
2088 skmem_region_set_mtbf(mtbf_saved);
2089 }
2090
2091 for (n = 0; n < 7; n++) {
2092 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2093 skmem_advanced_tests(n, ncpus, 6, FALSE, flags);
2094 }
2095 for (n = 0; n < 7; n++) {
2096 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2097 skmem_advanced_tests(n, ncpus, 6, TRUE, flags);
2098 }
2099 for (n = 0; n < 7; n++) {
2100 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2101 skmem_advanced_tests(n, ncpus, 7, FALSE, flags);
2102 }
2103 for (n = 0; n < 7; n++) {
2104 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2105 skmem_advanced_tests(n, ncpus, 7, TRUE, flags);
2106 }
2107 for (n = 0; n < 7; n++) {
2108 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2109 skmem_advanced_tests(n, ncpus, 8, FALSE, flags);
2110 }
2111 for (n = 0; n < 7; n++) {
2112 flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2113 skmem_advanced_tests(n, ncpus, 8, TRUE, flags);
2114 }
2115
2116 lck_mtx_lock(&skmt_lock);
2117 skmt_enabled = 1;
2118 wakeup((caddr_t)&skmt_enabled);
2119 lck_mtx_unlock(&skmt_lock);
2120
2121 if (error != 0) {
2122 skmem_test_stop(v, w);
2123 }
2124 }
2125
2126 static void
2127 skmem_test_stop(void *v, wait_result_t w)
2128 {
2129 #pragma unused(v, w)
2130
2131 if (skmth_info != NULL) {
2132 kfree_data(skmth_info, skmth_info_size);
2133 skmth_info = NULL;
2134 }
2135
2136 lck_mtx_lock(&skmt_lock);
2137 skmem_cache_test_stop();
2138 VERIFY(skmt_busy);
2139 skmt_busy = 0;
2140 skmt_enabled = 0;
2141 wakeup((caddr_t)&skmt_enabled);
2142 lck_mtx_unlock(&skmt_lock);
2143 }
2144
2145 static int
2146 sysctl_skmem_test(__unused struct sysctl_oid *oidp,
2147 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2148 {
2149 int error, newvalue, changed;
2150
2151 lck_mtx_lock(&skmt_lock);
2152 if ((error = sysctl_io_number(req, skmt_enabled, sizeof(int),
2153 &newvalue, &changed)) != 0) {
2154 goto done;
2155 }
2156
2157 if (changed && skmt_enabled != newvalue) {
2158 thread_t th;
2159 thread_continue_t func;
2160
2161 if (newvalue && skmt_busy) {
2162 			SK_ERR("An earlier skmem test instance is still active");
2163 error = EBUSY;
2164 goto done;
2165 }
2166
2167 if (newvalue) {
2168 func = skmem_test_start;
2169 } else {
2170 func = skmem_test_stop;
2171 }
2172
2173 if (kernel_thread_start(func, NULL, &th) != KERN_SUCCESS) {
2174 SK_ERR("Failed to create skmem test action thread");
2175 error = EBUSY;
2176 goto done;
2177 }
2178 do {
2179 SK_DF(SK_VERB_MEM, "Waiting for %s to complete",
2180 newvalue ? "startup" : "shutdown");
2181 error = msleep(&skmt_enabled, &skmt_lock,
2182 PWAIT | PCATCH, "skmtw", NULL);
2183 /* BEGIN CSTYLED */
2184 /*
2185 * Loop exit conditions:
2186 * - we were interrupted
2187 * OR
2188 * - we are starting up and are enabled
2189 * (Startup complete)
2190 * OR
2191 * - we are starting up and are not busy
2192 * (Failed startup)
2193 * OR
2194 * - we are shutting down and are not busy
2195 * (Shutdown complete)
2196 */
2197 /* END CSTYLED */
2198 } while (!((error == EINTR) || (newvalue && skmt_enabled) ||
2199 (newvalue && !skmt_busy) || (!newvalue && !skmt_busy)));
2200
2201 thread_deallocate(th);
2202 }
2203
2204 done:
2205 lck_mtx_unlock(&skmt_lock);
2206 return error;
2207 }
2208
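/*
 * Control knob for the test.  On DEVELOPMENT/DEBUG kernels this is presumably
 * reachable from user space as something like:
 *
 *	sysctl kern.skywalk.mem.test=1		# start
 *	sysctl kern.skywalk.mem.test=0		# stop
 *
 * (The exact sysctl name depends on how the _kern_skywalk_mem parent node is
 * declared elsewhere.)
 */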
2209 SYSCTL_PROC(_kern_skywalk_mem, OID_AUTO, test,
2210 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
2211 sysctl_skmem_test, "I", "Start Skywalk memory test");
2212
2213 #endif /* DEVELOPMENT || DEBUG */
2214