xref: /xnu-8792.81.2/bsd/skywalk/mem/skmem_test.c (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
1 /*
2  * Copyright (c) 2018-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #if (DEVELOPMENT || DEBUG) /* XXX make this whole file a config option? */
30 
31 #include <skywalk/os_skywalk_private.h>
32 
33 /*
34  * Ignore -Wxnu-typed-allocators for this file, because
35  * this is test-only code
36  */
37 __typed_allocators_ignore_push
38 
39 #define SKMEM_TEST_BUFSIZE      2048
40 
41 #if XNU_TARGET_OS_OSX && defined(__arm64__)
42 #define TEST_OPTION_INHIBIT_CACHE    0
43 #else /* !(XNU_TARGET_OS_OSX && defined(__arm64__)) */
44 #define TEST_OPTION_INHIBIT_CACHE    KBIF_INHIBIT_CACHE
45 #endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */
46 
47 static void skmem_test_start(void *, wait_result_t);
48 static void skmem_test_stop(void *, wait_result_t);
49 static void skmem_test_func(void *v, wait_result_t w);
50 static void skmem_test_mbfreecb(caddr_t cl, uint32_t size, caddr_t arg);
51 static void skmem_test_alloccb(kern_packet_t, uint32_t, const void *);
52 
53 extern unsigned int ml_wait_max_cpus(void);
54 extern kern_return_t thread_terminate(thread_t);
55 
56 static int skmt_enabled;
57 static int skmt_busy;
58 static int skmt_mbcnt;
59 
60 decl_lck_mtx_data(static, skmt_lock);
61 
/*
 * Context handed to skmem_test_alloccb() so the batch-allocation
 * callback can verify it is invoked once per packet, in order.
 */
struct skmt_alloc_ctx {
	uint32_t        stc_req;        /* # of objects requested */
	uint32_t        stc_idx;        /* expected index */
};
66 
67 static struct skmt_alloc_ctx skmt_alloccb_ctx;
68 
/*
 * Per-worker-thread test state; aligned to CHANNEL_CACHE_ALIGN_MAX
 * (presumably to keep each worker's state on its own cache line —
 * NOTE(review): confirm against the multi-threaded test usage).
 */
struct skmt_thread_info {
	kern_packet_t   sti_mph;        /* master packet */
	kern_packet_t   sti_mpc;        /* cloned packet */
	thread_t        sti_thread;     /* thread instance */
	boolean_t       sti_nosleep;    /* non-sleeping allocation */
} __attribute__((aligned(CHANNEL_CACHE_ALIGN_MAX)));
75 
76 static struct skmt_thread_info *skmth_info;
77 static uint32_t skmth_info_size;
78 static int32_t skmth_cnt;
79 static boolean_t skmth_run;
80 static kern_pbufpool_t skmth_pp;
81 
/* Initialize the mutex protecting the skmem test state. */
void
skmem_test_init(void)
{
	lck_mtx_init(&skmt_lock, &sk_lock_group, &sk_lock_attr);
}
87 
/* Destroy the mutex protecting the skmem test state. */
void
skmem_test_fini(void)
{
	lck_mtx_destroy(&skmt_lock, &sk_lock_group);
}
93 
94 bool
skmem_test_enabled(void)95 skmem_test_enabled(void)
96 {
97 	bool enabled;
98 	lck_mtx_lock(&skmt_lock);
99 	enabled = (skmt_busy != 0);
100 	lck_mtx_unlock(&skmt_lock);
101 	return enabled;
102 }
103 
/* 2-byte overlay used to zero-pad the odd trailing byte of a checksum */
typedef union {
	char c[2];
	uint16_t s;
} short_union_t;
108 
/*
 * Overlay used by _reduce() to split a checksum accumulator into
 * 16-bit halves.  NOTE(review): s[] covers only the low half of `l';
 * the folding in _reduce() assumes s[0]/s[1] are the low-order 16-bit
 * words, i.e. a little-endian layout — true for all XNU targets.
 */
typedef union {
	uint16_t s[2];
	long l;
} long_union_t;
113 
114 static void
_reduce(int * sum)115 _reduce(int *sum)
116 {
117 	long_union_t l_util;
118 
119 	l_util.l = *sum;
120 	*sum = l_util.s[0] + l_util.s[1];
121 	if (*sum > 65535) {
122 		*sum -= 65535;
123 	}
124 }
125 
126 static uint16_t
skmem_reference_sum(void * buffer,int len,int sum0)127 skmem_reference_sum(void *buffer, int len, int sum0)
128 {
129 	uint16_t *w;
130 	int sum = sum0;
131 
132 	w = (uint16_t *)buffer;
133 	while ((len -= 32) >= 0) {
134 		sum += w[0]; sum += w[1];
135 		sum += w[2]; sum += w[3];
136 		sum += w[4]; sum += w[5];
137 		sum += w[6]; sum += w[7];
138 		sum += w[8]; sum += w[9];
139 		sum += w[10]; sum += w[11];
140 		sum += w[12]; sum += w[13];
141 		sum += w[14]; sum += w[15];
142 		w += 16;
143 	}
144 	len += 32;
145 	while ((len -= 8) >= 0) {
146 		sum += w[0]; sum += w[1];
147 		sum += w[2]; sum += w[3];
148 		w += 4;
149 	}
150 	len += 8;
151 	if (len) {
152 		_reduce(&sum);
153 		while ((len -= 2) >= 0) {
154 			sum += *w++;
155 		}
156 	}
157 	if (len == -1) { /* odd-length packet */
158 		short_union_t s_util;
159 
160 		s_util.s = 0;
161 		s_util.c[0] = *((char *)w);
162 		s_util.c[1] = 0;
163 		sum += s_util.s;
164 	}
165 	_reduce(&sum);
166 	return sum & 0xffff;
167 }
168 
169 /*
170  * At present, the number of objects created in the pool will be
171  * higher than the requested amount, if the pool is allowed to use
 * the magazines layer.  Round up a bit to accommodate any rounding
 * up done by the pool allocator.
174  */
175 #define MAX_PH_ARY      P2ROUNDUP(skmem_cache_magazine_max(1) + 129, 256)
176 
/*
 * Reference-counted context object registered with the pbufpool via
 * kbi_ctx; the tests verify the pool's retain/release discipline
 * against this refcount.
 */
struct skmem_pp_ctx_s {
	os_refcnt_t     skmem_pp_ctx_refcnt;
};

/* single shared instance used by all pbufpool-context tests */
static struct skmem_pp_ctx_s skmem_pp_ctx;
182 
183 static uint32_t
skmem_pp_ctx_refcnt(void * ctx)184 skmem_pp_ctx_refcnt(void *ctx)
185 {
186 	struct skmem_pp_ctx_s *pp_ctx = ctx;
187 	VERIFY(pp_ctx == &skmem_pp_ctx);
188 	return os_ref_get_count(&pp_ctx->skmem_pp_ctx_refcnt);
189 }
190 
191 static void
skmem_pp_ctx_retain(void * ctx)192 skmem_pp_ctx_retain(void *ctx)
193 {
194 	struct skmem_pp_ctx_s *pp_ctx = ctx;
195 	VERIFY(pp_ctx == &skmem_pp_ctx);
196 	os_ref_retain(&pp_ctx->skmem_pp_ctx_refcnt);
197 }
198 
199 static void
skmem_pp_ctx_release(void * ctx)200 skmem_pp_ctx_release(void *ctx)
201 {
202 	struct skmem_pp_ctx_s *pp_ctx = ctx;
203 	VERIFY(pp_ctx == &skmem_pp_ctx);
204 	(void)os_ref_release(&pp_ctx->skmem_pp_ctx_refcnt);
205 }
206 
207 #define BUFLEN 2048
208 
209 static void
skmem_buflet_tests(uint32_t flags)210 skmem_buflet_tests(uint32_t flags)
211 {
212 	struct kern_pbufpool_init pp_init;
213 	struct kern_pbufpool_memory_info pp_mem_info;
214 	kern_pbufpool_t pp = NULL;
215 	struct kern_pbufpool_init pp_init_mb;
216 	kern_pbufpool_t pp_mb = NULL;
217 	mach_vm_address_t baddr = 0;
218 	kern_obj_idx_seg_t sg_idx;
219 	kern_segment_t sg;
220 	kern_packet_t *phary = NULL;
221 	kern_packet_t *phary2 = NULL;
222 	kern_packet_t *pharyc = NULL;
223 	struct mbuf **mbary = NULL;
224 	uint32_t mbcnt = 0;
225 	uint32_t phcnt = 0, maxphcnt = 0;
226 	uint32_t phcloned = 0;
227 	size_t mblen = BUFLEN;
228 	kern_packet_t ph, ph_mb;
229 	uint32_t i;
230 	errno_t err;
231 
232 	/* packets only */
233 	VERIFY(!(flags & KBIF_QUANTUM));
234 
235 	SK_ERR("flags 0x%x", flags);
236 
237 	phary = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
238 	    Z_WAITOK | Z_ZERO);
239 	phary2 = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
240 	    Z_WAITOK | Z_ZERO);
241 	pharyc = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
242 	    Z_WAITOK | Z_ZERO);
243 	mbary = kalloc_type(struct mbuf *, MAX_PH_ARY, Z_WAITOK | Z_ZERO);
244 
245 	os_ref_init(&skmem_pp_ctx.skmem_pp_ctx_refcnt, NULL);
246 	bzero(&pp_init, sizeof(pp_init));
247 	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
248 	pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
249 	(void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
250 	    "%s", "skmem_buflet_tests");
251 	pp_init.kbi_flags = flags;
252 	pp_init.kbi_ctx = &skmem_pp_ctx;
253 	pp_init.kbi_ctx_retain = skmem_pp_ctx_retain;
254 	pp_init.kbi_ctx_release = skmem_pp_ctx_release;
255 
256 	/* must fail if packets is 0 */
257 	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
258 	pp_init.kbi_packets = 64;
259 	/* must fail if bufsize is 0 */
260 	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
261 	pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
262 	/* must fail if max_frags is 0 */
263 	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
264 
265 	pp_init.kbi_max_frags = 1;
266 	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
267 	VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 2);
268 	void *ctx = kern_pbufpool_get_context(pp);
269 	VERIFY(ctx == &skmem_pp_ctx);
270 	VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 3);
271 	skmem_pp_ctx_release(ctx);
272 	VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 2);
273 	bzero(&pp_mem_info, sizeof(pp_mem_info));
274 	VERIFY(kern_pbufpool_get_memory_info(pp, NULL) == EINVAL);
275 	VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
276 	VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
277 	VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
278 	VERIFY(pp_mem_info.kpm_packets >= 64);
279 	VERIFY(pp_mem_info.kpm_packets <= MAX_PH_ARY);
280 	VERIFY(pp_mem_info.kpm_max_frags == 1);
281 	VERIFY(pp_mem_info.kpm_buflets >= 64);
282 	VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
283 	VERIFY(kern_pbufpool_alloc(pp, 0, &ph) == EINVAL ||
284 	    (flags & KBIF_BUFFER_ON_DEMAND));
285 	if (ph != 0) {
286 		kern_packet_t phc = 0;
287 		kern_buflet_t buflet;
288 
289 		VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
290 		VERIFY((buflet = kern_packet_get_next_buflet(ph, NULL)) == NULL);
291 		VERIFY(kern_packet_clone(ph, &phc, KPKT_COPY_LIGHT) == EINVAL);
292 		VERIFY(kern_packet_clone(ph, &phc, KPKT_COPY_HEAVY) == EINVAL);
293 		kern_pbufpool_free(pp, ph);
294 		ph = 0;
295 	}
296 	maxphcnt = 32;
297 	VERIFY(kern_pbufpool_alloc(pp, 5, &ph) == EINVAL);
298 	if (flags & KBIF_BUFFER_ON_DEMAND) {
299 		/* allocate and free one at a time (no buflet) */
300 		for (i = 0, phcnt = 0; i < maxphcnt; i++) {
301 			boolean_t stop = FALSE;
302 			/*
303 			 * This may fail if skmem_region_mtbf is set, or if
304 			 * the system is short on memory.  Perform retries at
305 			 * this layer to get at least 32 packets.
306 			 */
307 			while ((err = kern_pbufpool_alloc_nosleep(pp, 0, &ph)) != 0) {
308 				VERIFY(err == ENOMEM);
309 				if (phcnt < 32) {
310 					SK_ERR("[a] retrying alloc for packet %u",
311 					    phcnt);
312 					delay(250 * NSEC_PER_USEC); /* 1/4 sec */
313 					continue;
314 				}
315 				stop = TRUE;
316 				break;
317 			}
318 			if (stop) {
319 				break;
320 			}
321 			VERIFY(ph != 0);
322 			VERIFY(kern_packet_get_data_length(ph) == 0);
323 			VERIFY(kern_packet_get_buflet_count(ph) == 0);
324 			phary[phcnt++] = ph;
325 		}
326 		VERIFY(phcnt >= 32);
327 		for (i = 0; i < phcnt; i++) {
328 			kern_pbufpool_free(pp, phary[i]);
329 			phary[i] = 0;
330 		}
331 	}
332 	/* allocate and free one at a time (1 buflet) */
333 	for (i = 0, phcnt = 0; i < maxphcnt; i++) {
334 		boolean_t stop = FALSE;
335 		/*
336 		 * This may fail if skmem_region_mtbf is set, or if
337 		 * the system is short on memory.  Perform retries at
338 		 * this layer to get at least 32 packets.
339 		 */
340 		while ((err = kern_pbufpool_alloc_nosleep(pp, 1, &ph)) != 0) {
341 			VERIFY(err == ENOMEM);
342 			if (phcnt < 32) {
343 				SK_ERR("[a] retrying alloc for packet %u",
344 				    phcnt);
345 				delay(250 * NSEC_PER_USEC); /* 1/4 sec */
346 				continue;
347 			}
348 			stop = TRUE;
349 			break;
350 		}
351 		if (stop) {
352 			break;
353 		}
354 		VERIFY(ph != 0);
355 		VERIFY(kern_packet_get_data_length(ph) == 0);
356 		VERIFY(kern_packet_get_buflet_count(ph) == 1);
357 		phary[phcnt++] = ph;
358 	}
359 	VERIFY(phcnt >= 32);
360 	for (i = 0; i < phcnt; i++) {
361 		kern_pbufpool_free(pp, phary[i]);
362 		phary[i] = 0;
363 	}
364 	/* allocate and free in batch */
365 	phcnt = maxphcnt;
366 	for (;;) {
367 		err = kern_pbufpool_alloc_batch_nosleep(pp, 1, phary, &phcnt);
368 		VERIFY(err != EINVAL);
369 		if (err == ENOMEM) {
370 			phcnt = maxphcnt;
371 			SK_ERR("retrying batch alloc for %u packets", phcnt);
372 			delay(250 * NSEC_PER_USEC);     /* 1/4 sec */
373 		} else if (err == EAGAIN) {
374 			SK_ERR("batch alloc for %u packets only returned %u",
375 			    maxphcnt, phcnt);
376 			break;
377 		} else {
378 			VERIFY(err == 0);
379 			break;
380 		}
381 	}
382 	VERIFY(phcnt > 0);
383 	for (i = 0; i < phcnt; i++) {
384 		VERIFY(phary[i] != 0);
385 		VERIFY(kern_packet_get_data_length(phary[i]) == 0);
386 		VERIFY(kern_packet_get_buflet_count(phary[i]) == 1);
387 	}
388 	kern_pbufpool_free_batch(pp, phary, phcnt);
389 	/* allocate and free one at a time (blocking) */
390 	for (i = 0, phcnt = 0; i < maxphcnt; i++) {
391 		VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
392 		VERIFY(ph != 0);
393 		VERIFY(kern_packet_get_data_length(ph) == 0);
394 		VERIFY(kern_packet_get_buflet_count(ph) == 1);
395 		phary[phcnt++] = ph;
396 	}
397 	VERIFY(phcnt >= 32);
398 	for (i = 0; i < phcnt; i++) {
399 		kern_pbufpool_free(pp, phary[i]);
400 		phary[i] = 0;
401 	}
402 	/* allocate with callback */
403 	bzero(&skmt_alloccb_ctx, sizeof(skmt_alloccb_ctx));
404 	skmt_alloccb_ctx.stc_req = phcnt;
405 	VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
406 	    NULL, &skmt_alloccb_ctx) == EINVAL);
407 	VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
408 	    skmem_test_alloccb, &skmt_alloccb_ctx) == 0);
409 	VERIFY(skmt_alloccb_ctx.stc_idx == phcnt);
410 	kern_pbufpool_free_batch(pp, phary, phcnt);
411 
412 	/*
413 	 * Allocate and free test
414 	 * Case 1: Packet has an mbuf attached
415 	 */
416 	mbcnt = phcnt;
417 	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
418 	/* clone packets (lightweight, without mbufs) */
419 	for (i = 0; i < phcnt; i++) {
420 		kern_buflet_t buflet, buflet2;
421 		kern_obj_idx_seg_t buf_idx_seg, buf2_idx_seg;
422 
423 		VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
424 		    NULL)) != NULL);
425 		VERIFY(kern_buflet_set_data_length(buflet, BUFLEN) == 0);
426 		VERIFY(__packet_finalize(phary[i]) == 0);
427 		VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
428 		(void) memset(kern_buflet_get_data_address(buflet), i, BUFLEN);
429 		kern_packet_set_trace_id(phary[i], i);
430 		VERIFY(kern_packet_get_trace_id(phary[i]) == i);
431 		VERIFY(kern_packet_clone(phary[i], &pharyc[i],
432 		    KPKT_COPY_LIGHT) == 0 || !(flags & KBIF_BUFFER_ON_DEMAND));
433 		if (pharyc[i] != 0) {
434 			struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(pharyc[i]);
435 			/*
436 			 * Source packet was allocated with 1 buffer, so
437 			 * validate that the clone packet points to that
438 			 * same buffer, and that the buffer's usecnt is 2.
439 			 */
440 			VERIFY(!(QUM_ADDR(pharyc[i])->qum_qflags & QUM_F_FINALIZED));
441 			VERIFY(kpkt2->pkt_mbuf == NULL);
442 			VERIFY(!(kpkt2->pkt_pflags & PKT_F_MBUF_MASK));
443 			VERIFY((buflet2 = kern_packet_get_next_buflet(pharyc[i],
444 			    NULL)) != NULL);
445 			VERIFY(kern_buflet_get_object_address(buflet) ==
446 			    kern_buflet_get_object_address(buflet2));
447 			VERIFY(kern_buflet_get_data_address(buflet) ==
448 			    kern_buflet_get_data_address(buflet2));
449 			VERIFY(kern_buflet_get_data_limit(buflet) ==
450 			    kern_buflet_get_data_limit(buflet2));
451 			VERIFY(kern_buflet_get_data_offset(buflet) ==
452 			    kern_buflet_get_data_offset(buflet2));
453 			VERIFY(kern_buflet_get_data_length(buflet) ==
454 			    kern_buflet_get_data_length(buflet2));
455 			VERIFY(kern_buflet_set_data_limit(buflet2,
456 			    (uint16_t)kern_buflet_get_object_limit(buflet2) + 1)
457 			    == ERANGE);
458 			VERIFY(kern_buflet_set_data_limit(buflet2,
459 			    (uint16_t)kern_buflet_get_object_limit(buflet2) - 16)
460 			    == 0);
461 			VERIFY(kern_buflet_set_data_address(buflet2,
462 			    (const void *)((uintptr_t)kern_buflet_get_object_address(buflet2) - 1))
463 			    == ERANGE);
464 			VERIFY(kern_buflet_set_data_address(buflet2,
465 			    (const void *)((uintptr_t)kern_buflet_get_object_address(buflet2) + 16))
466 			    == 0);
467 			VERIFY(kern_buflet_set_data_length(buflet2,
468 			    kern_buflet_get_data_length(buflet2) - 32) == 0);
469 			VERIFY(kern_buflet_get_object_segment(buflet,
470 			    &buf_idx_seg) ==
471 			    kern_buflet_get_object_segment(buflet2,
472 			    &buf2_idx_seg));
473 			VERIFY(buf_idx_seg == buf2_idx_seg);
474 			VERIFY(buflet->buf_ctl == buflet2->buf_ctl);
475 			VERIFY(buflet->buf_ctl->bc_usecnt == 2);
476 			++phcloned;
477 			VERIFY(__packet_finalize(pharyc[i]) == 0);
478 			/* verify trace id isn't reused */
479 			VERIFY(kern_packet_get_trace_id(pharyc[i]) == 0);
480 			kern_packet_set_trace_id(pharyc[i], phcnt - i);
481 			VERIFY(kern_packet_get_trace_id(pharyc[i]) == (phcnt - i));
482 			VERIFY(kern_packet_get_trace_id(phary[i]) == i);
483 		}
484 	}
485 	VERIFY(phcloned == phcnt || phcloned == 0);
486 	if (phcloned != 0) {
487 		kern_pbufpool_free_batch(pp, pharyc, phcloned);
488 		phcloned = 0;
489 	}
490 	kern_pbufpool_free_batch(pp, phary, phcnt);
491 	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
492 	VERIFY(phcnt == mbcnt);
493 	VERIFY(skmt_mbcnt == 0);
494 	for (i = 0; i < mbcnt; i++) {
495 		struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
496 		kern_buflet_t buflet;
497 
498 		VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
499 		    NULL)) != NULL);
500 		VERIFY(kern_buflet_set_data_length(buflet, BUFLEN) == 0);
501 		(void) memset(kern_buflet_get_data_address(buflet), i, BUFLEN);
502 		/* attach mbuf to packets and initialize packets */
503 		mblen = BUFLEN;
504 		VERIFY(mbuf_ring_cluster_alloc(MBUF_WAITOK, MBUF_TYPE_HEADER,
505 		    &mbary[i], skmem_test_mbfreecb, &mblen) == 0);
506 		VERIFY(mblen == BUFLEN);
507 		VERIFY(mbary[i] != NULL);
508 		VERIFY(mbary[i]->m_nextpkt == NULL);
509 		mbuf_setlen(mbary[i], mblen);
510 		mbuf_pkthdr_setlen(mbary[i], mblen);
511 		VERIFY((size_t)m_pktlen(mbary[i]) == mblen);
512 		(void) memset(mbuf_data(mbary[i]), i, mblen);
513 		kpkt->pkt_mbuf = mbary[i];
514 		kpkt->pkt_pflags |= PKT_F_MBUF_DATA;
515 		VERIFY(__packet_finalize_with_mbuf(kpkt) == 0);
516 		VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
517 		VERIFY(mbuf_ring_cluster_activate(kpkt->pkt_mbuf) == 0);
518 	}
519 	/* clone packets (heavyweight) */
520 	for (i = 0; i < phcnt; i++) {
521 		VERIFY(kern_packet_clone(phary[i], &pharyc[i],
522 		    KPKT_COPY_HEAVY) == 0);
523 		struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
524 		struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(pharyc[i]);
525 		kern_buflet_t buflet, buflet2;
526 		/*
527 		 * Source packet was allocated with 1 buffer, so
528 		 * validate that the clone packet points to different
529 		 * buffer, and that the clone's attached mbuf is also
530 		 * different than the source's.
531 		 */
532 		VERIFY(!(QUM_ADDR(pharyc[i])->qum_qflags & QUM_F_FINALIZED));
533 		VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
534 		    NULL)) != NULL);
535 		VERIFY((buflet2 = kern_packet_get_next_buflet(pharyc[i],
536 		    NULL)) != NULL);
537 		VERIFY(kern_buflet_get_object_address(buflet) !=
538 		    kern_buflet_get_object_address(buflet2));
539 		VERIFY(kern_buflet_get_data_address(buflet) !=
540 		    kern_buflet_get_data_address(buflet2));
541 		VERIFY(kern_buflet_get_data_limit(buflet) ==
542 		    kern_buflet_get_data_limit(buflet2));
543 		VERIFY(kern_buflet_get_data_offset(buflet) ==
544 		    kern_buflet_get_data_offset(buflet2));
545 		VERIFY(kern_buflet_get_data_length(buflet) == BUFLEN);
546 		VERIFY(kern_buflet_get_data_length(buflet) ==
547 		    kern_buflet_get_data_length(buflet2));
548 		VERIFY(kpkt->pkt_pflags & PKT_F_MBUF_DATA);
549 		VERIFY(kpkt2->pkt_pflags & PKT_F_MBUF_DATA);
550 		VERIFY(m_pktlen(kpkt2->pkt_mbuf) == m_pktlen(kpkt->pkt_mbuf));
551 		VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
552 		VERIFY(kern_packet_get_data_length(phary[i]) ==
553 		    kern_packet_get_data_length(pharyc[i]));
554 		VERIFY(buflet->buf_ctl != buflet2->buf_ctl);
555 		VERIFY(buflet->buf_ctl->bc_usecnt == 1);
556 		VERIFY(buflet2->buf_ctl->bc_usecnt == 1);
557 		VERIFY(memcmp(kern_buflet_get_data_address(buflet),
558 		    kern_buflet_get_data_address(buflet2),
559 		    kern_buflet_get_data_length(buflet)) == 0);
560 		VERIFY(kpkt->pkt_mbuf != NULL);
561 		VERIFY(kpkt2->pkt_mbuf != NULL);
562 		VERIFY(mbuf_data(kpkt->pkt_mbuf) != mbuf_data(kpkt2->pkt_mbuf));
563 		VERIFY(mbuf_len(kpkt->pkt_mbuf) == mbuf_len(kpkt2->pkt_mbuf));
564 		/* mbuf contents must have been copied */
565 		VERIFY(memcmp(mbuf_data(kpkt->pkt_mbuf),
566 		    mbuf_data(kpkt2->pkt_mbuf), mbuf_len(kpkt->pkt_mbuf)) == 0);
567 		VERIFY(__packet_finalize(pharyc[i]) == 0);
568 		++phcloned;
569 	}
570 	VERIFY(phcloned == phcnt);
571 	kern_pbufpool_free_batch(pp, pharyc, phcloned);
572 	phcloned = 0;
573 	skmt_mbcnt = mbcnt;
574 	kern_pbufpool_free_batch(pp, phary, phcnt);
575 	/* skmem_test_mbfreecb() should have been called for all mbufs by now */
576 	VERIFY(skmt_mbcnt == 0);
577 	for (i = 0; i < mbcnt; i++) {
578 		VERIFY(mbary[i] != NULL);
579 		m_freem(mbary[i]);
580 		mbary[i] = NULL;
581 	}
582 	mbcnt = 0;
583 
584 	/*
585 	 * Allocate and free test
586 	 * Case 2: Packet has a packet attached
587 	 */
588 	VERIFY(pp_mem_info.kpm_packets >= 64);
589 	phcnt = 32;
590 	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
591 	VERIFY(phcnt == 32);
592 	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary2, &phcnt) == 0);
593 	VERIFY(phcnt == 32);
594 	/* attach each packet to a packet */
595 	for (i = 0; i < phcnt; i++) {
596 		struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
597 		struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(phary2[i]);
598 
599 		kpkt->pkt_pkt = kpkt2;
600 		kpkt->pkt_pflags |= PKT_F_PKT_DATA;
601 	}
602 	/* free the batch of packets (also free the attached packets) */
603 	kern_pbufpool_free_batch(pp, phary, phcnt);
604 
605 	/*
606 	 * Allocate and free test
607 	 * Case 3: Packet has a packet attached. The attached packet itself has
608 	 * an mbuf attached.
609 	 */
610 	VERIFY(pp_mem_info.kpm_packets >= 64);
611 	phcnt = 32;
612 	mbcnt = 32;
613 	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
614 	VERIFY(phcnt == 32);
615 	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary2, &phcnt) == 0);
616 	VERIFY(phcnt == 32);
617 	VERIFY(skmt_mbcnt == 0);
618 	for (i = 0; i < mbcnt; i++) {
619 		mblen = BUFLEN;
620 		VERIFY(mbuf_ring_cluster_alloc(MBUF_WAITOK, MBUF_TYPE_HEADER,
621 		    &mbary[i], skmem_test_mbfreecb, &mblen) == 0);
622 		VERIFY(mbary[i] != NULL);
623 		VERIFY(mbary[i]->m_nextpkt == NULL);
624 	}
625 	/* attach each packet to a packet */
626 	for (i = 0; i < phcnt; i++) {
627 		struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
628 		struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(phary2[i]);
629 
630 		VERIFY(mbary[i] != NULL);
631 		VERIFY(__packet_initialize_with_mbuf(kpkt2,
632 		    mbary[i], 0, 0) == 0);
633 		VERIFY(mbuf_ring_cluster_activate(kpkt2->pkt_mbuf) == 0);
634 		kpkt->pkt_pkt = kpkt2;
635 		kpkt->pkt_pflags |= PKT_F_PKT_DATA;
636 	}
637 	skmt_mbcnt = mbcnt;
638 	/* free the batch of packets (also free the attached packets) */
639 	kern_pbufpool_free_batch(pp, phary, phcnt);
640 	/* skmem_test_mbfreecb() should have been called for all mbufs by now */
641 	VERIFY(skmt_mbcnt == 0);
642 	for (i = 0; i < mbcnt; i++) {
643 		VERIFY(mbary[i] != NULL);
644 		m_freem(mbary[i]);
645 		mbary[i] = NULL;
646 	}
647 	mbcnt = 0;
648 
649 	kern_pbufpool_destroy(pp);
650 	pp = NULL;
651 	/* check that ctx_release has been called */
652 	VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 1);
653 
654 	pp_init.kbi_ctx = NULL;
655 	pp_init.kbi_ctx_retain = NULL;
656 	pp_init.kbi_ctx_release = NULL;
657 	pp_init.kbi_buflets = 1;
658 	/* must fail if buflets is non-zero and less than packets */
659 	if (!(flags & KBIF_BUFFER_ON_DEMAND)) {
660 		VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
661 	} else {
662 		VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
663 		kern_pbufpool_destroy(pp);
664 		pp = NULL;
665 	}
666 	pp_init.kbi_buflets = (64 * 2);
667 	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
668 	bzero(&pp_mem_info, sizeof(pp_mem_info));
669 	VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
670 	VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
671 	VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
672 	VERIFY(pp_mem_info.kpm_packets >= 64);
673 	VERIFY(pp_mem_info.kpm_max_frags == 1);
674 	VERIFY(pp_mem_info.kpm_buflets >= (64 * 2));
675 	VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
676 	VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
677 	VERIFY(kern_packet_get_buflet_count(ph) == 1);
678 	kern_pbufpool_free(pp, ph);
679 	ph = 0;
680 	phcnt = 4;
681 	VERIFY(kern_pbufpool_alloc_batch(pp, 4, phary, &phcnt) == EINVAL);
682 	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
683 	VERIFY(kern_packet_get_buflet_count(phary[0]) == 1);
684 	VERIFY(kern_packet_get_buflet_count(phary[1]) == 1);
685 	VERIFY(kern_packet_get_buflet_count(phary[2]) == 1);
686 	VERIFY(kern_packet_get_buflet_count(phary[3]) == 1);
687 	kern_pbufpool_free_batch(pp, phary, phcnt);
688 	kern_pbufpool_destroy(pp);
689 	pp = NULL;
690 
691 	/* check multi-buflet KPIs */
692 	bzero(&pp_init_mb, sizeof(pp_init_mb));
693 	pp_init_mb.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
694 	pp_init_mb.kbi_buf_seg_size = skmem_usr_buf_seg_size;
695 	(void) snprintf((char *)pp_init_mb.kbi_name,
696 	    sizeof(pp_init_mb.kbi_name), "%s", "skmem_buflet_tests_mb");
697 	pp_init_mb.kbi_flags = flags;
698 	pp_init_mb.kbi_max_frags = 4;
699 	pp_init_mb.kbi_packets = 64;
700 	pp_init_mb.kbi_bufsize = 512;
701 	pp_init_mb.kbi_buflets =
702 	    pp_init_mb.kbi_packets * pp_init_mb.kbi_max_frags;
703 
704 	VERIFY((kern_pbufpool_create(&pp_init_mb, &pp_mb, NULL) == EINVAL) ||
705 	    (flags & KBIF_BUFFER_ON_DEMAND));
706 
707 	if (pp_mb != NULL) {
708 		bzero(&pp_mem_info, sizeof(pp_mem_info));
709 		VERIFY(kern_pbufpool_get_memory_info(pp_mb, &pp_mem_info) == 0);
710 		VERIFY(kern_pbufpool_alloc(pp_mb, 0, &ph_mb) == 0 ||
711 		    !(flags & KBIF_BUFFER_ON_DEMAND));
712 		if (ph_mb != 0) {
713 			VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
714 			kern_pbufpool_free(pp_mb, ph_mb);
715 			ph_mb = 0;
716 		}
717 		VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, &sg,
718 		    &sg_idx) == 0 || !(flags & KBIF_BUFFER_ON_DEMAND));
719 		if (baddr != 0) {
720 			VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
721 			kern_pbufpool_free_buffer(pp_mb, baddr);
722 			baddr = 0;
723 		}
724 		kern_pbufpool_destroy(pp_mb);
725 		pp_mb = NULL;
726 	}
727 
728 	kfree_type(struct mbuf *, MAX_PH_ARY, mbary);
729 	mbary = NULL;
730 
731 	kfree_data(phary, sizeof(kern_packet_t) * MAX_PH_ARY);
732 	phary = NULL;
733 
734 	kfree_data(phary2, sizeof(kern_packet_t) * MAX_PH_ARY);
735 	phary2 = NULL;
736 
737 	kfree_data(pharyc, sizeof(kern_packet_t) * MAX_PH_ARY);
738 	pharyc = NULL;
739 }
740 
741 static void
skmem_test_mbfreecb(caddr_t cl,uint32_t size,caddr_t arg)742 skmem_test_mbfreecb(caddr_t cl, uint32_t size, caddr_t arg)
743 {
744 #pragma unused(cl, size)
745 	struct mbuf *m = (void *)arg;
746 
747 	VERIFY(!mbuf_ring_cluster_is_active(m));
748 	VERIFY(skmt_mbcnt > 0);
749 	atomic_add_32(&skmt_mbcnt, -1);
750 }
751 
/*
 * Per-packet callback for kern_pbufpool_alloc_batch_callback();
 * verifies the callback receives the shared context, a valid packet,
 * and strictly in-order indices (stc_idx is atomically advanced so
 * the post-increment value must equal this invocation's idx).
 */
static void
skmem_test_alloccb(kern_packet_t ph, uint32_t idx, const void *ctx)
{
	VERIFY(ph != 0);
	VERIFY(ctx == &skmt_alloccb_ctx);
	VERIFY(idx < skmt_alloccb_ctx.stc_req);
	VERIFY(idx == atomic_add_32_ov(&skmt_alloccb_ctx.stc_idx, 1));
}
760 static void
skmem_packet_tests(uint32_t flags)761 skmem_packet_tests(uint32_t flags)
762 {
763 	struct kern_pbufpool_memory_info pp_mb_mem_info;
764 	struct kern_pbufpool_memory_info pp_mem_info;
765 	struct kern_pbufpool_init pp_init;
766 	kern_pbufpool_t pp = NULL;
767 	struct kern_pbufpool_init pp_init_mb;
768 	kern_pbufpool_t pp_mb = NULL;
769 	mach_vm_address_t baddr = 0;
770 	uint8_t *buffer, *ref_buffer;
771 	uint8_t *bflt_buffer;
772 	kern_obj_idx_seg_t sg_idx;
773 	kern_buflet_t buflet;
774 	kern_segment_t sg;
775 	kern_packet_t ph = 0, ph_mb = 0;
776 	struct mbuf *m = NULL;
777 	mach_vm_address_t buffer_addr = 0;
778 	uint16_t len;
779 	uint32_t i;
780 	uint32_t bft_cnt;
781 	uint32_t csum_eee_ref, csum_eeo_ref, csum_eoe_ref, csum_eoo_ref;
782 	uint32_t csum_oee_ref, csum_oeo_ref, csum_ooe_ref, csum_ooo_ref, csum;
783 	uint32_t ref_sum;
784 	boolean_t test_unaligned;
785 	kern_buflet_t bft0, bft1, bft2;
786 	kern_buflet_t bft_array[4];
787 
788 	SK_ERR("flags 0x%x", flags);
789 
790 	/*
791 	 * XXX: Skip packet tests involving unaligned addresses when
792 	 * KBIF_INHIBIT_CACHE is set, as the copy-and-checksum routine
793 	 * currently assumes normal memory, rather than device memory.
794 	 */
795 	test_unaligned = !(flags & KBIF_INHIBIT_CACHE);
796 
797 	/* allocate separately in case pool is setup for device memory */
798 	ref_buffer = (uint8_t *) kalloc_data(SKMEM_TEST_BUFSIZE,
799 	    Z_WAITOK | Z_ZERO);
800 
801 	bzero(&pp_init_mb, sizeof(pp_init_mb));
802 	pp_init_mb.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
803 	pp_init_mb.kbi_buf_seg_size = skmem_usr_buf_seg_size;
804 	(void) snprintf((char *)pp_init_mb.kbi_name,
805 	    sizeof(pp_init_mb.kbi_name), "%s", "skmem_packet_tests_mb");
806 	pp_init_mb.kbi_flags = flags | KBIF_BUFFER_ON_DEMAND | KBIF_RAW_BFLT;
807 	pp_init_mb.kbi_max_frags = 4;
808 	pp_init_mb.kbi_packets = 64;
809 	pp_init_mb.kbi_bufsize = 512;
810 	pp_init_mb.kbi_buflets =
811 	    pp_init_mb.kbi_packets * pp_init_mb.kbi_max_frags;
812 	pp_init_mb.kbi_ctx = NULL;
813 	pp_init_mb.kbi_ctx_retain = NULL;
814 	pp_init_mb.kbi_ctx_release = NULL;
815 
816 	VERIFY(kern_pbufpool_create(&pp_init_mb, &pp_mb, &pp_mb_mem_info) == 0);
817 	VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, NULL, NULL) == 0);
818 	kern_pbufpool_free_buffer(pp_mb, baddr);
819 	VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, &sg, &sg_idx) == 0);
820 	VERIFY(sg != NULL);
821 	VERIFY(sg->sg_region != NULL);
822 	VERIFY(sg->sg_md != NULL);
823 	VERIFY(sg->sg_start != 0);
824 	VERIFY(sg->sg_end != 0);
825 	VERIFY(sg->sg_type == SKSEG_TYPE_ALLOC);
826 	kern_pbufpool_free_buffer(pp_mb, baddr);
827 	baddr = 0;
828 
829 	/* add buflet to a packet with buf count 1 */
830 	VERIFY(kern_pbufpool_alloc(pp_mb, 1, &ph_mb) == 0);
831 	VERIFY(kern_pbufpool_alloc_buflet(pp_mb, &bft1, true) == 0);
832 	VERIFY(bft1 != NULL);
833 	VERIFY(kern_buflet_get_data_address(bft1) != NULL);
834 	VERIFY(kern_buflet_get_object_address(bft1) != NULL);
835 	VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) != NULL);
836 	VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
837 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 2);
838 	VERIFY(kern_packet_get_next_buflet(ph_mb, NULL) == bft0);
839 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
840 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
841 	VERIFY(kern_packet_finalize(ph_mb) == 0);
842 	kern_pbufpool_free(pp_mb, ph_mb);
843 	ph_mb = 0;
844 
845 	/* clone and add a buflet to a packet */
846 	VERIFY(kern_pbufpool_alloc(pp_mb, 1, &ph_mb) == 0);
847 	VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) != NULL);
848 	bft_cnt = 1;
849 	VERIFY(kern_buflet_clone(bft0, &bft1, &bft_cnt, pp_mb) == 0);
850 	VERIFY(bft_cnt == 1 && bft1 != NULL);
851 
852 	VERIFY(kern_buflet_get_data_address(bft1) != NULL);
853 	VERIFY(kern_buflet_get_object_address(bft1) != NULL);
854 	VERIFY(kern_buflet_get_data_address(bft0) ==
855 	    kern_buflet_get_data_address(bft1));
856 	VERIFY(kern_buflet_get_object_address(bft0) ==
857 	    kern_buflet_get_object_address(bft1));
858 
859 	/* exercise some buflet KPIs here */
860 	VERIFY(kern_buflet_set_data_length(bft0, 128) == 0);
861 	VERIFY(kern_buflet_set_gro_len(bft0, 128) == 0);
862 	buffer_addr = (mach_vm_address_t) kern_buflet_get_data_address(bft0);
863 	VERIFY((mach_vm_address_t) kern_buflet_get_next_buf(bft0, NULL) == buffer_addr);
864 	VERIFY(kern_buflet_get_next_buf(bft0, (void *)buffer_addr) == NULL);
865 
866 	VERIFY(kern_buflet_set_gro_len(bft1, 128) == 0);
867 	VERIFY(kern_buflet_set_data_length(bft1, 512 - 128) == 0);
868 	VERIFY(kern_buflet_set_buffer_offset(bft1, 128) == 0);
869 	VERIFY((mach_vm_address_t) kern_buflet_get_data_address(bft1) ==
870 	    buffer_addr + 128);
871 	VERIFY((mach_vm_address_t) kern_buflet_get_next_buf(bft1, 0) ==
872 	    buffer_addr + 128);
873 	VERIFY((mach_vm_address_t) kern_buflet_get_next_buf(bft1,
874 	    (void *)(buffer_addr + 128)) == buffer_addr + 256);
875 
876 	/* attach cloned buflet to pkt */
877 	VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
878 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 2);
879 	VERIFY(kern_packet_get_next_buflet(ph_mb, NULL) == bft0);
880 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
881 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
882 	VERIFY(kern_packet_finalize(ph_mb) == 0);
883 	kern_pbufpool_free(pp_mb, ph_mb);
884 	ph_mb = 0;
885 
886 	/* clone and add 2 buflets to a packet */
887 	VERIFY(kern_pbufpool_alloc(pp_mb, 1, &ph_mb) == 0);
888 	VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) != NULL);
889 	bft_cnt = 2;
890 	VERIFY(kern_buflet_clone(bft0, bft_array, &bft_cnt, pp_mb) == 0);
891 	VERIFY(bft_cnt == 2 && bft_array[0] != NULL && bft_array[1] != NULL);
892 	bft1 = bft_array[0];
893 	bft2 = bft_array[1];
894 	VERIFY(kern_buflet_get_data_address(bft1) != NULL);
895 	VERIFY(kern_buflet_get_object_address(bft1) != NULL);
896 	VERIFY(kern_buflet_get_data_address(bft0) ==
897 	    kern_buflet_get_data_address(bft1));
898 	VERIFY(kern_buflet_get_object_address(bft0) ==
899 	    kern_buflet_get_object_address(bft1));
900 	VERIFY(kern_buflet_get_data_address(bft0) ==
901 	    kern_buflet_get_data_address(bft2));
902 	VERIFY(kern_buflet_get_object_address(bft0) ==
903 	    kern_buflet_get_object_address(bft2));
904 
905 	VERIFY(kern_buflet_set_data_length(bft0, 128) == 0);
906 	VERIFY(kern_buflet_set_gro_len(bft0, 128) == 0);
907 	buffer_addr = (mach_vm_address_t) kern_buflet_get_data_address(bft0);
908 	VERIFY((mach_vm_address_t) kern_buflet_get_next_buf(bft0, NULL) == buffer_addr);
909 	VERIFY(kern_buflet_get_next_buf(bft0, (void *)buffer_addr) == NULL);
910 
911 	VERIFY(kern_buflet_set_gro_len(bft1, 256) == 0);
912 	VERIFY(kern_buflet_set_data_length(bft1, 256) == 0);
913 	VERIFY(kern_buflet_set_buffer_offset(bft1, 128) == 0);
914 	VERIFY((mach_vm_address_t) kern_buflet_get_data_address(bft1) ==
915 	    buffer_addr + 128);
916 	VERIFY((mach_vm_address_t) kern_buflet_get_next_buf(bft1, 0) ==
917 	    buffer_addr + 128);
918 
919 	VERIFY(kern_buflet_set_gro_len(bft2, 128) == 0);
920 	VERIFY(kern_buflet_set_data_length(bft2, 128) == 0);
921 	VERIFY(kern_buflet_set_buffer_offset(bft2, 128 + 256) == 0);
922 	VERIFY((mach_vm_address_t) kern_buflet_get_data_address(bft2) ==
923 	    buffer_addr + 128 + 256);
924 	VERIFY((mach_vm_address_t) kern_buflet_get_next_buf(bft2, 0) ==
925 	    buffer_addr + 128 + 256);
926 
927 	/* attach cloned buflet to pkt */
928 	VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
929 	VERIFY(kern_packet_add_buflet(ph_mb, bft1, bft2) == 0);
930 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 3);
931 	VERIFY(kern_packet_get_next_buflet(ph_mb, NULL) == bft0);
932 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
933 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == bft2);
934 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft2) == NULL);
935 	VERIFY(kern_packet_finalize(ph_mb) == 0);
936 	kern_pbufpool_free(pp_mb, ph_mb);
937 	ph_mb = 0;
938 
939 	/* add buflet to a packet with buf count 0 */
940 	VERIFY(kern_pbufpool_alloc(pp_mb, 0, &ph_mb) == 0);
941 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 0);
942 	VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) == NULL);
943 	VERIFY(kern_pbufpool_alloc_buflet(pp_mb, &bft1, true) == 0);
944 	VERIFY(bft1 != NULL);
945 	VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
946 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 1);
947 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
948 	VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
949 	VERIFY(kern_buflet_get_data_address(bft1) != NULL);
950 	VERIFY(kern_buflet_get_object_address(bft1) != NULL);
951 	VERIFY(kern_buflet_get_data_limit(bft1) != 0);
952 	VERIFY(kern_buflet_get_data_length(bft1) == 0);
953 	VERIFY(kern_packet_finalize(ph_mb) == 0);
954 	kern_pbufpool_free(pp_mb, ph_mb);
955 	ph_mb = 0;
956 
957 	bzero(&pp_init, sizeof(pp_init));
958 	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
959 	pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
960 	(void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
961 	    "%s", "skmem_packet_tests");
962 	pp_init.kbi_flags = flags;
963 	pp_init.kbi_packets = 64;
964 	pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
965 	pp_init.kbi_max_frags = 1;
966 	pp_init.kbi_buflets = (64 * 2);
967 	pp_init.kbi_ctx = NULL;
968 	pp_init.kbi_ctx_retain = NULL;
969 	pp_init.kbi_ctx_release = NULL;
970 
971 	/* validate multi-buflet packet checksum/copy+checksum routines */
972 	VERIFY(kern_pbufpool_create(&pp_init, &pp, &pp_mem_info) == 0);
973 	VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
974 	VERIFY(kern_packet_get_buflet_count(ph) == 1);
975 
976 	VERIFY((buflet = kern_packet_get_next_buflet(ph, NULL)) != NULL);
977 	VERIFY((buffer = kern_buflet_get_data_address(buflet)) != NULL);
978 	len = SKMEM_TEST_BUFSIZE;
979 	for (i = 0; i < len; i++) {
980 		ref_buffer[i] = (i & 0xff);
981 	}
982 	/* enforce load/store byte for device memory case */
983 	volatile uint8_t *bufp = buffer;
984 	for (i = 0; i < len; i++) {
985 		bufp[i] = ref_buffer[i];
986 	}
987 	VERIFY(kern_buflet_set_data_length(buflet, len) == 0);
988 	VERIFY(__packet_finalize(ph) == 0);
989 
990 	/* calculate and validate reference value */
991 	csum_eee_ref = __packet_cksum(buffer, len, 0);
992 	VERIFY(skmem_reference_sum(ref_buffer, len, 0) == csum_eee_ref);
993 	csum_eoe_ref = __packet_cksum(buffer, len - 2, 0);
994 	VERIFY(skmem_reference_sum(ref_buffer, len - 2, 0) == csum_eoe_ref);
995 	csum_eoo_ref = csum_eeo_ref = __packet_cksum(buffer, len - 1, 0);
996 	VERIFY(skmem_reference_sum(ref_buffer, len - 1, 0) == csum_eoo_ref);
997 	csum_oeo_ref = csum_ooo_ref = __packet_cksum(buffer + 1, len - 1, 0);
998 	VERIFY(skmem_reference_sum(ref_buffer + 1, len - 1, 0) == csum_oeo_ref);
999 	csum_ooe_ref = csum_oee_ref = __packet_cksum(buffer + 1, len - 2, 0);
1000 	VERIFY(skmem_reference_sum(ref_buffer + 1, len - 2, 0) == csum_ooe_ref);
1001 
1002 	/* sanity tests */
1003 	VERIFY(skmem_reference_sum(ref_buffer + 2, len - 2, 0) ==
1004 	    __packet_cksum(buffer + 2, len - 2, 0));
1005 	VERIFY(skmem_reference_sum(ref_buffer + 3, len - 3, 0) ==
1006 	    __packet_cksum(buffer + 3, len - 3, 0));
1007 	VERIFY(skmem_reference_sum(ref_buffer + 4, len - 4, 0) ==
1008 	    __packet_cksum(buffer + 4, len - 4, 0));
1009 	VERIFY(skmem_reference_sum(ref_buffer + 5, len - 5, 0) ==
1010 	    __packet_cksum(buffer + 5, len - 5, 0));
1011 	VERIFY(skmem_reference_sum(ref_buffer + 6, len - 6, 0) ==
1012 	    __packet_cksum(buffer + 6, len - 6, 0));
1013 	VERIFY(skmem_reference_sum(ref_buffer + 7, len - 7, 0) ==
1014 	    __packet_cksum(buffer + 7, len - 7, 0));
1015 
1016 	VERIFY(mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_HEADER, &m) == 0);
1017 	VERIFY(mbuf_copyback(m, 0, len, buffer, MBUF_WAITOK) == 0);
1018 
1019 	/* verify copy-checksum between packets */
1020 	VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1021 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1022 	pkt_copypkt_sum(ph, 0, ph_mb, 0, len - 1, &csum, TRUE);
1023 	METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1024 	VERIFY(__packet_finalize(ph_mb) == 0);
1025 	if (csum_eeo_ref != csum) {
1026 		SK_ERR("pkt_copypkt_sum: csum_eeo_mismatch 0x%x, "
1027 		    "0x%x, 0x%llx", csum_eeo_ref, csum,
1028 		    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1029 	}
1030 	VERIFY(csum_eeo_ref == csum);
1031 	kern_pbufpool_free(pp_mb, ph_mb);
1032 	ph_mb = 0;
1033 
1034 	/* verify checksum on a packet with shared-buffer buflet */
1035 	VERIFY(kern_pbufpool_alloc(pp_mb, 1, &ph_mb) == 0);
1036 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 1);
1037 	VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) != NULL);
1038 	VERIFY((bflt_buffer = kern_buflet_get_data_address(bft0)) != NULL);
1039 
1040 	bufp = bflt_buffer;
1041 	for (i = 0; i < 512; i++) {
1042 		bufp[i] = ref_buffer[i];
1043 	}
1044 
1045 	bft_cnt = 1;
1046 	VERIFY(kern_buflet_clone(bft0, &bft1, &bft_cnt, pp_mb) == 0);
1047 	VERIFY(bft_cnt == 1 && bft1 != NULL);
1048 
1049 	VERIFY(kern_buflet_set_data_length(bft0, 128) == 0);
1050 	VERIFY(kern_buflet_set_gro_len(bft0, 128) == 0);
1051 
1052 	VERIFY(kern_buflet_set_buffer_offset(bft1, 128) == 0);
1053 	VERIFY(kern_buflet_set_gro_len(bft1, 512 - 128) == 0);
1054 	VERIFY(kern_buflet_set_data_length(bft1, 512 - 128) == 0);
1055 
1056 	VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
1057 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 2);
1058 	METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1059 	VERIFY(kern_packet_finalize(ph_mb) == 0);
1060 	csum = pkt_sum(ph_mb, 0, 512);
1061 
1062 	ref_sum = skmem_reference_sum(ref_buffer, 512, 0);
1063 	VERIFY(ref_sum == __packet_cksum(bflt_buffer, 512, 0));
1064 	if (__packet_cksum(ref_buffer, 512, 0) != csum) {
1065 		SK_ERR("pkt_sum: ref_sum 0x%x, "
1066 		    "0x%x, 0x%llx", ref_sum, csum,
1067 		    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1068 	}
1069 	VERIFY(ref_sum == csum);
1070 	kern_pbufpool_free(pp_mb, ph_mb);
1071 	ph_mb = 0;
1072 
1073 	if (test_unaligned) {
1074 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1075 		pkt_copypkt_sum(ph, 0, ph_mb, 1, len - 2, &csum, TRUE);
1076 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1077 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1078 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1079 		VERIFY(__packet_finalize(ph_mb) == 0);
1080 		if (csum_eoe_ref != csum) {
1081 			SK_ERR("pkt_copypkt_sum: csum_eoe_mismatch 0x%x, "
1082 			    "0x%x, 0x%llx", csum_eoe_ref, csum,
1083 			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1084 		}
1085 		VERIFY(csum_eoe_ref == csum);
1086 		kern_pbufpool_free(pp_mb, ph_mb);
1087 		ph_mb = 0;
1088 
1089 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1090 		pkt_copypkt_sum(ph, 0, ph_mb, 1, len - 1, &csum, TRUE);
1091 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1092 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1093 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1094 		VERIFY(__packet_finalize(ph_mb) == 0);
1095 		if (csum_eoo_ref != csum) {
1096 			SK_ERR("pkt_copypkt_sum: csum_eoo_mismatch 0x%x, "
1097 			    "0x%x, 0x%llx", csum_eoo_ref, csum,
1098 			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1099 		}
1100 		VERIFY(csum_eoo_ref == csum);
1101 		kern_pbufpool_free(pp_mb, ph_mb);
1102 		ph_mb = 0;
1103 
1104 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1105 		pkt_copypkt_sum(ph, 1, ph_mb, 0, len - 1, &csum, TRUE);
1106 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1107 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1108 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1109 		VERIFY(__packet_finalize(ph_mb) == 0);
1110 		if (csum_oeo_ref != csum) {
1111 			SK_ERR("pkt_copypkt_sum: csum_oeo_mismatch 0x%x, "
1112 			    "0x%x, 0x%llx", csum_oeo_ref, csum,
1113 			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1114 		}
1115 		VERIFY(csum_oeo_ref == csum);
1116 		kern_pbufpool_free(pp_mb, ph_mb);
1117 		ph_mb = 0;
1118 
1119 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1120 		pkt_copypkt_sum(ph, 1, ph_mb, 1, len - 1, &csum, TRUE);
1121 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1122 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1123 		VERIFY(__packet_finalize(ph_mb) == 0);
1124 		if (csum_ooo_ref != csum) {
1125 			SK_ERR("pkt_copypkt_sum: csum_ooo_mismatch 0x%x, "
1126 			    "0x%x, 0x%llx", csum_ooo_ref, csum,
1127 			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1128 		}
1129 		VERIFY(csum_ooo_ref == csum);
1130 		kern_pbufpool_free(pp_mb, ph_mb);
1131 		ph_mb = 0;
1132 
1133 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1134 		pkt_copypkt_sum(ph, 1, ph_mb, 1, len - 2, &csum, TRUE);
1135 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1136 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1137 		VERIFY(__packet_finalize(ph_mb) == 0);
1138 		if (csum_ooe_ref != csum) {
1139 			SK_ERR("pkt_copypkt_sum: csum_ooe_mismatch 0x%x, "
1140 			    "0x%x, 0x%llx", csum_ooe_ref, csum,
1141 			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1142 		}
1143 		VERIFY(csum_ooe_ref == csum);
1144 		kern_pbufpool_free(pp_mb, ph_mb);
1145 		ph_mb = 0;
1146 
1147 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1148 		pkt_copypkt_sum(ph, 1, ph_mb, 0, len - 2, &csum, TRUE);
1149 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1150 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1151 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1152 		VERIFY(__packet_finalize(ph_mb) == 0);
1153 		if (csum_ooe_ref != csum) {
1154 			SK_ERR("pkt_copypkt_sum: csum_oee_mismatch 0x%x, "
1155 			    "0x%x, 0x%llx", csum_oee_ref, csum,
1156 			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1157 		}
1158 		VERIFY(csum_oee_ref == csum);
1159 		kern_pbufpool_free(pp_mb, ph_mb);
1160 		ph_mb = 0;
1161 	}
1162 
1163 	VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1164 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1165 	pkt_copypkt_sum(ph, 0, ph_mb, 0, len, &csum, TRUE);
1166 	METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1167 	SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1168 	SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1169 	VERIFY(__packet_finalize(ph_mb) == 0);
1170 	if (csum_eee_ref != csum) {
1171 		SK_ERR("pkt_copypkt_sum: csum_eee_mismatch 0x%x, "
1172 		    "0x%x, 0x%llx", csum_eee_ref, csum,
1173 		    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
1174 	}
1175 	VERIFY(csum_eee_ref == csum);
1176 
1177 	/* verify copy-checksum from packet to buffer */
1178 	csum = pkt_copyaddr_sum(ph_mb, 0, buffer, len - 1, TRUE, 0, NULL);
1179 	if (csum_eeo_ref != csum) {
1180 		SK_ERR("pkt_copyaddr_sum: csum_eeo_mismatch "
1181 		    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eeo_ref,
1182 		    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1183 		    SK_KVA(buffer));
1184 	}
1185 	VERIFY(csum_eeo_ref == csum);
1186 
1187 	if (test_unaligned) {
1188 		csum = pkt_copyaddr_sum(ph_mb, 0, buffer + 1, len - 1, TRUE, 0, NULL);
1189 		if (csum_eoo_ref != csum) {
1190 			SK_ERR("pkt_copyaddr_sum: csum_eoo_mismatch "
1191 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoo_ref,
1192 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1193 			    SK_KVA(buffer));
1194 		}
1195 		VERIFY(csum_eoo_ref == csum);
1196 
1197 		csum = pkt_copyaddr_sum(ph_mb, 0, buffer + 1, len - 2, TRUE, 0, NULL);
1198 		if (csum_eoe_ref != csum) {
1199 			SK_ERR("pkt_copyaddr_sum: csum_eoe_mismatch "
1200 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoe_ref,
1201 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1202 			    SK_KVA(buffer));
1203 		}
1204 		VERIFY(csum_eoe_ref == csum);
1205 
1206 		csum = pkt_copyaddr_sum(ph_mb, 1, buffer + 1, len - 2, TRUE, 0, NULL);
1207 		if (csum_ooe_ref != csum) {
1208 			SK_ERR("pkt_copyaddr_sum: csum_ooe_mismatch "
1209 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooe_ref,
1210 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1211 			    SK_KVA(buffer));
1212 		}
1213 		VERIFY(csum_ooe_ref == csum);
1214 
1215 		csum = pkt_copyaddr_sum(ph_mb, 1, buffer, len - 2, TRUE, 0, NULL);
1216 		if (csum_oee_ref != csum) {
1217 			SK_ERR("pkt_copyaddr_sum: csum_oee_mismatch "
1218 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oee_ref,
1219 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1220 			    SK_KVA(buffer));
1221 		}
1222 		VERIFY(csum_oee_ref == csum);
1223 
1224 		csum = pkt_copyaddr_sum(ph_mb, 1, buffer, len - 1, TRUE, 0, NULL);
1225 		if (csum_oeo_ref != csum) {
1226 			SK_ERR("pkt_copyaddr_sum: csum_oeo_mismatch "
1227 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oeo_ref,
1228 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1229 			    SK_KVA(buffer));
1230 		}
1231 		VERIFY(csum_oeo_ref == csum);
1232 
1233 		csum = pkt_copyaddr_sum(ph_mb, 1, buffer + 1, len - 1, TRUE, 0, NULL);
1234 		if (csum_ooo_ref != csum) {
1235 			SK_ERR("pkt_copyaddr_sum: csum_ooo_mismatch "
1236 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooo_ref,
1237 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1238 			    SK_KVA(buffer));
1239 		}
1240 		VERIFY(csum_ooo_ref == csum);
1241 	}
1242 
1243 	csum = pkt_copyaddr_sum(ph_mb, 0, buffer, len, TRUE, 0, NULL);
1244 	if (csum_eee_ref != csum) {
1245 		SK_ERR("pkt_copyaddr_sum: csum_eee_mismatch "
1246 		    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eee_ref,
1247 		    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1248 		    SK_KVA(buffer));
1249 	}
1250 	VERIFY(csum_eee_ref == csum);
1251 
1252 	for (i = 0; i < len; i++) {
1253 		VERIFY(buffer[i] == (i & 0xff));
1254 	}
1255 	kern_pbufpool_free(pp_mb, ph_mb);
1256 	ph_mb = 0;
1257 
1258 	if (test_unaligned) {
1259 		/* verify copy-checksum from mbuf to packet */
1260 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1261 		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1262 		csum = pkt_mcopypkt_sum(m, 0, ph_mb, 0, len, TRUE);
1263 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1264 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1265 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1266 		VERIFY(__packet_finalize(ph_mb) == 0);
1267 		if (csum_eee_ref != csum) {
1268 			SK_ERR("pkt_mcopypkt_sum: csum_eee_mismatch "
1269 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eee_ref,
1270 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1271 			    SK_KVA(m));
1272 		}
1273 		VERIFY(csum_eee_ref == csum);
1274 		kern_pbufpool_free(pp_mb, ph_mb);
1275 		ph_mb = 0;
1276 
1277 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1278 		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1279 		csum = pkt_mcopypkt_sum(m, 0, ph_mb, 1, len - 2, TRUE);
1280 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1281 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1282 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1283 		VERIFY(__packet_finalize(ph_mb) == 0);
1284 		if (csum_eoe_ref != csum) {
1285 			SK_ERR("pkt_mcopypkt_sum: csum_eoe_mismatch "
1286 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoe_ref,
1287 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1288 			    SK_KVA(m));
1289 		}
1290 		VERIFY(csum_eoe_ref == csum);
1291 		kern_pbufpool_free(pp_mb, ph_mb);
1292 		ph_mb = 0;
1293 
1294 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1295 		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1296 		csum = pkt_mcopypkt_sum(m, 0, ph_mb, 1, len - 1, TRUE);
1297 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1298 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1299 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1300 		VERIFY(__packet_finalize(ph_mb) == 0);
1301 		if (csum_eoo_ref != csum) {
1302 			SK_ERR("pkt_mcopypkt_sum: csum_eoo_mismatch "
1303 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoo_ref,
1304 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1305 			    SK_KVA(m));
1306 		}
1307 		VERIFY(csum_eoo_ref == csum);
1308 		kern_pbufpool_free(pp_mb, ph_mb);
1309 		ph_mb = 0;
1310 	}
1311 
1312 	VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1313 	VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1314 	csum = pkt_mcopypkt_sum(m, 0, ph_mb, 0, len - 1, TRUE);
1315 	METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1316 	SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1317 	SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1318 	VERIFY(__packet_finalize(ph_mb) == 0);
1319 	if (csum_eeo_ref != csum) {
1320 		SK_ERR("pkt_mcopypkt_sum: csum_eeo_mismatch "
1321 		    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eeo_ref,
1322 		    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1323 		    SK_KVA(m));
1324 	}
1325 	VERIFY(csum_eeo_ref == csum);
1326 	kern_pbufpool_free(pp_mb, ph_mb);
1327 	ph_mb = 0;
1328 
1329 	if (test_unaligned) {
1330 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1331 		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1332 		csum = pkt_mcopypkt_sum(m, 1, ph_mb, 0, len - 1, TRUE);
1333 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1334 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1335 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1336 		VERIFY(__packet_finalize(ph_mb) == 0);
1337 		if (csum_oeo_ref != csum) {
1338 			SK_ERR("pkt_mcopypkt_sum: csum_oeo_mismatch "
1339 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oeo_ref,
1340 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1341 			    SK_KVA(m));
1342 		}
1343 		VERIFY(csum_oeo_ref == csum);
1344 		kern_pbufpool_free(pp_mb, ph_mb);
1345 		ph_mb = 0;
1346 
1347 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1348 		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1349 		csum = pkt_mcopypkt_sum(m, 1, ph_mb, 0, len - 2, TRUE);
1350 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
1351 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
1352 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1353 		VERIFY(__packet_finalize(ph_mb) == 0);
1354 		if (csum_oee_ref != csum) {
1355 			SK_ERR("pkt_mcopypkt_sum: csum_oee_mismatch "
1356 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oee_ref,
1357 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1358 			    SK_KVA(m));
1359 		}
1360 		VERIFY(csum_oee_ref == csum);
1361 		kern_pbufpool_free(pp_mb, ph_mb);
1362 		ph_mb = 0;
1363 
1364 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1365 		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1366 		csum = pkt_mcopypkt_sum(m, 1, ph_mb, 1, len - 2, TRUE);
1367 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1368 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1369 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1370 		VERIFY(__packet_finalize(ph_mb) == 0);
1371 		if (csum_ooe_ref != csum) {
1372 			SK_ERR("pkt_mcopypkt_sum: csum_ooe_mismatch "
1373 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooe_ref,
1374 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1375 			    SK_KVA(m));
1376 		}
1377 		VERIFY(csum_ooe_ref == csum);
1378 		kern_pbufpool_free(pp_mb, ph_mb);
1379 		ph_mb = 0;
1380 
1381 		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
1382 		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
1383 		csum = pkt_mcopypkt_sum(m, 1, ph_mb, 1, len - 1, TRUE);
1384 		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
1385 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
1386 		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
1387 		VERIFY(__packet_finalize(ph_mb) == 0);
1388 		if (csum_ooo_ref != csum) {
1389 			SK_ERR("pkt_mcopypkt_sum: csum_ooo_mismatch "
1390 			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooo_ref,
1391 			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
1392 			    SK_KVA(m));
1393 		}
1394 		VERIFY(csum_ooo_ref == csum);
1395 		kern_pbufpool_free(pp_mb, ph_mb);
1396 		ph_mb = 0;
1397 	}
1398 
1399 	kern_pbufpool_free(pp, ph);
1400 	ph = 0;
1401 	m_freem(m);
1402 	m = NULL;
1403 	kern_pbufpool_destroy(pp_mb);
1404 	pp_mb = NULL;
1405 	kern_pbufpool_destroy(pp);
1406 	pp = NULL;
1407 
1408 	kfree_data(ref_buffer, SKMEM_TEST_BUFSIZE);
1409 	ref_buffer = NULL;
1410 }
1411 
1412 static void
skmem_quantum_tests(uint32_t flags)1413 skmem_quantum_tests(uint32_t flags)
1414 {
1415 	struct kern_pbufpool_init pp_init;
1416 	struct kern_pbufpool_memory_info pp_mem_info;
1417 	kern_pbufpool_t pp = NULL;
1418 	kern_packet_t *phary = NULL;
1419 	uint32_t phcnt = 0;
1420 	kern_packet_t ph = 0;
1421 	uint32_t i;
1422 	errno_t err;
1423 
1424 	flags |= KBIF_QUANTUM;
1425 
1426 	SK_ERR("flags 0x%x", flags);
1427 
1428 	phary = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
1429 	    Z_WAITOK | Z_ZERO);
1430 
1431 	bzero(&pp_init, sizeof(pp_init));
1432 	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
1433 	pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
1434 	(void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
1435 	    "%s", "skmem_quantum_tests");
1436 	pp_init.kbi_flags = (KBIF_QUANTUM | flags);
1437 	pp_init.kbi_packets = 64;
1438 	pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
1439 	pp_init.kbi_buflets = (64 * 2);
1440 	pp_init.kbi_ctx = NULL;
1441 	pp_init.kbi_ctx_retain = NULL;
1442 	pp_init.kbi_ctx_release = NULL;
1443 
1444 	pp_init.kbi_max_frags = 4;
1445 	/* max_frags must be 1 for quantum type */
1446 	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
1447 	pp_init.kbi_max_frags = 1;
1448 	if ((flags & KBIF_QUANTUM) && (flags & KBIF_BUFFER_ON_DEMAND)) {
1449 		VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
1450 		goto done;
1451 	}
1452 	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
1453 	bzero(&pp_mem_info, sizeof(pp_mem_info));
1454 	VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
1455 	VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
1456 	VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
1457 	VERIFY(pp_mem_info.kpm_packets >= 64);
1458 	VERIFY(pp_mem_info.kpm_packets <= MAX_PH_ARY);
1459 	VERIFY(pp_mem_info.kpm_max_frags == 1);
1460 	VERIFY(pp_mem_info.kpm_buflets >= 64);
1461 	VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
1462 	VERIFY(kern_pbufpool_alloc(pp, 4, &ph) == EINVAL);
1463 	/* allocate and free one at a time */
1464 	for (i = 0, phcnt = 0; i < pp_mem_info.kpm_packets; i++) {
1465 		boolean_t stop = FALSE;
1466 		/*
1467 		 * This may fail if skmem_region_mtbf is set, or if
1468 		 * the system is short on memory.  Perform retries
1469 		 * at this layer to get at least 64 packets.
1470 		 */
1471 		while ((err = kern_pbufpool_alloc_nosleep(pp, 1, &ph)) != 0) {
1472 			VERIFY(err == ENOMEM);
1473 			if (phcnt < 64) {
1474 				SK_ERR("retrying alloc for quantum %u", phcnt);
1475 				delay(250 * NSEC_PER_USEC); /* 1/4 sec */
1476 				continue;
1477 			}
1478 			stop = TRUE;
1479 			break;
1480 		}
1481 		if (stop) {
1482 			break;
1483 		}
1484 		VERIFY(ph != 0);
1485 		VERIFY(kern_packet_get_data_length(ph) == 0);
1486 		VERIFY(kern_packet_get_buflet_count(ph) == 1);
1487 		phary[phcnt++] = ph;
1488 	}
1489 	VERIFY(phcnt >= 64);
1490 	for (i = 0; i < phcnt; i++) {
1491 		kern_pbufpool_free(pp, phary[i]);
1492 		phary[i] = 0;
1493 	}
1494 	/* allocate and free in batch */
1495 	phcnt = pp_mem_info.kpm_packets;
1496 	for (;;) {
1497 		err = kern_pbufpool_alloc_batch_nosleep(pp, 1, phary, &phcnt);
1498 		VERIFY(err != EINVAL && err != ENOTSUP);
1499 		if (err == ENOMEM) {
1500 			phcnt = pp_mem_info.kpm_packets;
1501 			SK_ERR("retrying batch alloc for %u quantums", phcnt);
1502 			delay(250 * NSEC_PER_USEC);     /* 1/4 sec */
1503 		} else if (err == EAGAIN) {
1504 			SK_ERR("batch alloc for %u quantums only returned %u",
1505 			    pp_mem_info.kpm_packets, phcnt);
1506 			break;
1507 		} else {
1508 			VERIFY(err == 0);
1509 			break;
1510 		}
1511 	}
1512 	VERIFY(phcnt > 0);
1513 	for (i = 0; i < phcnt; i++) {
1514 		VERIFY(phary[i] != 0);
1515 		VERIFY(kern_packet_get_data_length(phary[i]) == 0);
1516 		VERIFY(kern_packet_get_buflet_count(phary[i]) == 1);
1517 	}
1518 	kern_pbufpool_free_batch(pp, phary, phcnt);
1519 	/* allocate and free one at a time (blocking) */
1520 	for (i = 0, phcnt = 0; i < pp_mem_info.kpm_packets; i++) {
1521 		VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
1522 		VERIFY(ph != 0);
1523 		VERIFY(kern_packet_get_data_length(ph) == 0);
1524 		VERIFY(kern_packet_get_buflet_count(ph) == 1);
1525 		phary[phcnt++] = ph;
1526 	}
1527 	VERIFY(phcnt >= 64);
1528 	for (i = 0; i < phcnt; i++) {
1529 		kern_pbufpool_free(pp, phary[i]);
1530 		phary[i] = 0;
1531 	}
1532 	/* allocate and free in batch (blocking) */
1533 	bzero(&skmt_alloccb_ctx, sizeof(skmt_alloccb_ctx));
1534 	skmt_alloccb_ctx.stc_req = phcnt;
1535 	VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
1536 	    skmem_test_alloccb, &skmt_alloccb_ctx) == 0);
1537 	VERIFY(skmt_alloccb_ctx.stc_idx == phcnt);
1538 	kern_pbufpool_free_batch(pp, phary, phcnt);
1539 	kern_pbufpool_destroy(pp);
1540 	pp = NULL;
1541 done:
1542 	kfree_data(phary, sizeof(kern_packet_t) * MAX_PH_ARY);
1543 	phary = NULL;
1544 }
1545 
1546 static void
skmem_basic_tests(void)1547 skmem_basic_tests(void)
1548 {
1549 	/* basic sanity (alloc/free) tests on packet buflet KPIs */
1550 	skmem_buflet_tests(0);
1551 	skmem_buflet_tests(KBIF_PERSISTENT);
1552 	skmem_buflet_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1553 	skmem_buflet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1554 	skmem_buflet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS |
1555 	    KBIF_USER_ACCESS);
1556 	skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1557 	    KBIF_USER_ACCESS);
1558 	skmem_buflet_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1559 	skmem_buflet_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1560 	skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1561 	    KBIF_BUFFER_ON_DEMAND);
1562 	skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1563 	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1564 	skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1565 	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1566 	skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1567 	skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1568 	skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1569 	    KBIF_NO_MAGAZINES);
1570 	skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS |
1571 	    KBIF_USER_ACCESS);
1572 	skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1573 	skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1574 	    TEST_OPTION_INHIBIT_CACHE);
1575 	skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1576 	    TEST_OPTION_INHIBIT_CACHE);
1577 	skmem_buflet_tests(KBIF_BUFFER_ON_DEMAND | KBIF_NO_MAGAZINES);
1578 	skmem_buflet_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1579 
1580 	/* basic sanity (alloc/free) tests on packet buflet KPIs (vdev) */
1581 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE);
1582 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1583 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1584 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1585 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1586 	    KBIF_PHYS_CONTIGUOUS);
1587 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1588 	    KBIF_MONOLITHIC | KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1589 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1590 	    KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1591 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1592 	    KBIF_BUFFER_ON_DEMAND);
1593 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1594 	    TEST_OPTION_INHIBIT_CACHE);
1595 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1596 	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1597 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1598 	    KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1599 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1600 	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1601 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1602 	    KBIF_USER_ACCESS);
1603 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1604 	    KBIF_PHYS_CONTIGUOUS);
1605 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1606 	    KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1607 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1608 	    KBIF_BUFFER_ON_DEMAND);
1609 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1610 	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1611 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1612 	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1613 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1614 	skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1615 	    TEST_OPTION_INHIBIT_CACHE);
1616 
1617 	/* check packet KPIs (also touches data) */
1618 	skmem_packet_tests(0);
1619 	skmem_packet_tests(KBIF_PHYS_CONTIGUOUS);
1620 	skmem_packet_tests(KBIF_PERSISTENT);
1621 	skmem_packet_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1622 	skmem_packet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1623 	skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1624 	    KBIF_PHYS_CONTIGUOUS | KBIF_USER_ACCESS);
1625 	skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1626 	    KBIF_USER_ACCESS);
1627 	skmem_packet_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1628 	skmem_packet_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1629 	skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1630 	    KBIF_BUFFER_ON_DEMAND);
1631 	skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1632 	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1633 	skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1634 	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1635 	skmem_packet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1636 	skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1637 	skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1638 	    KBIF_NO_MAGAZINES);
1639 	skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1640 	    KBIF_PHYS_CONTIGUOUS);
1641 	skmem_packet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1642 #if 0
1643 	/* XXX: commented out failed tests on ARM64e platforms */
1644 	skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1645 	    TEST_OPTION_INHIBIT_CACHE);
1646 	skmem_packet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1647 	    TEST_OPTION_INHIBIT_CACHE);
1648 	skmem_packet_tests(KBIF_BUFFER_ON_DEMAND);
1649 	skmem_packet_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1650 #endif
1651 
1652 	/* check packet KPIs (also touches data) (vdev) */
1653 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE);
1654 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1655 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1656 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1657 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1658 	    KBIF_PHYS_CONTIGUOUS);
1659 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1660 	    KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1661 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1662 	    KBIF_BUFFER_ON_DEMAND);
1663 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1664 	    TEST_OPTION_INHIBIT_CACHE);
1665 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1666 	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1667 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1668 	    KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1669 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1670 	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1671 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1672 	    KBIF_PHYS_CONTIGUOUS);
1673 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1674 	    KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1675 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1676 	    KBIF_USER_ACCESS);
1677 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1678 	    KBIF_BUFFER_ON_DEMAND);
1679 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1680 	    KBIF_BUFFER_ON_DEMAND | KBIF_PHYS_CONTIGUOUS);
1681 #if 0
1682 	/* XXX: commented out failed tests on ARM64e platforms */
1683 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1684 	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1685 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1686 	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1687 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1688 	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1689 	    TEST_OPTION_INHIBIT_CACHE);
1690 #endif
1691 
1692 	/* check quantum KPIs */
1693 	skmem_quantum_tests(0);
1694 	skmem_quantum_tests(KBIF_PHYS_CONTIGUOUS);
1695 	skmem_quantum_tests(KBIF_PERSISTENT);
1696 	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1697 	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1698 	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1699 	    KBIF_USER_ACCESS);
1700 	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1701 	skmem_quantum_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1702 	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1703 	    KBIF_BUFFER_ON_DEMAND);
1704 	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1705 	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1706 	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1707 	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1708 	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1709 	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1710 	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1711 	    KBIF_PHYS_CONTIGUOUS);
1712 	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1713 	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1714 	    TEST_OPTION_INHIBIT_CACHE);
1715 	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1716 	    TEST_OPTION_INHIBIT_CACHE);
1717 	skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND);
1718 	skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | KBIF_NO_MAGAZINES);
1719 	skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | KBIF_PHYS_CONTIGUOUS);
1720 	skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1721 
1722 	/* check quantum KPIs (vdev) */
1723 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE);
1724 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1725 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1726 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1727 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1728 	    KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1729 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1730 	    KBIF_BUFFER_ON_DEMAND);
1731 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1732 	    TEST_OPTION_INHIBIT_CACHE);
1733 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1734 	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1735 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1736 	    KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1737 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1738 	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1739 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1740 	    KBIF_PHYS_CONTIGUOUS);
1741 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1742 	    KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1743 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1744 	    KBIF_USER_ACCESS);
1745 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1746 	    KBIF_BUFFER_ON_DEMAND);
1747 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1748 	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1749 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1750 	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1751 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1752 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1753 	    KBIF_PHYS_CONTIGUOUS);
1754 	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1755 	    TEST_OPTION_INHIBIT_CACHE);
1756 }
1757 
/*
 * Multi-threaded packet pool stress test.
 *
 * Creates a kern_pbufpool configured according to "mode", spawns "th_max"
 * worker threads (skmem_test_func) that exercise the pool with alloc/free
 * (and clone, for KBIF_BUFFER_ON_DEMAND pools) operations, waits for all of
 * them to finish, then tears the pool down.
 *
 *   n        iteration number; used only in the SK_ERR log line below
 *   th_max   number of worker threads to spawn; also sizes the pool
 *   mode     pool flavor selector (0-8), see the switch statement below
 *   nosleep  TRUE to have workers use the non-blocking alloc/clone variants
 *   flags    additional KBIF_* flags OR'ed into kbi_flags
 */
static void
skmem_advanced_tests(int n, int32_t th_max, uint32_t mode, boolean_t nosleep,
    uint32_t flags)
{
	struct kern_pbufpool_init pp_init;
	kern_packet_t mph = 0;          /* master packet for clone tests */
	kern_buflet_t buflet = 0;
	int i;

	/* no other advanced test instance may be in flight */
	VERIFY(skmth_pp == NULL);
	VERIFY(skmth_cnt == 0);

	bzero(&pp_init, sizeof(pp_init));
	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
	pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
	pp_init.kbi_flags |= flags;
	(void) snprintf((char *)pp_init.kbi_name,
	    sizeof(pp_init.kbi_name), "%s", "skmem_advanced");

	/* prepare: each mode selects a distinct pool configuration */
	switch (mode) {
	case 0:		/* monolithic, user-mappable */
		pp_init.kbi_packets = th_max;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	case 1:		/* mode 0 + virtual device */
		pp_init.kbi_packets = th_max;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
		    KBIF_VIRTUAL_DEVICE;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	case 2:		/* mode 0 + persistent memory */
		pp_init.kbi_packets = th_max;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
		    KBIF_PERSISTENT;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	case 3:		/* mode 0 + persistent + virtual device */
		pp_init.kbi_packets = th_max;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
		    KBIF_PERSISTENT | KBIF_VIRTUAL_DEVICE;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	case 4:		/* persistent, user-mappable (non-monolithic) */
		pp_init.kbi_packets = th_max;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= KBIF_PERSISTENT | KBIF_USER_ACCESS;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	case 5:		/* persistent, virtual device */
		pp_init.kbi_packets = th_max;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= KBIF_PERSISTENT | KBIF_VIRTUAL_DEVICE;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	case 6:		/* default configuration (caller-supplied flags only) */
		pp_init.kbi_packets = th_max;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= 0;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	case 7:		/* virtual device only */
		pp_init.kbi_packets = th_max;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= KBIF_VIRTUAL_DEVICE;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	case 8:
		/*
		 * Buffer-on-demand pool; enables the clone tests in the
		 * workers.  Extra packets (2 per thread + 1) presumably
		 * leave headroom for each worker holding a clone plus an
		 * allocation at once — TODO confirm sizing rationale.
		 */
		pp_init.kbi_packets = (th_max * 2) + 1;
		pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
		pp_init.kbi_max_frags = 1;
		pp_init.kbi_flags |= KBIF_BUFFER_ON_DEMAND;
		VERIFY(kern_pbufpool_create(&pp_init,
		    &skmth_pp, NULL) == 0);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	SK_ERR("%d: th_max %d mode %u nosleep %u nomagazines %u",
	    n, th_max, mode, nosleep, !!(flags & KBIF_NO_MAGAZINES));

	if (pp_init.kbi_flags & KBIF_BUFFER_ON_DEMAND) {
		/* create 1 master packet to clone */
		VERIFY(kern_pbufpool_alloc(skmth_pp, 1, &mph) == 0);
		VERIFY((buflet = kern_packet_get_next_buflet(mph, NULL)) != NULL);
		VERIFY(kern_buflet_set_data_length(buflet, SKMEM_TEST_BUFSIZE) == 0);
		VERIFY(__packet_finalize(mph) == 0);
	}

	bzero(skmth_info, skmth_info_size);

	/* spawn as many threads as there are CPUs */
	for (i = 0; i < th_max; i++) {
		skmth_info[i].sti_mph = mph;
		skmth_info[i].sti_nosleep = nosleep;
		if (kernel_thread_start(skmem_test_func, (void *)(uintptr_t)i,
		    &skmth_info[i].sti_thread) != KERN_SUCCESS) {
			panic("Failed to create skmem test thread");
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}

	/* wait until every worker has checked in (skmth_cnt == th_max) */
	lck_mtx_lock(&skmt_lock);
	do {
		/* 100ms poll timeout, expressed in nanoseconds */
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * USEC_PER_SEC };
		(void) msleep(&skmth_cnt, &skmt_lock, (PZERO - 1),
		    "skmtstartw", &ts);
	} while (skmth_cnt < th_max);
	VERIFY(skmth_cnt == th_max);
	lck_mtx_unlock(&skmt_lock);

	/* release all workers at once */
	lck_mtx_lock(&skmt_lock);
	VERIFY(!skmth_run);
	skmth_run = TRUE;
	wakeup((caddr_t)&skmth_run);
	lck_mtx_unlock(&skmt_lock);

	/* wait until all threads are done (skmth_cnt drops back to 0) */
	lck_mtx_lock(&skmt_lock);
	do {
		/* 100ms poll timeout, expressed in nanoseconds */
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * USEC_PER_SEC };
		(void) msleep(&skmth_cnt, &skmt_lock, (PZERO - 1),
		    "skmtstopw", &ts);
	} while (skmth_cnt != 0);
	skmth_run = FALSE;
	lck_mtx_unlock(&skmt_lock);

	if (mph != 0) {
		/* all worker clones must be gone; only our reference remains */
		VERIFY((buflet = kern_packet_get_next_buflet( mph, NULL)) != NULL);
		VERIFY(buflet->buf_ctl->bc_usecnt == 1);
		kern_pbufpool_free(skmth_pp, mph);
		mph = 0;
	}
	kern_pbufpool_destroy(skmth_pp);
	skmth_pp = NULL;
}
1928 
/*
 * Worker thread body for skmem_advanced_tests().  "v" carries this
 * worker's index into skmth_info[].  The worker announces itself by
 * bumping skmth_cnt, blocks until skmth_run is set, then runs 41 rounds
 * of alloc/free (and clone verification, when a master packet was
 * provided) against skmth_pp, forcing cache reaps along the way.  It
 * signals completion by decrementing skmth_cnt and terminates itself.
 */
__attribute__((noreturn))
static void
skmem_test_func(void *v, wait_result_t w)
{
#pragma unused(w)
	int i = (int)(uintptr_t)v, c;
	kern_packet_t ph = 0;

	/* let skmem_test_start() know we're ready */
	lck_mtx_lock(&skmt_lock);
	atomic_add_32(&skmth_cnt, 1);
	wakeup((caddr_t)&skmth_cnt);
	do {
		/* block until skmem_advanced_tests() raises the start flag */
		(void) msleep(&skmth_run, &skmt_lock, (PZERO - 1),
		    "skmtfuncw", NULL);
	} while (!skmth_run);
	lck_mtx_unlock(&skmt_lock);

	for (c = 0; c < 41; c++) {
		/* run alloc tests */
		VERIFY(skmth_pp != NULL);
		if (skmth_info[i].sti_nosleep) {
			/* non-blocking variant may legitimately fail */
			errno_t err = kern_pbufpool_alloc_nosleep(skmth_pp,
			    1, &ph);
			VERIFY(ph != 0 || err != 0);
		} else {
			VERIFY(kern_pbufpool_alloc(skmth_pp, 1, &ph) == 0);
		}

		if (ph != 0) {
			kern_pbufpool_free(skmth_pp, ph);
			ph = 0;
		}

		/* run clone tests (only when a master packet was provided) */
		if (skmth_info[i].sti_mph != 0) {
			kern_buflet_t buflet, buflet2;
			kern_obj_idx_seg_t buf_idx_seg, buf2_idx_seg;

			if (skmth_info[i].sti_nosleep) {
				errno_t err;
				err = kern_packet_clone_nosleep(skmth_info[i].sti_mph,
				    &skmth_info[i].sti_mpc, KPKT_COPY_LIGHT);
				VERIFY(skmth_info[i].sti_mpc != 0 || err != 0);
			} else {
				VERIFY(kern_packet_clone(skmth_info[i].sti_mph,
				    &skmth_info[i].sti_mpc, KPKT_COPY_LIGHT) == 0);
			}
			if (skmth_info[i].sti_mpc != 0) {
				/* a fresh clone must not yet be finalized */
				VERIFY(!(QUM_ADDR(skmth_info[i].sti_mpc)->qum_qflags & QUM_F_FINALIZED));
				VERIFY((buflet = kern_packet_get_next_buflet(
					    skmth_info[i].sti_mph, NULL)) != NULL);
				VERIFY((buflet2 = kern_packet_get_next_buflet(
					    skmth_info[i].sti_mpc, NULL)) != NULL);
				/*
				 * A light clone shares the master's buffer:
				 * every buflet attribute must match exactly.
				 */
				VERIFY(kern_buflet_get_object_address(buflet) ==
				    kern_buflet_get_object_address(buflet2));
				VERIFY(kern_buflet_get_data_address(buflet) ==
				    kern_buflet_get_data_address(buflet2));
				VERIFY(kern_buflet_get_data_limit(buflet) ==
				    kern_buflet_get_data_limit(buflet2));
				VERIFY(kern_buflet_get_data_offset(buflet) ==
				    kern_buflet_get_data_offset(buflet2));
				VERIFY(kern_buflet_get_data_length(buflet) ==
				    kern_buflet_get_data_length(buflet2));
				VERIFY(kern_buflet_get_object_segment(buflet,
				    &buf_idx_seg) ==
				    kern_buflet_get_object_segment(buflet2,
				    &buf2_idx_seg));
				VERIFY(buf_idx_seg == buf2_idx_seg);
				VERIFY(buflet->buf_ctl == buflet2->buf_ctl);
				VERIFY(__packet_finalize(skmth_info[i].sti_mpc) == 0);
				kern_pbufpool_free(skmth_pp, skmth_info[i].sti_mpc);
				skmth_info[i].sti_mpc = 0;
			}
			/* clear the handle so the clone test runs only once */
			skmth_info[i].sti_mph = 0;
		}

		/* force cache purges to exercise related code paths */
		if (skmth_pp->pp_kmd_cache != NULL) {
			skmem_cache_reap_now(skmth_pp->pp_kmd_cache, TRUE);
		}
		if (PP_BUF_CACHE_DEF(skmth_pp) != NULL) {
			skmem_cache_reap_now(PP_BUF_CACHE_DEF(skmth_pp), TRUE);
		}
		if (PP_KBFT_CACHE_DEF(skmth_pp) != NULL) {
			skmem_cache_reap_now(PP_KBFT_CACHE_DEF(skmth_pp), TRUE);
		}
		if (skmth_pp->pp_raw_kbft_cache != NULL) {
			skmem_cache_reap_now(skmth_pp->pp_raw_kbft_cache, TRUE);
		}
	}

	/* let skmem_test_start() know we're finished */
	lck_mtx_lock(&skmt_lock);
	VERIFY(atomic_add_32_ov(&skmth_cnt, -1) != 0);
	wakeup((caddr_t)&skmth_cnt);
	lck_mtx_unlock(&skmt_lock);

	/* for the extra refcnt from kernel_thread_start() */
	thread_deallocate(current_thread());

	thread_terminate(current_thread());
	__builtin_unreachable();
	/* NOTREACHED */
}
2034 
/* count of live skmem_test_obj instances (ctor increments, dtor decrements) */
static int skmem_test_objs;

/* test object: two XOR-canary words stamped by the ctor, checked by the dtor */
struct skmem_test_obj {
	uint64_t        sto_val[2];
};
2040 
2041 static int
skmem_test_ctor(struct skmem_obj_info * oi,struct skmem_obj_info * oim,void * arg,uint32_t skmflag)2042 skmem_test_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
2043     void *arg, uint32_t skmflag)
2044 {
2045 #pragma unused(skmflag)
2046 	struct skmem_test_obj *sto = SKMEM_OBJ_ADDR(oi);
2047 
2048 	VERIFY(oim == NULL);
2049 	VERIFY(arg == &skmem_test_init);
2050 	VERIFY(SKMEM_OBJ_SIZE(oi) >= sizeof(struct skmem_test_obj));
2051 	sto->sto_val[0] = (uint64_t)(void *)sto ^
2052 	    (uint64_t)(void *)&sto->sto_val[0];
2053 	sto->sto_val[1] = (uint64_t)(void *)sto ^
2054 	    (uint64_t)(void *)&sto->sto_val[1];
2055 	atomic_add_32(&skmem_test_objs, 1);
2056 
2057 	return 0;
2058 }
2059 
2060 static void
skmem_test_dtor(void * addr,void * arg)2061 skmem_test_dtor(void *addr, void *arg)
2062 {
2063 	struct skmem_test_obj *sto = addr;
2064 
2065 	VERIFY(arg == &skmem_test_init);
2066 	VERIFY((sto->sto_val[0] ^ (uint64_t)(void *)&sto->sto_val[0]) ==
2067 	    (uint64_t)(void *)sto);
2068 	VERIFY((sto->sto_val[1] ^ (uint64_t)(void *)&sto->sto_val[1]) ==
2069 	    (uint64_t)(void *)sto);
2070 	VERIFY(skmem_test_objs > 0);
2071 	atomic_add_32(&skmem_test_objs, -1);
2072 }
2073 
2074 static void
skmem_tests(uint32_t align)2075 skmem_tests(uint32_t align)
2076 {
2077 	struct skmem_cache *skm;
2078 	uint32_t bufsize = sizeof(struct skmem_test_obj);
2079 
2080 	uint32_t objary_max = (uint32_t)MAX_PH_ARY;
2081 	void **objary = NULL;
2082 	char name[64];
2083 
2084 	VERIFY(align != 0);
2085 
2086 	SK_ERR("bufsize %u align %u", bufsize, align);
2087 
2088 	objary = kalloc_type(void *, objary_max, Z_WAITOK | Z_ZERO);
2089 
2090 	(void) snprintf(name, sizeof(name), "skmem_test.%u.%u", bufsize, align);
2091 
2092 	skm = skmem_cache_create(name, bufsize, align, skmem_test_ctor,
2093 	    skmem_test_dtor, NULL, &skmem_test_init, NULL, 0);
2094 
2095 	VERIFY(skmem_test_objs == 0);
2096 	for (int i = 0; i < objary_max; i++) {
2097 		objary[i] = skmem_cache_alloc(skm, SKMEM_SLEEP);
2098 		VERIFY(objary[i] != NULL);
2099 		VERIFY(IS_P2ALIGNED(objary[i], align));
2100 	}
2101 	for (int i = 0; i < objary_max; i++) {
2102 		VERIFY(objary[i] != NULL);
2103 		skmem_cache_free(skm, objary[i]);
2104 		objary[i] = NULL;
2105 	}
2106 	skmem_cache_destroy(skm);
2107 	VERIFY(skmem_test_objs == 0);
2108 
2109 	kfree_type(void *, objary_max, objary);
2110 	objary = NULL;
2111 }
2112 
2113 static void
skmem_test_start(void * v,wait_result_t w)2114 skmem_test_start(void *v, wait_result_t w)
2115 {
2116 	int32_t ncpus = ml_wait_max_cpus();
2117 	int error = 0, n;
2118 	uint32_t flags;
2119 	uint64_t mtbf_saved;
2120 
2121 	lck_mtx_lock(&skmt_lock);
2122 	VERIFY(!skmt_busy);
2123 	skmt_busy = 1;
2124 	skmem_cache_test_start(1);      /* 1 second update interval */
2125 	lck_mtx_unlock(&skmt_lock);
2126 
2127 	VERIFY(skmth_info == NULL);
2128 	skmth_info_size = sizeof(struct skmt_thread_info) * ncpus;
2129 	skmth_info = (struct skmt_thread_info *) kalloc_data(skmth_info_size,
2130 	    Z_WAITOK | Z_ZERO);
2131 
2132 	/*
2133 	 * Sanity tests.
2134 	 */
2135 	(void) skmem_cache_magazine_max(1);
2136 	(void) skmem_cache_magazine_max(32);
2137 	(void) skmem_cache_magazine_max(64);
2138 	(void) skmem_cache_magazine_max(128);
2139 	(void) skmem_cache_magazine_max(256);
2140 	(void) skmem_cache_magazine_max(512);
2141 	(void) skmem_cache_magazine_max(1024);
2142 	(void) skmem_cache_magazine_max(2048);
2143 	(void) skmem_cache_magazine_max(4096);
2144 	(void) skmem_cache_magazine_max(8192);
2145 	(void) skmem_cache_magazine_max(16384);
2146 	(void) skmem_cache_magazine_max(32768);
2147 	(void) skmem_cache_magazine_max(65536);
2148 
2149 	/*
2150 	 * skmem allocator tests
2151 	 */
2152 	skmem_tests(8);
2153 	skmem_tests(16);
2154 	skmem_tests(32);
2155 	skmem_tests(64);
2156 	skmem_tests(128);
2157 
2158 	/*
2159 	 * Basic packet buffer pool sanity tests
2160 	 */
2161 	skmem_basic_tests();
2162 
2163 	/*
2164 	 * Multi-threaded alloc and free tests (blocking).
2165 	 */
2166 	for (n = 0; n < 7; n++) {
2167 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2168 		skmem_advanced_tests(n, ncpus, 0, FALSE, flags);
2169 	}
2170 	for (n = 0; n < 7; n++) {
2171 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2172 		skmem_advanced_tests(n, ncpus, 0, TRUE, flags);
2173 	}
2174 	for (n = 0; n < 7; n++) {
2175 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2176 		skmem_advanced_tests(n, ncpus, 1, FALSE, flags);
2177 	}
2178 	for (n = 0; n < 7; n++) {
2179 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2180 		skmem_advanced_tests(n, ncpus, 1, TRUE, flags);
2181 	}
2182 	for (n = 0; n < 7; n++) {
2183 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2184 		skmem_advanced_tests(n, ncpus, 2, FALSE, flags);
2185 	}
2186 	for (n = 0; n < 7; n++) {
2187 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2188 		skmem_advanced_tests(n, ncpus, 2, TRUE, flags);
2189 	}
2190 	for (n = 0; n < 7; n++) {
2191 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2192 		skmem_advanced_tests(n, ncpus, 3, FALSE, flags);
2193 	}
2194 	for (n = 0; n < 7; n++) {
2195 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2196 		skmem_advanced_tests(n, ncpus, 3, TRUE, flags);
2197 	}
2198 	for (n = 0; n < 7; n++) {
2199 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2200 		skmem_advanced_tests(n, ncpus, 4, FALSE, flags);
2201 	}
2202 	for (n = 0; n < 7; n++) {
2203 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2204 		skmem_advanced_tests(n, ncpus, 5, FALSE, flags);
2205 	}
2206 
2207 	/*
2208 	 * Modes 4-5 deal with persistent/mirrored regions, and to
2209 	 * maximize the chance of exercising the allocation failures
2210 	 * handling we lower the MTBF (if set) to the minimum possible,
2211 	 * and restore it to the saved value later.
2212 	 */
2213 	mtbf_saved = skmem_region_get_mtbf();
2214 	if (mtbf_saved != 0) {
2215 		skmem_region_set_mtbf(SKMEM_REGION_MTBF_MIN);
2216 	}
2217 
2218 	/*
2219 	 * Multi-threaded alloc and free tests (non-blocking).
2220 	 */
2221 
2222 	for (n = 0; n < 7; n++) {
2223 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2224 		skmem_advanced_tests(n, ncpus, 4, TRUE, flags);
2225 	}
2226 	for (n = 0; n < 7; n++) {
2227 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2228 		skmem_advanced_tests(n, ncpus, 5, TRUE, flags);
2229 	}
2230 
2231 	/*
2232 	 * Restore MTBF to previous set value.
2233 	 */
2234 	if (mtbf_saved != 0) {
2235 		skmem_region_set_mtbf(mtbf_saved);
2236 	}
2237 
2238 	for (n = 0; n < 7; n++) {
2239 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2240 		skmem_advanced_tests(n, ncpus, 6, FALSE, flags);
2241 	}
2242 	for (n = 0; n < 7; n++) {
2243 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2244 		skmem_advanced_tests(n, ncpus, 6, TRUE, flags);
2245 	}
2246 	for (n = 0; n < 7; n++) {
2247 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2248 		skmem_advanced_tests(n, ncpus, 7, FALSE, flags);
2249 	}
2250 	for (n = 0; n < 7; n++) {
2251 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2252 		skmem_advanced_tests(n, ncpus, 7, TRUE, flags);
2253 	}
2254 	for (n = 0; n < 7; n++) {
2255 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2256 		skmem_advanced_tests(n, ncpus, 8, FALSE, flags);
2257 	}
2258 	for (n = 0; n < 7; n++) {
2259 		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
2260 		skmem_advanced_tests(n, ncpus, 8, TRUE, flags);
2261 	}
2262 
2263 	lck_mtx_lock(&skmt_lock);
2264 	skmt_enabled = 1;
2265 	wakeup((caddr_t)&skmt_enabled);
2266 	lck_mtx_unlock(&skmt_lock);
2267 
2268 	if (error != 0) {
2269 		skmem_test_stop(v, w);
2270 	}
2271 }
2272 
/*
 * Tear down state created by skmem_test_start() and mark the test
 * framework idle; runs either as the "stop" action thread spawned by
 * the sysctl handler, or directly from skmem_test_start() on failure.
 */
static void
skmem_test_stop(void *v, wait_result_t w)
{
#pragma unused(v, w)

	/* release the per-worker state array, if it was allocated */
	if (skmth_info != NULL) {
		kfree_data(skmth_info, skmth_info_size);
		skmth_info = NULL;
	}

	lck_mtx_lock(&skmt_lock);
	skmem_cache_test_stop();
	VERIFY(skmt_busy);
	skmt_busy = 0;
	skmt_enabled = 0;
	/* unblock the sysctl handler waiting on skmt_enabled */
	wakeup((caddr_t)&skmt_enabled);
	lck_mtx_unlock(&skmt_lock);
}
2291 
/*
 * sysctl handler for kern.skywalk.mem.test.  Writing a non-zero value
 * spawns a thread running skmem_test_start(); writing zero spawns one
 * running skmem_test_stop().  The handler then sleeps (interruptibly)
 * until the action thread signals completion via skmt_enabled/skmt_busy.
 * Returns 0 on success, EBUSY if a prior instance is still active or the
 * action thread cannot be created, or the sysctl_io_number() error.
 */
static int
sysctl_skmem_test(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, newvalue, changed;

	lck_mtx_lock(&skmt_lock);
	if ((error = sysctl_io_number(req, skmt_enabled, sizeof(int),
	    &newvalue, &changed)) != 0) {
		goto done;
	}

	/* act only on an actual state transition */
	if (changed && skmt_enabled != newvalue) {
		thread_t th;
		thread_continue_t func;

		if (newvalue && skmt_busy) {
			SK_ERR("Older skmem test instance is still active");
			error = EBUSY;
			goto done;
		}

		if (newvalue) {
			func = skmem_test_start;
		} else {
			func = skmem_test_stop;
		}

		if (kernel_thread_start(func, NULL, &th) != KERN_SUCCESS) {
			SK_ERR("Failed to create skmem test action thread");
			error = EBUSY;
			goto done;
		}
		do {
			SK_DF(SK_VERB_MEM, "Waiting for %s to complete",
			    newvalue ? "startup" : "shutdown");
			/* PCATCH: allow the waiting process to be signalled */
			error = msleep(&skmt_enabled, &skmt_lock,
			    PWAIT | PCATCH, "skmtw", NULL);
			/* BEGIN CSTYLED */
			/*
			 * Loop exit conditions:
			 *   - we were interrupted
			 *     OR
			 *   - we are starting up and are enabled
			 *     (Startup complete)
			 *     OR
			 *   - we are starting up and are not busy
			 *     (Failed startup)
			 *     OR
			 *   - we are shutting down and are not busy
			 *     (Shutdown complete)
			 */
			/* END CSTYLED */
		} while (!((error == EINTR) || (newvalue && skmt_enabled) ||
		    (newvalue && !skmt_busy) || (!newvalue && !skmt_busy)));

		/* drop the reference from kernel_thread_start() */
		thread_deallocate(th);
	}

done:
	lck_mtx_unlock(&skmt_lock);
	return error;
}
2355 
/* kern.skywalk.mem.test: write 1 to start the test suite, 0 to tear down */
SYSCTL_PROC(_kern_skywalk_mem, OID_AUTO, test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    sysctl_skmem_test, "I", "Start Skywalk memory test");
2359 
2360 __typed_allocators_ignore_pop
2361 
2362 #endif /* DEVELOPMENT || DEBUG */
2363