xref: /xnu-10002.81.5/osfmk/tests/kernel_tests.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_protos.h>
48 #include <string.h>
49 #include <kern/kern_apfs_reflock.h>
50 
51 #if !(DEVELOPMENT || DEBUG)
52 #error "Testing is not enabled on RELEASE configurations"
53 #endif
54 
55 #include <tests/xnupost.h>
56 
57 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
58 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
59 
60 uint32_t total_post_tests_count = 0;
61 void xnupost_reset_panic_widgets(void);
62 
63 /* test declarations */
64 kern_return_t zalloc_test(void);
65 kern_return_t RandomULong_test(void);
66 kern_return_t kcdata_api_test(void);
67 kern_return_t ts_kernel_primitive_test(void);
68 kern_return_t ts_kernel_sleep_inheritor_test(void);
69 kern_return_t ts_kernel_gate_test(void);
70 kern_return_t ts_kernel_turnstile_chain_test(void);
71 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
72 
73 #if __ARM_VFP__
74 extern kern_return_t vfp_state_test(void);
75 #endif
76 
77 extern kern_return_t kprintf_hhx_test(void);
78 
79 #if defined(__arm64__)
80 kern_return_t pmap_coredump_test(void);
81 #endif
82 
83 extern kern_return_t console_serial_test(void);
84 extern kern_return_t console_serial_parallel_log_tests(void);
85 extern kern_return_t test_printf(void);
86 extern kern_return_t test_os_log(void);
87 extern kern_return_t test_os_log_parallel(void);
88 extern kern_return_t bitmap_post_test(void);
89 extern kern_return_t counter_tests(void);
90 #if ML_IO_TIMEOUTS_ENABLED
91 extern kern_return_t ml_io_timeout_test(void);
92 #endif
93 
94 #ifdef __arm64__
95 extern kern_return_t arm64_munger_test(void);
96 #if __ARM_PAN_AVAILABLE__
97 extern kern_return_t arm64_pan_test(void);
98 #endif
99 #if defined(HAS_APPLE_PAC)
100 extern kern_return_t arm64_ropjop_test(void);
101 #endif /* defined(HAS_APPLE_PAC) */
102 #endif /* __arm64__ */
103 
104 extern kern_return_t test_thread_call(void);
105 
106 
/*
 * Currently armed panic widget. A single widget may be registered at a time
 * (see xnupost_register_panic_widget()); all fields are NULL when no widget
 * is armed. Consulted from the panic/kdb_stop path by
 * xnupost_process_kdb_stop().
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
111 
/*
 * Master table of kernel POST (power-on self test) cases. Entries are
 * conditionally compiled per architecture/feature; each is wrapped in
 * XNUPOST_TEST_CONFIG_BASIC so it runs as a normal (non-panicking) test.
 * Test numbers are assigned lazily by xnupost_list_tests().
 */
struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_printf),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
#if __ARM_PAN_AVAILABLE__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */
	                                   XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm64__)
	                                   XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	                                   //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	                                   XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	                                   XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	                                   XNUPOST_TEST_CONFIG_BASIC(counter_tests),
#if ML_IO_TIMEOUTS_ENABLED
	                                   XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
#endif
};
150 
151 uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
152 
/* Bit flags for the "kernPOST" boot-arg, accumulated in kernel_post_args. */
#define POSTARGS_RUN_TESTS 0x1            /* actually execute the POST tests at boot */
#define POSTARGS_CONTROLLER_AVAILABLE 0x2 /* external controller present; panic tests may run */
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4  /* "kernPOST_config" supplied a test-number run list */
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
/* Cached result of xnupost_parse_config(); KERN_INVALID_CAPABILITY == not parsed yet. */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
/* Raw "kernPOST_config" boot-arg string: comma-separated test numbers/ranges. */
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
162 
163 kern_return_t
xnupost_parse_config()164 xnupost_parse_config()
165 {
166 	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
167 		return parse_config_retval;
168 	}
169 	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
170 
171 	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
172 		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
173 	}
174 
175 	if (kernel_post_args != 0) {
176 		parse_config_retval = KERN_SUCCESS;
177 		goto out;
178 	}
179 	parse_config_retval = KERN_NOT_SUPPORTED;
180 out:
181 	return parse_config_retval;
182 }
183 
184 boolean_t
xnupost_should_run_test(uint32_t test_num)185 xnupost_should_run_test(uint32_t test_num)
186 {
187 	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
188 		int64_t begin = 0, end = 999999;
189 		char * b = kernel_post_test_configs;
190 		while (*b) {
191 			get_range_bounds(b, &begin, &end);
192 			if (test_num >= begin && test_num <= end) {
193 				return TRUE;
194 			}
195 
196 			/* skip to the next "," */
197 			while (*b != ',') {
198 				if (*b == '\0') {
199 					return FALSE;
200 				}
201 				b++;
202 			}
203 			/* skip past the ',' */
204 			b++;
205 		}
206 		return FALSE;
207 	}
208 	return TRUE;
209 }
210 
211 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)212 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
213 {
214 	if (KERN_SUCCESS != xnupost_parse_config()) {
215 		return KERN_FAILURE;
216 	}
217 
218 	xnupost_test_t testp;
219 	for (uint32_t i = 0; i < test_count; i++) {
220 		testp = &test_list[i];
221 		if (testp->xt_test_num == 0) {
222 			assert(total_post_tests_count < UINT16_MAX);
223 			testp->xt_test_num = (uint16_t)++total_post_tests_count;
224 		}
225 		/* make sure the boot-arg based test run list is honored */
226 		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
227 			testp->xt_config |= XT_CONFIG_IGNORE;
228 			if (xnupost_should_run_test(testp->xt_test_num)) {
229 				testp->xt_config &= ~(XT_CONFIG_IGNORE);
230 				testp->xt_config |= XT_CONFIG_RUN;
231 				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
232 			}
233 		}
234 		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
235 		    testp->xt_config);
236 	}
237 
238 	return KERN_SUCCESS;
239 }
240 
/*
 * Execute every runnable test in `test_list`, recording begin/end
 * timestamps, the T_* test result, and a pass/fail/skip action on each
 * entry. Returns KERN_SUCCESS; per-test outcomes live in the entries.
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	/* POST only runs when explicitly requested via the kernPOST boot-arg. */
	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* Disarm any panic widget left over from the previous test. */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		/* Tests deselected by the custom run list carry XT_CONFIG_IGNORE. */
		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		/* A test passes when it produced exactly the expected T_ result. */
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
305 
306 kern_return_t
kernel_list_tests()307 kernel_list_tests()
308 {
309 	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
310 }
311 
312 kern_return_t
kernel_do_post()313 kernel_do_post()
314 {
315 	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
316 }
317 
318 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)319 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
320 {
321 	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
322 		return KERN_RESOURCE_SHORTAGE;
323 	}
324 
325 	xt_panic_widgets.xtp_context_p = context;
326 	xt_panic_widgets.xtp_func      = funcp;
327 	xt_panic_widgets.xtp_func_name = funcname;
328 	xt_panic_widgets.xtp_outval_p  = outval;
329 
330 	return KERN_SUCCESS;
331 }
332 
333 void
xnupost_reset_panic_widgets()334 xnupost_reset_panic_widgets()
335 {
336 	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
337 }
338 
/*
 * Hook invoked from the panic/kdb_stop path with the panic string.
 * Dispatches to the armed panic widget (if any) and translates its verdict:
 *
 *   KERN_SUCCESS  -> return from the panic/assertion and continue running
 *   KERN_FAILURE  -> proceed with the normal kdb_stop/panic flow
 *
 * Returns KERN_INVALID_CAPABILITY when POST is not configured or no widget
 * is armed, in which case the caller proceeds with the panic as usual.
 */
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval         = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	/* Map the widget's verdict onto continue-vs-panic behavior. */
	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
385 
386 xt_panic_return_t
_xt_generic_assert_check(const char * s,void * str_to_match,void ** outval)387 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
388 {
389 	xt_panic_return_t ret = XT_PANIC_UNRELATED;
390 
391 	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
392 		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
393 		ret = XT_RET_W_SUCCESS;
394 	}
395 
396 	if (outval) {
397 		*outval = (void *)(uintptr_t)ret;
398 	}
399 	return ret;
400 }
401 
402 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)403 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
404 {
405 	uint32_t i = 0;
406 	xnupost_test_t testp;
407 	for (; i < test_count; i++) {
408 		testp                  = &test_list[i];
409 		testp->xt_begin_time   = 0;
410 		testp->xt_end_time     = 0;
411 		testp->xt_test_actions = XT_ACTION_NONE;
412 		testp->xt_retval       = -1;
413 	}
414 	return KERN_SUCCESS;
415 }
416 
417 
/*
 * Smoke-test the zone allocator: create a destructible zone sized for
 * uint64_t elements, verify it starts with no free elements, then allocate
 * and free a single element from it.
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* A freshly created zone should not have any free elements yet. */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
441 
/*
 * qsort() comparator ordering uint64_t values ascending.
 * Returns -1, 0, or 1 as *a is less than, equal to, or greater than *b.
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t lhs = *(const uint64_t *)a;
	const uint64_t rhs = *(const uint64_t *)b;

	/* (lhs > rhs) - (lhs < rhs) yields -1/0/1 without any overflow risk. */
	return (lhs > rhs) - (lhs < rhs);
}
458 
/*
 * Count the number of set bits (population count) in `number`.
 * Uses Kernighan's method: each iteration clears the lowest set bit,
 * so the loop runs once per set bit.
 */
static int
count_bits(uint64_t number)
{
	int set_bits = 0;

	while (number != 0) {
		number &= (number - 1);
		set_bits++;
	}
	return set_bits;
}
468 
469 kern_return_t
RandomULong_test()470 RandomULong_test()
471 {
472 /*
473  * Randomness test for RandomULong()
474  *
475  * This test verifies that:
476  *  a. RandomULong works
477  *  b. The generated numbers match the following entropy criteria:
478  *     For a thousand iterations, verify:
479  *          1. mean entropy > 12 bits
480  *          2. min entropy > 4 bits
481  *          3. No Duplicate
482  *          4. No incremental/decremental pattern in a window of 3
483  *          5. No Zero
484  *          6. No -1
485  *
486  * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
487  */
488 
489 #define CONF_MIN_ENTROPY 4
490 #define CONF_MEAN_ENTROPY 12
491 #define CONF_ITERATIONS 1000
492 #define CONF_WINDOW_SIZE 3
493 #define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0
494 
495 	int i;
496 	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
497 	uint32_t aggregate_bit_entropy = 0;
498 	uint32_t mean_bit_entropy      = 0;
499 	uint64_t numbers[CONF_ITERATIONS];
500 	min_bit_entropy = UINT32_MAX;
501 	max_bit_entropy = 0;
502 
503 	/*
504 	 * TEST 1: Number generation and basic and basic validation
505 	 * Check for non-zero (no bits set), -1 (all bits set) and error
506 	 */
507 	for (i = 0; i < CONF_ITERATIONS; i++) {
508 		read_random(&numbers[i], sizeof(numbers[i]));
509 		if (numbers[i] == 0) {
510 			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
511 		}
512 		if (numbers[i] == UINT64_MAX) {
513 			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
514 		}
515 	}
516 	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);
517 
518 	/*
519 	 * TEST 2: Mean and Min Bit Entropy
520 	 * Check the bit entropy and its mean over the generated numbers.
521 	 */
522 	for (i = 1; i < CONF_ITERATIONS; i++) {
523 		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
524 		if (bit_entropy < min_bit_entropy) {
525 			min_bit_entropy = bit_entropy;
526 		}
527 		if (bit_entropy > max_bit_entropy) {
528 			max_bit_entropy = bit_entropy;
529 		}
530 
531 		if (bit_entropy < CONF_MIN_ENTROPY) {
532 			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
533 			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
534 		}
535 
536 		aggregate_bit_entropy += bit_entropy;
537 	}
538 	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);
539 
540 	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
541 	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
542 	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
543 	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
544 	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
545 	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
546 	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
547 	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");
548 
549 	/*
550 	 * TEST 3: Incremental Pattern Search
551 	 * Check that incremental/decremental pattern does not exist in the given window
552 	 */
553 	int window_start, window_end, trend;
554 	window_start = window_end = trend = 0;
555 
556 	do {
557 		/*
558 		 * Set the window
559 		 */
560 		window_end = window_start + CONF_WINDOW_SIZE - 1;
561 		if (window_end >= CONF_ITERATIONS) {
562 			window_end = CONF_ITERATIONS - 1;
563 		}
564 
565 		trend = 0;
566 		for (i = window_start; i < window_end; i++) {
567 			if (numbers[i] < numbers[i + 1]) {
568 				trend++;
569 			} else if (numbers[i] > numbers[i + 1]) {
570 				trend--;
571 			}
572 		}
573 		/*
574 		 * Check that there is no increasing or decreasing trend
575 		 * i.e. trend <= ceil(window_size/2)
576 		 */
577 		if (trend < 0) {
578 			trend = -trend;
579 		}
580 		if (trend > CONF_WINDOW_TREND_LIMIT) {
581 			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
582 		}
583 
584 		/*
585 		 * Move to the next window
586 		 */
587 		window_start++;
588 	} while (window_start < (CONF_ITERATIONS - 1));
589 	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);
590 
591 	/*
592 	 * TEST 4: Find Duplicates
593 	 * Check no duplicate values are generated
594 	 */
595 	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
596 	for (i = 1; i < CONF_ITERATIONS; i++) {
597 		if (numbers[i] == numbers[i - 1]) {
598 			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
599 		}
600 	}
601 	T_PASS("Test did not find any duplicates as expected.");
602 
603 	return KERN_SUCCESS;
604 }
605 
606 
/* KCDATA kernel api tests */
/* Shared descriptor reused across the kcdata API test below. */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/* Sample payload layout used to exercise custom kcdata type registration. */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));

/*
 * Subtype descriptors mirroring struct sample_disk_io_stats field-for-field;
 * offsets are expressed in uint64_t-sized slots to match the packed layout.
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		/* 4-element uint64_t array; size packs count and element size. */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
646 
/*
 * Exercise the kcdata API end to end: negative argument checks for
 * kcdata_memory_static_init(), a successful MEMCOPY-backed buffer,
 * kcdata_get_memory_addr() edge cases (NULL args, zero size, oversize),
 * the uint32/uint64-with-description helpers, array entries, and custom
 * type registration against test_disk_io_stats_def.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* Poison the length so we can verify init actually rewrites it. */
	test_kc_data.kcd_length   = 0xdeadbeef;

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	/* A fresh buffer holds exactly the BEGIN and END item headers. */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	/* Allocation larger than the buffer must fail and leave state untouched. */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* Array item flags encode the element type in the high 32 bits and the count low. */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
757 
758 /*
759  *  kern_return_t
760  *  kcdata_api_assert_tests()
761  *  {
762  *       kern_return_t retval       = 0;
763  *       void * assert_check_retval = NULL;
764  *       test_kc_data2.kcd_length   = 0xdeadbeef;
765  *       mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
766  *       T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
767  *
768  *       retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
769  *                                          KCFLAG_USE_MEMCOPY);
770  *
771  *       T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
772  *
773  *       retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
774  *       T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
775  *
776  *       // this will assert
777  *       retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
778  *       T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
779  *       T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
780  *
781  *       return KERN_SUCCESS;
782  *  }
783  */
784 
785 #if defined(__arm64__)
786 
787 #include <arm/pmap.h>
788 
789 #define MAX_PMAP_OBJECT_ELEMENT 100000
790 
791 extern struct vm_object pmap_object_store; /* store pt pages */
792 extern unsigned long gPhysBase, gPhysSize, first_avail;
793 
794 /*
795  * Define macros to transverse the pmap object structures and extract
796  * physical page number with information from low global only
797  * This emulate how Astris extracts information from coredump
798  */
799 #if defined(__arm64__)
800 
801 static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)802 astris_vm_page_unpack_ptr(uintptr_t p)
803 {
804 	if (!p) {
805 		return (uintptr_t)0;
806 	}
807 
808 	return (p & lowGlo.lgPmapMemFromArrayMask)
809 	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
810 	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
811 }
812 
// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

/* First element of a queue: unpack the head's next pointer. */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* Iteration ends when the walk returns to the queue head. */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

/* Walk every vm_page in the queue using only lowGlo-described layout. */
#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* Page number -> physical address using the page shift from lowGlo. */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
827 
828 static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)829 astris_vm_page_get_phys_page(uintptr_t m)
830 {
831 	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
832 	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
833 	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
834 }
835 
/*
 * Verify that the low-globals page (lowGlo) advertises pmap layout
 * information consistent with the running kernel, and that every page in
 * pmap_object_store can be reached and translated using only lowGlo
 * fields — i.e. the walk the Astris debugger performs on a coredump.
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	/* The static region described by lowGlo must sit inside [gPhysBase, first_avail). */
	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	/* Layout version and magic that coredump consumers key off. */
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	/* Packed-pointer constants used by astris_vm_page_unpack_ptr(). */
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	vm_object_lock_shared(&pmap_object_store);
	/* Walk the pmap object's page queue the way a coredump consumer would. */
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		/* Every page must lie within managed physical memory. */
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* Guard against a corrupted or looping queue. */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* The queue must contain at least one page. */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
877 #endif /* defined(__arm64__) */
878 
/*
 * Per-thread arguments for thread_lock_unlock_kernel_primitive().
 * The int* members are counters shared with sibling threads and the
 * spawning test, used through wait_threads()/wake_threads().
 */
struct ts_kern_prim_test_args {
	int *end_barrier;       /* bumped when this thread finishes */
	int *notify_b;          /* bumped before taking the test lock */
	int *wait_event_b;      /* waited on (until == before_num) before taking the lock */
	int before_num;
	int *notify_a;          /* bumped after taking the lock */
	int *wait_event_a;      /* waited on (until == after_num) while holding the lock */
	int after_num;
	int priority_to_check;  /* if non-zero, expected sched_pri while holding the lock */
};
889 
/*
 * Block the calling thread until *var reaches num.
 *
 * Pairs with wake_threads(): waiters sleep on the counter's address and
 * wakers increment the counter then thread_wakeup() the same event.
 * A NULL var means there is nothing to wait for.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			/* Re-check after registering the wait to close the race with a concurrent wake_threads(). */
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				/* Counter hit num between the check and the wait registration; cancel the wait. */
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
906 
907 static void
wake_threads(int * var)908 wake_threads(
909 	int* var)
910 {
911 	if (var) {
912 		os_atomic_inc(var, relaxed);
913 		thread_wakeup((event_t) var);
914 	}
915 }
916 
917 extern void IOSleep(int);
918 
/*
 * Thread body for ts_kernel_primitive_test().
 *
 * Synchronizes with sibling threads through the counters in args, takes
 * the turnstile test lock, and — if priority_to_check is set — verifies
 * that waiter priority has been pushed onto this thread while it holds
 * the lock.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	/* Wait for the configured number of siblings before locking, then report progress. */
	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	/* Lock held: signal, then wait so the other threads get a chance to block on the turnstile. */
	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	IOSleep(100);

	if (info->priority_to_check) {
		/* Sample sched_pri under the thread lock and compare with the expected push. */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
952 
953 kern_return_t
ts_kernel_primitive_test(void)954 ts_kernel_primitive_test(void)
955 {
956 	thread_t owner, thread1, thread2;
957 	struct ts_kern_prim_test_args targs[2] = {};
958 	kern_return_t result;
959 	int end_barrier = 0;
960 	int owner_locked = 0;
961 	int waiters_ready = 0;
962 
963 	T_LOG("Testing turnstile kernel primitive");
964 
965 	targs[0].notify_b = NULL;
966 	targs[0].wait_event_b = NULL;
967 	targs[0].before_num = 0;
968 	targs[0].notify_a = &owner_locked;
969 	targs[0].wait_event_a = &waiters_ready;
970 	targs[0].after_num = 2;
971 	targs[0].priority_to_check = 90;
972 	targs[0].end_barrier = &end_barrier;
973 
974 	// Start owner with priority 80
975 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
976 	T_ASSERT(result == KERN_SUCCESS, "Starting owner");
977 
978 	targs[1].notify_b = &waiters_ready;
979 	targs[1].wait_event_b = &owner_locked;
980 	targs[1].before_num = 1;
981 	targs[1].notify_a = NULL;
982 	targs[1].wait_event_a = NULL;
983 	targs[1].after_num = 0;
984 	targs[1].priority_to_check = 0;
985 	targs[1].end_barrier = &end_barrier;
986 
987 	// Start waiters with priority 85 and 90
988 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
989 	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
990 
991 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
992 	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
993 
994 	wait_threads(&end_barrier, 3);
995 
996 	return KERN_SUCCESS;
997 }
998 
#define MTX_LOCK 0 /* prim_type value: exercise the lck_mtx_t */
#define RW_LOCK 1  /* prim_type value: exercise the lck_rw_t */

#define NUM_THREADS 4
1003 
/*
 * State shared by all threads of a synchronization test.
 */
struct synch_test_common {
	unsigned int nthreads;  /* number of entries in threads[] */
	thread_t *threads;      /* NULL until started; (thread_t)1 after exclude_current_waiter() */
	int max_pri;            /* highest priority handed out by start_threads() (capped at 95) */
	int test_done;          /* completion counter: notify_waiter() / wait_all_thread() */
};
1010 
1011 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1012 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1013 {
1014 	info->nthreads = nthreads;
1015 	info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1016 	if (!info->threads) {
1017 		return ENOMEM;
1018 	}
1019 
1020 	return KERN_SUCCESS;
1021 }
1022 
/*
 * Release the threads array allocated by init_synch_test_common().
 */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree_type(thread_t, info->nthreads, info->threads);
}
1028 
1029 static void
start_threads(thread_continue_t func,struct synch_test_common * info,bool sleep_after_first)1030 start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
1031 {
1032 	thread_t thread;
1033 	kern_return_t result;
1034 	uint i;
1035 	int priority = 75;
1036 
1037 	info->test_done = 0;
1038 
1039 	for (i = 0; i < info->nthreads; i++) {
1040 		info->threads[i] = NULL;
1041 	}
1042 
1043 	info->max_pri = priority + (info->nthreads - 1) * 5;
1044 	if (info->max_pri > 95) {
1045 		info->max_pri = 95;
1046 	}
1047 
1048 	for (i = 0; i < info->nthreads; i++) {
1049 		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
1050 		os_atomic_store(&info->threads[i], thread, release);
1051 		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);
1052 
1053 		priority += 5;
1054 
1055 		if (i == 0 && sleep_after_first) {
1056 			IOSleep(100);
1057 		}
1058 	}
1059 }
1060 
/*
 * Return the highest scheduling priority assigned by start_threads().
 */
static unsigned int
get_max_pri(struct synch_test_common * info)
{
	/* max_pri is an int, but start_threads() keeps it in [75, 95], so the conversion is safe. */
	return info->max_pri;
}
1066 
/*
 * Block until every test thread has called notify_waiter().
 */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1072 
/*
 * Signal that the calling test thread is done; pairs with wait_all_thread().
 */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1078 
/*
 * Poll until every other test thread has been started and has left the
 * run state (i.e. is blocked).  Used by the inheritor thread to ensure
 * all waiters are asleep — and their priority pushes applied — before it
 * checks its own inherited priority.
 *
 * A NULL slot means the thread has not been published yet; a slot of
 * (thread_t)1 marks a thread that excluded itself via
 * exclude_current_waiter() and is skipped.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* Wait for start_threads() to publish this slot. */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					/* Thread excluded itself; nothing to wait for. */
					break;
				}

				if (!(thread->state & TH_RUN)) {
					/* Thread is no longer runnable, i.e. blocked. */
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				/* A thread that has not started yet cannot leave TH_RUN; keep polling. */
				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1118 
/*
 * Replace the calling thread's slot in threads[] with the sentinel
 * (thread_t)1 so that wait_for_waiters() will not wait for it.
 */
static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* The slot may not be published yet; wait for start_threads(). */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
1139 
/*
 * Shared state for the sleep-with-inheritor and gate tests.
 */
struct info_sleep_inheritor_test {
	struct synch_test_common head;   /* common thread bookkeeping */
	lck_mtx_t mtx_lock;              /* used when prim_type == MTX_LOCK */
	lck_rw_t rw_lock;                /* used when prim_type == RW_LOCK */
	decl_lck_mtx_gate_data(, gate);  /* embedded gate */
	boolean_t gate_closed;
	int prim_type;                   /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;          /* highest waiter priority seen by thread_steal_work() */
	int synch_value;                 /* target value for the synch rendezvous counter */
	int synch;                       /* rendezvous counter (wait_threads/wake_threads) */
	int value;
	int handoff_failure;             /* count of wakeups that found no new inheritor */
	thread_t thread_inheritor;       /* current inheritor; its address doubles as the wait event */
	bool use_alloc_gate;             /* use alloc_gate instead of the embedded gate */
	gate_t *alloc_gate;              /* gate from primitive_gate_alloc() */
	struct obj_cached **obj_cache;
	kern_apfs_reflock_data(, reflock);
	int reflock_protected_status;
};
1161 
1162 static void
primitive_lock(struct info_sleep_inheritor_test * info)1163 primitive_lock(struct info_sleep_inheritor_test *info)
1164 {
1165 	switch (info->prim_type) {
1166 	case MTX_LOCK:
1167 		lck_mtx_lock(&info->mtx_lock);
1168 		break;
1169 	case RW_LOCK:
1170 		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1171 		break;
1172 	default:
1173 		panic("invalid type %d", info->prim_type);
1174 	}
1175 }
1176 
1177 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1178 primitive_unlock(struct info_sleep_inheritor_test *info)
1179 {
1180 	switch (info->prim_type) {
1181 	case MTX_LOCK:
1182 		lck_mtx_unlock(&info->mtx_lock);
1183 		break;
1184 	case RW_LOCK:
1185 		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1186 		break;
1187 	default:
1188 		panic("invalid type %d", info->prim_type);
1189 	}
1190 }
1191 
/*
 * Sleep on &info->thread_inheritor, pushing the caller's priority onto
 * the current inheritor, using the lock selected by prim_type.  The lock
 * is re-acquired before returning (LCK_SLEEP_DEFAULT).
 */
static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
{
	wait_result_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}

	return ret;
}
1209 
1210 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1211 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1212 {
1213 	switch (info->prim_type) {
1214 	case MTX_LOCK:
1215 	case RW_LOCK:
1216 		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1217 		break;
1218 	default:
1219 		panic("invalid type %d", info->prim_type);
1220 	}
1221 }
1222 
1223 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1224 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1225 {
1226 	switch (info->prim_type) {
1227 	case MTX_LOCK:
1228 	case RW_LOCK:
1229 		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1230 		break;
1231 	default:
1232 		panic("invalid type %d", info->prim_type);
1233 	}
1234 	return;
1235 }
1236 
1237 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1238 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1239 {
1240 	switch (info->prim_type) {
1241 	case MTX_LOCK:
1242 	case RW_LOCK:
1243 		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1244 		break;
1245 	default:
1246 		panic("invalid type %d", info->prim_type);
1247 	}
1248 	return;
1249 }
1250 
1251 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1252 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1253 {
1254 	gate_t *gate = &info->gate;
1255 	if (info->use_alloc_gate == true) {
1256 		gate = info->alloc_gate;
1257 	}
1258 	kern_return_t ret = KERN_SUCCESS;
1259 	switch (info->prim_type) {
1260 	case MTX_LOCK:
1261 		ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1262 		break;
1263 	case RW_LOCK:
1264 		ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1265 		break;
1266 	default:
1267 		panic("invalid type %d", info->prim_type);
1268 	}
1269 	return ret;
1270 }
1271 
/*
 * Block until the test gate opens (or is handed off/stolen), using the
 * lock selected by prim_type.
 */
static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test *info)
{
	/* Use the heap-allocated gate when the test asked for it. */
	gate_t *gate = &info->gate;
	if (info->use_alloc_gate == true) {
		gate = info->alloc_gate;
	}
	gate_wait_result_t ret = GATE_OPENED;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}
1292 
1293 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1294 primitive_gate_open(struct info_sleep_inheritor_test *info)
1295 {
1296 	gate_t *gate = &info->gate;
1297 	if (info->use_alloc_gate == true) {
1298 		gate = info->alloc_gate;
1299 	}
1300 	switch (info->prim_type) {
1301 	case MTX_LOCK:
1302 		lck_mtx_gate_open(&info->mtx_lock, gate);
1303 		break;
1304 	case RW_LOCK:
1305 		lck_rw_gate_open(&info->rw_lock, gate);
1306 		break;
1307 	default:
1308 		panic("invalid type %d", info->prim_type);
1309 	}
1310 }
1311 
1312 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1313 primitive_gate_close(struct info_sleep_inheritor_test *info)
1314 {
1315 	gate_t *gate = &info->gate;
1316 	if (info->use_alloc_gate == true) {
1317 		gate = info->alloc_gate;
1318 	}
1319 
1320 	switch (info->prim_type) {
1321 	case MTX_LOCK:
1322 		lck_mtx_gate_close(&info->mtx_lock, gate);
1323 		break;
1324 	case RW_LOCK:
1325 		lck_rw_gate_close(&info->rw_lock, gate);
1326 		break;
1327 	default:
1328 		panic("invalid type %d", info->prim_type);
1329 	}
1330 }
1331 
1332 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1333 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1334 {
1335 	gate_t *gate = &info->gate;
1336 	if (info->use_alloc_gate == true) {
1337 		gate = info->alloc_gate;
1338 	}
1339 
1340 	switch (info->prim_type) {
1341 	case MTX_LOCK:
1342 		lck_mtx_gate_steal(&info->mtx_lock, gate);
1343 		break;
1344 	case RW_LOCK:
1345 		lck_rw_gate_steal(&info->rw_lock, gate);
1346 		break;
1347 	default:
1348 		panic("invalid type %d", info->prim_type);
1349 	}
1350 }
1351 
/*
 * Hand the closed test gate off to a waiter; flags is passed straight
 * through to lck_*_gate_handoff().  Returns that call's result.
 */
static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
{
	/* Use the heap-allocated gate when the test asked for it. */
	gate_t *gate = &info->gate;
	if (info->use_alloc_gate == true) {
		gate = info->alloc_gate;
	}

	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}
1373 
1374 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1375 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1376 {
1377 	gate_t *gate = &info->gate;
1378 	if (info->use_alloc_gate == true) {
1379 		gate = info->alloc_gate;
1380 	}
1381 
1382 	switch (info->prim_type) {
1383 	case MTX_LOCK:
1384 		lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1385 		break;
1386 	case RW_LOCK:
1387 		lck_rw_gate_assert(&info->rw_lock, gate, type);
1388 		break;
1389 	default:
1390 		panic("invalid type %d", info->prim_type);
1391 	}
1392 }
1393 
1394 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1395 primitive_gate_init(struct info_sleep_inheritor_test *info)
1396 {
1397 	switch (info->prim_type) {
1398 	case MTX_LOCK:
1399 		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1400 		break;
1401 	case RW_LOCK:
1402 		lck_rw_gate_init(&info->rw_lock, &info->gate);
1403 		break;
1404 	default:
1405 		panic("invalid type %d", info->prim_type);
1406 	}
1407 }
1408 
1409 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1410 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1411 {
1412 	switch (info->prim_type) {
1413 	case MTX_LOCK:
1414 		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1415 		break;
1416 	case RW_LOCK:
1417 		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1418 		break;
1419 	default:
1420 		panic("invalid type %d", info->prim_type);
1421 	}
1422 }
1423 
1424 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1425 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1426 {
1427 	gate_t *gate;
1428 	switch (info->prim_type) {
1429 	case MTX_LOCK:
1430 		gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1431 		break;
1432 	case RW_LOCK:
1433 		gate = lck_rw_gate_alloc_init(&info->rw_lock);
1434 		break;
1435 	default:
1436 		panic("invalid type %d", info->prim_type);
1437 	}
1438 	info->alloc_gate = gate;
1439 }
1440 
1441 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1442 primitive_gate_free(struct info_sleep_inheritor_test *info)
1443 {
1444 	T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1445 
1446 	switch (info->prim_type) {
1447 	case MTX_LOCK:
1448 		lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1449 		break;
1450 	case RW_LOCK:
1451 		lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1452 		break;
1453 	default:
1454 		panic("invalid type %d", info->prim_type);
1455 	}
1456 	info->alloc_gate = NULL;
1457 }
1458 
/*
 * Test body: use sleep_with_inheritor like a mutex.  The first thread in
 * becomes the inheritor ("owner"); each finishing thread wakes exactly
 * one waiter, which becomes the next inheritor.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* First thread in: take ownership without sleeping. */
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	IOSleep(100);
	info->value++;

	primitive_lock(info);

	/* Only the current inheritor may be running this section. */
	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* No waiter was left to hand off to; allowed at most once (the last thread). */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		/* The wakeup returned the new inheritor with a reference we must drop. */
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1511 
/*
 * Test body: the first thread to take the lock becomes the inheritor and
 * stays runnable while every other thread sleeps pushing on it; it then
 * verifies it inherited the maximum waiter priority before waking all.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* Wait until every other thread has hit the rendezvous... */
		wait_threads(&info->synch, info->synch_value - 1);

		/* ...and is actually blocked, so every priority push has landed. */
		wait_for_waiters((struct synch_test_common *)info);

		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		/* Reset the rendezvous counter for possible reuse, then release everybody. */
		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1551 
/*
 * Test body: the first thread becomes the inheritor; one other thread
 * "steals" the inheritorship via change_sleep_inheritor() and verifies
 * it then receives the push from the remaining waiters.
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* First thread in: initial inheritor, excluded from the waiter set. */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* Only wake if the stealer has not taken over in the meantime. */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* Second thread in: steal the inheritorship from the first. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* The stealer must now carry the highest waiter priority. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* Remaining threads: record the highest waiter priority and sleep. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1612 
/*
 * Test body: every thread but the last clears the inheritor and sleeps —
 * i.e. waiters sleep with no thread to push on; the last thread to run
 * wakes them all.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* Last thread in: release all sleepers. */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* Sleep with a NULL inheritor: no thread receives a push. */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1639 
1640 static void
thread_mtx_work(void * args,__unused wait_result_t wr)1641 thread_mtx_work(
1642 	void *args,
1643 	__unused wait_result_t wr)
1644 {
1645 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1646 	uint my_pri = current_thread()->sched_pri;
1647 	int i;
1648 	u_int8_t rand;
1649 	unsigned int mod_rand;
1650 	uint max_pri;
1651 
1652 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1653 
1654 	for (i = 0; i < 10; i++) {
1655 		lck_mtx_lock(&info->mtx_lock);
1656 		if (info->thread_inheritor == NULL) {
1657 			info->thread_inheritor = current_thread();
1658 			lck_mtx_unlock(&info->mtx_lock);
1659 
1660 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1661 
1662 			wait_threads(&info->synch, info->synch_value - 1);
1663 			wait_for_waiters((struct synch_test_common *)info);
1664 			max_pri = get_max_pri((struct synch_test_common *) info);
1665 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1666 
1667 			os_atomic_store(&info->synch, 0, relaxed);
1668 
1669 			lck_mtx_lock(&info->mtx_lock);
1670 			info->thread_inheritor = NULL;
1671 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1672 			lck_mtx_unlock(&info->mtx_lock);
1673 			continue;
1674 		}
1675 
1676 		read_random(&rand, sizeof(rand));
1677 		mod_rand = rand % 2;
1678 
1679 		wake_threads(&info->synch);
1680 		switch (mod_rand) {
1681 		case 0:
1682 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1683 			lck_mtx_unlock(&info->mtx_lock);
1684 			break;
1685 		case 1:
1686 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1687 			break;
1688 		default:
1689 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1690 		}
1691 	}
1692 
1693 	/*
1694 	 * spin here to stop using the lock as mutex
1695 	 */
1696 	wake_threads(&info->synch);
1697 	wait_threads(&info->synch, info->synch_value);
1698 
1699 	for (i = 0; i < 10; i++) {
1700 		/* read_random might sleep so read it before acquiring the mtx as spin */
1701 		read_random(&rand, sizeof(rand));
1702 
1703 		lck_mtx_lock_spin(&info->mtx_lock);
1704 		if (info->thread_inheritor == NULL) {
1705 			info->thread_inheritor = current_thread();
1706 			lck_mtx_unlock(&info->mtx_lock);
1707 
1708 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1709 			wait_for_waiters((struct synch_test_common *)info);
1710 			max_pri = get_max_pri((struct synch_test_common *) info);
1711 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1712 
1713 			lck_mtx_lock_spin(&info->mtx_lock);
1714 			info->thread_inheritor = NULL;
1715 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1716 			lck_mtx_unlock(&info->mtx_lock);
1717 			continue;
1718 		}
1719 
1720 		mod_rand = rand % 2;
1721 		switch (mod_rand) {
1722 		case 0:
1723 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1724 			lck_mtx_unlock(&info->mtx_lock);
1725 			break;
1726 		case 1:
1727 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1728 			lck_mtx_unlock(&info->mtx_lock);
1729 			break;
1730 		default:
1731 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1732 		}
1733 	}
1734 	assert(current_thread()->kern_promotion_schedpri == 0);
1735 	notify_waiter((struct synch_test_common *)info);
1736 
1737 	thread_terminate_self();
1738 }
1739 
/*
 * Worker for test_rw_lock.
 *
 * Loops 10 times; on each iteration the thread either becomes the
 * "inheritor" (first to observe thread_inheritor == NULL after upgrading
 * the rw lock to exclusive) or goes to sleep on the inheritor event via
 * lck_rw_sleep_with_inheritor() with a randomly chosen LCK_SLEEP_* flag,
 * so every sleep-flag/unlock combination gets exercised.
 *
 * The inheritor waits for all other threads to block, asserts that it
 * inherited the maximum scheduling priority among the waiters, then wakes
 * everyone with wakeup_all_with_inheritor().
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				/* Upgrade succeeded: re-check under the exclusive hold. */
				if (info->thread_inheritor == NULL) {
					/* We are the inheritor for this round. */
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					/*
					 * Every other thread is now asleep pushing on us, so our
					 * priority must equal the max priority of the waiters.
					 */
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				/*
				 * Failed upgrade drops the shared hold entirely,
				 * so restart the locking sequence from scratch.
				 */
				goto try_again;
			}
		}

		/*
		 * Somebody else is the inheritor: sleep on the event with one of
		 * the four LCK_SLEEP_* variants, chosen at random.
		 * NOTE: `type` here may be SHARED or EXCLUSIVE depending on
		 * whether the upgrade above succeeded.
		 */
		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			/* Wake up holding the lock in the same mode as on entry. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			/* LCK_SLEEP_UNLOCK: lock is left dropped on wakeup. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			/* Wake up holding the lock shared, regardless of entry mode. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			/* Wake up holding the lock exclusive. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/* No inherited promotion may survive past the test body. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1816 
/* Values for obj_cached.obj_state. */
#define OBJ_STATE_UNUSED        0       /* slot allocated but not in use; may be recycled */
#define OBJ_STATE_REAL          1       /* fully initialized and published */
#define OBJ_STATE_PLACEHOLDER   2       /* being initialized; initializer holds the reflock */

#define OBJ_BUFF_SIZE 11                /* fits "I am groot" plus the NUL terminator */
/*
 * One slot of the test cache. Lifetime and state transitions are
 * coordinated through obj_refcount (a kern_apfs_reflock).
 */
struct obj_cached {
	int obj_id;                         /* id of the cached object; 0 while unused */
	int obj_state;                      /* one of OBJ_STATE_* */
	struct kern_apfs_reflock *obj_refcount; /* refcount + lock guarding this entry */
	char obj_buff[OBJ_BUFF_SIZE];       /* payload checked by the test threads */
};

#define CACHE_SIZE 2                    /* number of slots in the test cache */
#define USE_CACHE_ROUNDS 15             /* get/put iterations per thread in thread_use_cache */

#define REFCOUNT_REFLOCK_ROUNDS 15      /* iterations per thread in the plain reflock tests */
1833 
1834 /*
1835  * For the reflock cache test the cache is allocated
1836  * and its pointer is saved in obj_cache.
1837  * The lock for the cache is going to be one of the exclusive
1838  * locks already present in struct info_sleep_inheritor_test.
1839  */
1840 
1841 static struct obj_cached *
alloc_init_cache_entry(void)1842 alloc_init_cache_entry(void)
1843 {
1844 	struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1845 	cache_entry->obj_id = 0;
1846 	cache_entry->obj_state = OBJ_STATE_UNUSED;
1847 	cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
1848 	snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
1849 	return cache_entry;
1850 }
1851 
1852 static void
init_cache(struct info_sleep_inheritor_test * info)1853 init_cache(struct info_sleep_inheritor_test *info)
1854 {
1855 	struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1856 
1857 	int i;
1858 	for (i = 0; i < CACHE_SIZE; i++) {
1859 		obj_cache[i] = alloc_init_cache_entry();
1860 	}
1861 
1862 	info->obj_cache = obj_cache;
1863 }
1864 
1865 static void
check_cache_empty(struct info_sleep_inheritor_test * info)1866 check_cache_empty(struct info_sleep_inheritor_test *info)
1867 {
1868 	struct obj_cached **obj_cache = info->obj_cache;
1869 
1870 	int i, ret;
1871 	for (i = 0; i < CACHE_SIZE; i++) {
1872 		if (obj_cache[i] != NULL) {
1873 			T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1874 			T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1875 			ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1876 			T_ASSERT(ret == 0, "checked buff correctly emptied");
1877 		}
1878 	}
1879 }
1880 
1881 static void
free_cache(struct info_sleep_inheritor_test * info)1882 free_cache(struct info_sleep_inheritor_test *info)
1883 {
1884 	struct obj_cached **obj_cache = info->obj_cache;
1885 
1886 	int i;
1887 	for (i = 0; i < CACHE_SIZE; i++) {
1888 		if (obj_cache[i] != NULL) {
1889 			kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1890 			obj_cache[i]->obj_refcount = NULL;
1891 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1892 			obj_cache[i] = NULL;
1893 		}
1894 	}
1895 
1896 	kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1897 	info->obj_cache = NULL;
1898 }
1899 
1900 static struct obj_cached *
find_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info)1901 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1902 {
1903 	struct obj_cached **obj_cache = info->obj_cache;
1904 	int i;
1905 	for (i = 0; i < CACHE_SIZE; i++) {
1906 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1907 			return obj_cache[i];
1908 		}
1909 	}
1910 	return NULL;
1911 }
1912 
1913 static bool
free_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info,struct obj_cached * expected)1914 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, struct obj_cached *expected)
1915 {
1916 	struct obj_cached **obj_cache = info->obj_cache;
1917 	int i;
1918 	for (i = 0; i < CACHE_SIZE; i++) {
1919 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1920 			assert(obj_cache[i] == expected);
1921 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1922 			obj_cache[i] = NULL;
1923 			return true;
1924 		}
1925 	}
1926 	return false;
1927 }
1928 
1929 static struct obj_cached *
find_empty_spot_in_cache(struct info_sleep_inheritor_test * info)1930 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1931 {
1932 	struct obj_cached **obj_cache = info->obj_cache;
1933 	int i;
1934 	for (i = 0; i < CACHE_SIZE; i++) {
1935 		if (obj_cache[i] == NULL) {
1936 			obj_cache[i] = alloc_init_cache_entry();
1937 			return obj_cache[i];
1938 		}
1939 		if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1940 			return obj_cache[i];
1941 		}
1942 	}
1943 	return NULL;
1944 }
1945 
/*
 * Look up (or create) the cache entry for obj_id, take a reference on it
 * and return a pointer to its buffer through *buff.
 *
 * Returns 0 on success, -1 when the cache is full of in-use objects.
 *
 * On success the entry is in OBJ_STATE_REAL and cannot be freed until the
 * matching put_obj_cache(), because this thread holds a ref on its
 * reflock. Threads that lose a race here wait on the reflock, pushing
 * their priority onto whichever thread holds it.
 */
static int
get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	kern_apfs_reflock_t refcount = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;

try_again:
	primitive_lock(info);
	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
		/* Found an allocated object on the cache with same id */

		/*
		 * Copy the pointer to obj_refcount, as obj might
		 * get deallocated after primitive_unlock().
		 */
		refcount = obj->obj_refcount;
		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
			/*
			 * Got a ref; now inspect the state to decide whether the
			 * object still needs to be initialized.
			 */
			switch (obj->obj_state) {
			case OBJ_STATE_UNUSED:
				/* Ref taken on a recycled entry: we must initialize it. */
				goto init;
			case OBJ_STATE_REAL:
				/* Fully initialized: just hand out the buffer. */
				goto done;
			case OBJ_STATE_PLACEHOLDER:
				/*
				 * Unreachable here: the reflock is held for the whole
				 * PLACEHOLDER window (see init: below), so the
				 * try_get_ref above would have failed instead.
				 */
				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
			default:
				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
			}
		} else {
			/*
			 * Didn't get a ref.
			 * Either an obj_put() of the last ref is in progress,
			 * or an init of the object is happening.
			 * In both cases wait for that to finish and retry.
			 * While waiting, the thread that is holding the reflock
			 * runs with a priority at least as high as this thread's.
			 */
			primitive_unlock(info);
			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			goto try_again;
		}
	} else {
		/* Look for a spot on the cache where we can save the object */

		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
			/*
			 * Sadness: cache is full, and everything in the cache is
			 * used. Let the caller back off and retry.
			 */
			primitive_unlock(info);
			return -1;
		} else {
			/*
			 * Copy the pointer to obj_refcount, as obj might
			 * get deallocated after primitive_unlock().
			 */
			refcount = obj->obj_refcount;
			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
				/*
				 * Got a ref on a OBJ_STATE_UNUSED obj.
				 * Recycle time: claim the entry for our id.
				 */
				obj->obj_id = obj_id;
				goto init;
			} else {
				/*
				 * This could happen if an obj_put() has just changed the
				 * state to OBJ_STATE_UNUSED, but not unlocked the reflock yet.
				 */
				primitive_unlock(info);
				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
				goto try_again;
			}
		}
	}
init:
	assert(obj->obj_id == obj_id);
	assert(obj->obj_state == OBJ_STATE_UNUSED);
	/*
	 * We already got a ref on the object, but we need
	 * to initialize it. Mark it as
	 * OBJ_STATE_PLACEHOLDER and take the obj_reflock.
	 * In this way all threads waiting for this init
	 * to finish will push on this thread.
	 */
	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
	/* Cannot fail: nobody else can lock while we hold a ref in this state. */
	assert(ret == true);
	obj->obj_state = OBJ_STATE_PLACEHOLDER;
	primitive_unlock(info);

	// let's pretend we are populating the obj
	IOSleep(10);
	/*
	 * obj will not be deallocated while I hold a ref.
	 * So it is safe to access it.
	 */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);

	primitive_lock(info);
	/* The entry must still be ours and still a placeholder. */
	obj2 = find_id_in_cache(obj_id, info);
	assert(obj == obj2);
	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);

	/* Publish the initialized object and release the init lock. */
	obj->obj_state = OBJ_STATE_REAL;
	kern_apfs_reflock_unlock(refcount);

done:
	*buff = obj->obj_buff;
	primitive_unlock(info);
	return 0;
}
2061 
/*
 * Drop the reference taken by get_obj_cache() on obj_id.
 *
 * If this was the last reference (KERN_APFS_REFLOCK_OUT_LOCKED), the
 * reflock is acquired atomically with the put, and this thread tears the
 * entry down: restores the sentinel buffer, flips the state back to
 * UNUSED and — when `free` is set — removes the entry from the cache and
 * frees it (plus its reflock).
 */
static void
put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_t refcount = NULL;

	primitive_lock(info);
	obj = find_id_in_cache(obj_id, info);
	primitive_unlock(info);

	/*
	 * Nobody should have been able to remove obj_id
	 * from the cache: this thread still holds a ref on it.
	 */
	assert(obj != NULL);
	assert(obj->obj_state == OBJ_STATE_REAL);

	refcount = obj->obj_refcount;

	/*
	 * This should never fail: the reflock is only ever held either
	 * during init (entered from OBJ_STATE_UNUSED) or by a put that
	 * reached zero — and in both cases any later reflock_get_ref()
	 * had to wait for the unlock before our ref could exist.
	 */
	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
	assert(ret == true);
	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
		/* Not the last reference: nothing to tear down. */
		return;
	}

	/*
	 * Note: nobody at this point will be able to get a ref or a lock on
	 * refcount.
	 * All threads waiting on refcount will push on this thread.
	 */

	// let's pretend we are flushing the obj somewhere.
	IOSleep(10);
	/* Restore the sentinel so check_cache_empty() can verify it. */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");

	primitive_lock(info);
	obj->obj_state = OBJ_STATE_UNUSED;
	if (free) {
		obj2 = find_id_in_cache(obj_id, info);
		assert(obj == obj2);

		ret = free_id_in_cache(obj_id, info, obj);
		assert(ret == true);
	}
	primitive_unlock(info);

	/* Let any waiters retry; they will re-find (or re-create) the entry. */
	kern_apfs_reflock_unlock(refcount);

	if (free) {
		/* The entry is gone from the cache; its reflock goes with it. */
		kern_apfs_reflock_free(refcount);
	}
}
2123 
/*
 * Worker for test_cache_reflock.
 *
 * Each thread derives an object id in [1, CACHE_SIZE + 1] from the shared
 * countdown info->value (more threads than slots, so threads contend for
 * the cache). It then loops USE_CACHE_ROUNDS times: get the object
 * (retrying while the cache is full), verify the buffer contents twice
 * around a sleep — the held ref must keep the buffer stable — and put the
 * object back, freeing the entry on every other round.
 */
static void
thread_use_cache(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	int my_obj;

	primitive_lock(info);
	/* info->value counts down from nthreads, so ids land in [1, CACHE_SIZE + 1]. */
	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
	primitive_unlock(info);

	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
	/*
	 * This is the string I would expect to see
	 * on my_obj buff.
	 */
	char my_string[OBJ_BUFF_SIZE];
	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);

	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
		char *buff;
		while (get_obj_cache(my_obj, info, &buff) == -1) {
			/*
			 * Cache is full, wait and retry.
			 */
			IOSleep(10);
		}
		/* Buffer must match both before and after sleeping with the ref held. */
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		IOSleep(10);
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		/* Alternate between keeping the entry around and freeing it. */
		put_obj_cache(my_obj, info, (i % 2 == 0));
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2167 
/*
 * Worker for test_refcount_reflock.
 *
 * Hammers a single shared reflock: each round takes a reference with
 * KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST (locking on the 0->1 transition) and
 * drops it with KERN_APFS_REFLOCK_IN_LOCK_IF_LAST (locking on the 1->0
 * transition), using reflock_protected_status to verify that the lock is
 * really held across those transitions. Every other round also sets
 * IN_WILL_WAIT so the failure path exercises wait_for_unlock().
 */
static void
thread_refcount_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_in_flags_t in_flags;

	T_LOG("Thread %p started", current_thread());
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
		if ((i % 2) == 0) {
			/* On even rounds declare the intent to wait on failure. */
			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
		}
		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
		if (ret == true) {
			/* got reference, check if we did 0->1 */
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				/* We hold the lock: status must still be the initial 0. */
				T_ASSERT(info->reflock_protected_status == 0, "status init check");
				info->reflock_protected_status = 1;
				kern_apfs_reflock_unlock(&info->reflock);
			} else {
				/* Somebody else did 0->1 and set the status before us. */
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
			}
			/* release the reference and check if we did 1->0 */
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				/* Last ref: reset the status under the lock. */
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
				info->reflock_protected_status = 0;
				kern_apfs_reflock_unlock(&info->reflock);
			}
		} else {
			/* didn't get a reference */
			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
				/* We promised to wait: block until the holder unlocks. */
				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2219 
/*
 * Worker for test_force_reflock.
 *
 * The first thread to bump info->value becomes the "locker": it takes the
 * reflock with KERN_APFS_REFLOCK_IN_ALLOW_FORCE and holds it for 100ms.
 * All other threads meanwhile get/put references with
 * KERN_APFS_REFLOCK_IN_FORCE, which must succeed even while the lock is
 * held (that is the property under test).
 */
static void
thread_force_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* First thread in: take the lock, allowing forced refs. */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* Keep the lock held while the other threads force refs. */
		IOSleep(100);
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			/* Forced get/put must succeed even with the lock held. */
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2260 
/*
 * Worker for test_lock_reflock.
 *
 * The first thread to bump info->value becomes the "locker": it takes the
 * reflock with default flags, sets the protected status and holds the
 * lock for 100ms. The other threads spin trying plain (non-forced)
 * get_ref calls: those must fail while the lock is held, and whenever one
 * succeeds the protected status must already be back to 0.
 */
static void
thread_lock_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* First thread in: take the lock and flag the protected state. */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
		info->reflock_protected_status = 1;
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* Hold the lock long enough for the others to race against it. */
		IOSleep(100);
		info->reflock_protected_status = 0;
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
			if (ret == true) {
				/* get_ref only succeeds after the locker cleared the status. */
				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
				break;
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2306 
2307 static void
test_cache_reflock(struct info_sleep_inheritor_test * info)2308 test_cache_reflock(struct info_sleep_inheritor_test *info)
2309 {
2310 	info->synch = 0;
2311 	info->synch_value = info->head.nthreads;
2312 
2313 	info->value = info->head.nthreads;
2314 	/*
2315 	 * Use the mtx as cache lock
2316 	 */
2317 	info->prim_type = MTX_LOCK;
2318 
2319 	init_cache(info);
2320 
2321 	start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2322 	wait_all_thread((struct synch_test_common *)info);
2323 
2324 	check_cache_empty(info);
2325 	free_cache(info);
2326 }
2327 
2328 static void
test_refcount_reflock(struct info_sleep_inheritor_test * info)2329 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2330 {
2331 	info->synch = 0;
2332 	info->synch_value = info->head.nthreads;
2333 	kern_apfs_reflock_init(&info->reflock);
2334 	info->reflock_protected_status = 0;
2335 
2336 	start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2337 	wait_all_thread((struct synch_test_common *)info);
2338 
2339 	kern_apfs_reflock_destroy(&info->reflock);
2340 
2341 	T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2342 }
2343 
2344 static void
test_force_reflock(struct info_sleep_inheritor_test * info)2345 test_force_reflock(struct info_sleep_inheritor_test *info)
2346 {
2347 	info->synch = 0;
2348 	info->synch_value = info->head.nthreads;
2349 	kern_apfs_reflock_init(&info->reflock);
2350 	info->value = 0;
2351 
2352 	start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2353 	wait_all_thread((struct synch_test_common *)info);
2354 
2355 	kern_apfs_reflock_destroy(&info->reflock);
2356 }
2357 
2358 static void
test_lock_reflock(struct info_sleep_inheritor_test * info)2359 test_lock_reflock(struct info_sleep_inheritor_test *info)
2360 {
2361 	info->synch = 0;
2362 	info->synch_value = info->head.nthreads;
2363 	kern_apfs_reflock_init(&info->reflock);
2364 	info->value = 0;
2365 
2366 	start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2367 	wait_all_thread((struct synch_test_common *)info);
2368 
2369 	kern_apfs_reflock_destroy(&info->reflock);
2370 }
2371 
2372 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)2373 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2374 {
2375 	info->prim_type = prim_type;
2376 	info->synch = 0;
2377 	info->synch_value = info->head.nthreads;
2378 
2379 	info->thread_inheritor = NULL;
2380 
2381 	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2382 	wait_all_thread((struct synch_test_common *)info);
2383 }
2384 
2385 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)2386 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2387 {
2388 	info->prim_type = prim_type;
2389 
2390 	info->synch = 0;
2391 	info->synch_value = info->head.nthreads;
2392 	info->value = 0;
2393 	info->handoff_failure = 0;
2394 	info->thread_inheritor = NULL;
2395 
2396 	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2397 	wait_all_thread((struct synch_test_common *)info);
2398 
2399 	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2400 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2401 }
2402 
2403 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2404 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2405 {
2406 	info->prim_type = prim_type;
2407 
2408 	info->thread_inheritor = NULL;
2409 	info->steal_pri = 0;
2410 	info->synch = 0;
2411 	info->synch_value = info->head.nthreads;
2412 
2413 	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2414 	wait_all_thread((struct synch_test_common *)info);
2415 }
2416 
2417 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2418 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2419 {
2420 	info->prim_type = prim_type;
2421 	info->synch = 0;
2422 	info->synch_value = info->head.nthreads;
2423 
2424 	info->thread_inheritor = NULL;
2425 	info->value = info->head.nthreads;
2426 
2427 	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2428 	wait_all_thread((struct synch_test_common *)info);
2429 }
2430 
2431 static void
test_rw_lock(struct info_sleep_inheritor_test * info)2432 test_rw_lock(struct info_sleep_inheritor_test *info)
2433 {
2434 	info->thread_inheritor = NULL;
2435 	info->value = info->head.nthreads;
2436 	info->synch = 0;
2437 	info->synch_value = info->head.nthreads;
2438 
2439 	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2440 	wait_all_thread((struct synch_test_common *)info);
2441 }
2442 
2443 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)2444 test_mtx_lock(struct info_sleep_inheritor_test *info)
2445 {
2446 	info->thread_inheritor = NULL;
2447 	info->value = info->head.nthreads;
2448 	info->synch = 0;
2449 	info->synch_value = info->head.nthreads;
2450 
2451 	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2452 	wait_all_thread((struct synch_test_common *)info);
2453 }
2454 
/*
 * Entry point for the sleep_with_inheritor / reflock test suite.
 *
 * Sets up a shared info struct with one mutex and one rw lock, then runs
 * each sub-test in sequence (each sub-test spawns NUM_THREADS workers and
 * joins them before returning), and finally tears everything down.
 */
kern_return_t
ts_kernel_sleep_inheritor_test(void)
{
	struct info_sleep_inheritor_test info = {};

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with mxt sleep");
	test_change_sleep_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with rw sleep");
	test_change_sleep_inheritor(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, RW_LOCK);

	/*
	 * Testing mtx locking combinations
	 */
	T_LOG("Testing mtx locking combinations");
	test_mtx_lock(&info);

	/*
	 * Testing rw locking combinations
	 */
	T_LOG("Testing rw locking combinations");
	test_rw_lock(&info);

	/*
	 * Testing reflock / cond_sleep_with_inheritor
	 */
	T_LOG("Test cache reflock + cond_sleep_with_inheritor");
	test_cache_reflock(&info);
	T_LOG("Test force reflock + cond_sleep_with_inheritor");
	test_force_reflock(&info);
	T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
	test_refcount_reflock(&info);
	T_LOG("Test lock reflock + cond_sleep_with_inheritor");
	test_lock_reflock(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
2555 
/*
 * Gate worker exercising gate_steal().
 *
 * Three roles, decided by arrival order:
 *  - the first thread closes the gate and becomes the initial holder;
 *  - the second thread (steal_pri == 0) steals the gate from it and
 *    verifies it inherited the max priority of the waiters;
 *  - everyone else waits on the gate, pushing their priority.
 *
 * The closer and the stealer both call exclude_current_waiter() and do
 * not post to info->synch, hence the `synch_value - 2` waits below.
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* First arrival: close the gate and hold it. */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		/* Only open if the gate was not stolen from us meanwhile. */
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* Second arrival: take the gate away from the holder. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* All waiters push on us: we must run at the recorded max pri. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* Waiter: record our pri as a candidate max, then block on the gate. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* No inherited promotion may survive past the test body. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2615 
/*
 * Gate worker exercising gate_free().
 *
 * Exactly one thread wins gate_try_close(): it waits for all others to
 * block on the gate, then opens and frees it while still holding the
 * primitive lock. Every other thread asserts the gate is closed, waits
 * on it, and checks it was woken by the open (GATE_OPENED).
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* We hold the gate; let everybody queue up behind it. */
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		/* Open wakes the waiters; free retires the gate. */
		primitive_gate_open(info);
		primitive_gate_free(info);
	} else {
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2651 
/*
 * Gate worker that uses the gate as a mutex with direct handoff.
 *
 * Each thread acquires the gate (by try_close or by waiting for a
 * GATE_HANDOFF from the previous owner), increments info->value in the
 * "critical section", then hands the gate off. Exactly one handoff
 * (the last owner's, with no waiters left) is expected to return
 * KERN_NOT_WAITING, which is counted in handoff_failure and the gate is
 * opened instead.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		/* Gate already owned: wait for a direct handoff to us. */
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* Critical section: protected by gate ownership, not by the lock. */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* No waiter to hand off to: must happen exactly once (last owner). */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2699 
/*
 * Worker body for the gate push test (see test_gate_push).
 *
 * All threads race to close the gate; the single winner waits for the
 * other synch_value - 1 threads to block on the gate, then asserts its
 * scheduling priority was pushed to the max priority of the waiters
 * (priority inheritance through the gate), clears work_to_do and opens
 * the gate.  Losers block in primitive_gate_wait() and, once woken,
 * re-check work_to_do via the check_again label.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			/* Winner: do "the work" while holding the gate. */
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			/* Wait until every loser is blocked on the gate. */
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			/* Loser: signal the winner and wait for the open. */
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* No kernel promotion should linger once the gate is released. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2741 
/*
 * Tests the priority push on the gate holder: spawns NUM_THREADS
 * thread_just_one_do_work workers (in priority order) over a gate built
 * on the primitive selected by prim_type (MTX_LOCK or RW_LOCK) and waits
 * for them all to finish before destroying the gate.
 */
static void
test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;
	info->use_alloc_gate = false;

	primitive_gate_init(info);
	info->work_to_do = TRUE;
	info->synch = 0;
	info->synch_value = NUM_THREADS;

	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}
2758 
/*
 * Tests gate handoff semantics: spawns NUM_THREADS thread_gate_like_mutex
 * workers that use the gate as a mutex around info->value, then asserts
 * every thread incremented the value exactly once and that exactly one
 * handoff found no waiters.
 */
static void
test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;
	info->use_alloc_gate = false;

	primitive_gate_init(info);

	info->synch = 0;
	info->synch_value = NUM_THREADS;
	info->value = 0;
	info->handoff_failure = 0;

	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
	wait_all_thread((struct synch_test_common *)info);

	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
	T_ASSERT(info->handoff_failure == 1, "handoff failures");

	primitive_gate_destroy(info);
}
2780 
/*
 * Tests the gate steal path: spawns NUM_THREADS thread_gate_aggressive
 * workers over a gate built on the primitive selected by prim_type.
 * steal_pri tracks the highest priority seen among the stealing threads.
 */
static void
test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;
	info->use_alloc_gate = false;

	primitive_gate_init(info);

	info->synch = 0;
	info->synch_value = NUM_THREADS;
	info->thread_inheritor = NULL;
	info->steal_pri = 0;

	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}
2799 
2800 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2801 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2802 {
2803 	(void)info;
2804 	(void) prim_type;
2805 	info->prim_type = prim_type;
2806 	info->use_alloc_gate = true;
2807 
2808 	primitive_gate_alloc(info);
2809 
2810 	info->synch = 0;
2811 	info->synch_value = NUM_THREADS;
2812 
2813 	start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2814 	wait_all_thread((struct synch_test_common *)info);
2815 
2816 	T_ASSERT(info->alloc_gate == NULL, "gate free");
2817 	info->use_alloc_gate = false;
2818 }
2819 
2820 kern_return_t
ts_kernel_gate_test(void)2821 ts_kernel_gate_test(void)
2822 {
2823 	struct info_sleep_inheritor_test info = {};
2824 
2825 	T_LOG("Testing gate primitive");
2826 
2827 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2828 
2829 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2830 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2831 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2832 
2833 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2834 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2835 
2836 	/*
2837 	 * Testing the priority inherited by the keeper
2838 	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2839 	 */
2840 	T_LOG("Testing gate push, mtx");
2841 	test_gate_push(&info, MTX_LOCK);
2842 
2843 	T_LOG("Testing gate push, rw");
2844 	test_gate_push(&info, RW_LOCK);
2845 
2846 	/*
2847 	 * Testing the handoff
2848 	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2849 	 */
2850 	T_LOG("Testing gate handoff, mtx");
2851 	test_gate_handoff(&info, MTX_LOCK);
2852 
2853 	T_LOG("Testing gate handoff, rw");
2854 	test_gate_handoff(&info, RW_LOCK);
2855 
2856 	/*
2857 	 * Testing the steal
2858 	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2859 	 */
2860 	T_LOG("Testing gate steal, mtx");
2861 	test_gate_steal(&info, MTX_LOCK);
2862 
2863 	T_LOG("Testing gate steal, rw");
2864 	test_gate_steal(&info, RW_LOCK);
2865 
2866 	/*
2867 	 * Testing the alloc/free
2868 	 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2869 	 */
2870 	T_LOG("Testing gate alloc/free, mtx");
2871 	test_gate_alloc_free(&info, MTX_LOCK);
2872 
2873 	T_LOG("Testing gate alloc/free, rw");
2874 	test_gate_alloc_free(&info, RW_LOCK);
2875 
2876 	destroy_synch_test_common((struct synch_test_common *)&info);
2877 
2878 	lck_attr_free(lck_attr);
2879 	lck_grp_attr_free(lck_grp_attr);
2880 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2881 	lck_grp_free(lck_grp);
2882 
2883 	return KERN_SUCCESS;
2884 }
2885 
2886 #define NUM_THREAD_CHAIN 6
2887 
/*
 * Shared state for the turnstile chain tests (sleep chain, gate chain,
 * and the mixed sleep/gate chain).
 */
struct turnstile_chain_test {
	struct synch_test_common head;  /* common thread bookkeeping (threads[], nthreads) */
	lck_mtx_t mtx_lock;             /* lock the gates are built on */
	int synch_value;                /* number of participating threads */
	int synch;                      /* rendezvous counter (wake_threads/wait_threads) */
	int synch2;                     /* second rendezvous counter, used after gate close */
	gate_t gates[NUM_THREAD_CHAIN]; /* one gate per thread in the chain */
};
2896 
/*
 * Worker body for the mixed sleep/gate chain test (see
 * test_sleep_gate_chain).
 *
 * Builds a priority-inheritance chain that alternates between gates and
 * sleep_with_inheritor: even-indexed threads close gates[i]; odd-indexed
 * threads wait on gates[i - 1] while even-indexed threads (other than
 * thread 0) sleep with thread i - 1 as inheritor.  Thread 0 sits at the
 * head of the chain, so the push from every other thread should reach it;
 * each thread asserts its sched_pri equals the max priority of the group
 * once it is woken.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Rendezvous: make sure every even thread has closed its gate. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to be blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Find our position i: we wait on thread i - 1, wake thread i + 1. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd thread: block on the gate closed by even thread i - 1. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Wake the next thread in the chain, keeping the push. */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Release the gate we closed so the thread behind us can run. */
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* No kernel promotion should linger after the chain unwinds. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2992 
/*
 * Worker body for the pure gate chain test (see test_gate_chain).
 *
 * Every thread closes its own gate gates[i]; thread i > 0 then blocks on
 * gates[i - 1], forming a chain of gate waits that ends at thread 0.
 * Thread 0 waits for the whole chain to assemble, asserts its priority
 * was pushed to the group maximum, and opens gates[0]; each woken thread
 * makes the same priority assertion and opens its own gate to release
 * its successor.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Each thread closes the gate matching its index. */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	/* We must have found ourselves in threads[]. */
	assert(i != info->head.nthreads);

	/* Rendezvous: every gate is closed before anyone starts waiting. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to be blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Block on the previous thread's gate; woken when it opens. */
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Release our own gate so the next thread in the chain runs. */
		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* No kernel promotion should linger after the chain unwinds. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3055 
/*
 * Worker body for the sleep_with_inheritor chain test (see
 * test_sleep_chain).
 *
 * Thread i > 0 sleeps on event &threads[i - 1] naming thread i - 1 as
 * inheritor, forming a chain whose push ends at thread 0.  Thread 0 waits
 * for the chain to assemble, asserts its priority equals the group max,
 * then wakes thread 1 without transferring the push; each woken thread
 * repeats the assertion, wakes its successor, and drops any residual
 * inheritership with wakeup_all_with_inheritor.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to be asleep behind us. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake thread 1 but keep the push on ourselves for the check below. */
		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* Find our position i: wait on thread i - 1's event, wake our own. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake the next thread in the chain; the tail finds no waiter. */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* No kernel promotion should linger after the chain unwinds. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3133 
/*
 * Runs the sleep_with_inheritor chain test: starts one
 * thread_sleep_chain_work worker per thread and waits for completion.
 */
static void
test_sleep_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
3143 
/*
 * Runs the gate chain test: starts one thread_gate_chain_work worker per
 * thread and waits for completion.
 */
static void
test_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
3154 
/*
 * Runs the mixed sleep/gate chain test: starts one
 * thread_sleep_gate_chain_work worker per thread and waits for completion.
 */
static void
test_sleep_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
3165 
/*
 * Entry point for the turnstile chain POST test.
 *
 * Sets up NUM_THREAD_CHAIN threads, a mutex and one gate per thread, then
 * runs the three chain variants (sleep chain, gate chain, mixed) and
 * tears everything down.  Returns KERN_SUCCESS; individual checks report
 * through T_ASSERT in the worker bodies.
 */
kern_return_t
ts_kernel_turnstile_chain_test(void)
{
	struct turnstile_chain_test info = {};
	int i;

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
	}

	T_LOG("Testing sleep chain, lck");
	test_sleep_chain(&info);

	T_LOG("Testing gate chain, lck");
	test_gate_chain(&info);

	T_LOG("Testing sleep and gate chain, lck");
	test_sleep_gate_chain(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);
	/* Gates must be destroyed before the mutex they are built on. */
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
	}
	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
3202 
3203 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)3204 ts_kernel_timingsafe_bcmp_test(void)
3205 {
3206 	int i, buf_size;
3207 	char *buf = NULL;
3208 
3209 	// empty
3210 	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3211 	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3212 	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3213 
3214 	// equal
3215 	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3216 
3217 	// unequal
3218 	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3219 	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3220 	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3221 	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3222 
3223 	// all possible bitwise differences
3224 	for (i = 1; i < 256; i += 1) {
3225 		unsigned char a = 0;
3226 		unsigned char b = (unsigned char)i;
3227 
3228 		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3229 	}
3230 
3231 	// large
3232 	buf_size = 1024 * 16;
3233 	buf = kalloc_data(buf_size, Z_WAITOK);
3234 	T_EXPECT_NOTNULL(buf, "kalloc of buf");
3235 
3236 	read_random(buf, buf_size);
3237 	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3238 	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3239 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3240 
3241 	memcpy(buf + 128, buf, 128);
3242 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3243 
3244 	kfree_data(buf, buf_size);
3245 
3246 	return KERN_SUCCESS;
3247 }
3248 
/*
 * POST test for printf length modifiers: emits four %hx (unsigned short),
 * four %hhx (unsigned char) and one %llx (unsigned long long) conversion
 * in a single printf, then records a pass.
 *
 * NOTE(review): the format string has no trailing newline — confirm that
 * is intended for the POST log output.
 */
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	T_PASS("kprintf_hhx_test passed");
	return KERN_SUCCESS;
}
3261