xref: /xnu-10002.1.13/osfmk/tests/kernel_tests.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_protos.h>
48 #include <string.h>
49 #include <kern/kern_apfs_reflock.h>
50 
51 #if !(DEVELOPMENT || DEBUG)
52 #error "Testing is not enabled on RELEASE configurations"
53 #endif
54 
55 #include <tests/xnupost.h>
56 
57 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
58 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
59 
60 uint32_t total_post_tests_count = 0;
61 void xnupost_reset_panic_widgets(void);
62 
63 /* test declarations */
64 kern_return_t zalloc_test(void);
65 kern_return_t RandomULong_test(void);
66 kern_return_t kcdata_api_test(void);
67 kern_return_t ts_kernel_primitive_test(void);
68 kern_return_t ts_kernel_sleep_inheritor_test(void);
69 kern_return_t ts_kernel_gate_test(void);
70 kern_return_t ts_kernel_turnstile_chain_test(void);
71 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
72 
73 #if __ARM_VFP__
74 extern kern_return_t vfp_state_test(void);
75 #endif
76 
77 extern kern_return_t kprintf_hhx_test(void);
78 
79 #if defined(__arm64__)
80 kern_return_t pmap_coredump_test(void);
81 #endif
82 
83 extern kern_return_t console_serial_test(void);
84 extern kern_return_t console_serial_parallel_log_tests(void);
85 extern kern_return_t test_printf(void);
86 extern kern_return_t test_os_log(void);
87 extern kern_return_t test_os_log_parallel(void);
88 extern kern_return_t bitmap_post_test(void);
89 extern kern_return_t counter_tests(void);
90 #if ML_IO_TIMEOUTS_ENABLED
91 extern kern_return_t ml_io_timeout_test(void);
92 #endif
93 
94 #ifdef __arm64__
95 extern kern_return_t arm64_munger_test(void);
96 extern kern_return_t ex_cb_test(void);
97 #if __ARM_PAN_AVAILABLE__
98 extern kern_return_t arm64_pan_test(void);
99 #endif
100 #if defined(HAS_APPLE_PAC)
101 extern kern_return_t arm64_ropjop_test(void);
102 #endif /* defined(HAS_APPLE_PAC) */
103 #endif /* __arm64__ */
104 
105 extern kern_return_t test_thread_call(void);
106 
107 
/*
 * Single global panic-widget registration slot.  Armed by
 * xnupost_register_panic_widget() and consumed/cleared around each test
 * run; only one widget may be registered at a time.
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
112 
/*
 * Table of kernel-mode POST tests.  Each entry is built with
 * XNUPOST_TEST_CONFIG_BASIC and gated by the same arch/feature
 * conditionals as the matching declarations above.  TOC numbers are
 * assigned lazily by xnupost_list_tests().
 */
struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_printf),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */
	                                   XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm64__)
	                                   XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	                                   //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	                                   XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	                                   XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	                                   XNUPOST_TEST_CONFIG_BASIC(counter_tests),
#if ML_IO_TIMEOUTS_ENABLED
	                                   XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
#endif
};
152 
153 uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
154 
155 #define POSTARGS_RUN_TESTS 0x1
156 #define POSTARGS_CONTROLLER_AVAILABLE 0x2
157 #define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
158 uint64_t kernel_post_args = 0x0;
159 
160 /* static variables to hold state */
161 static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
162 static char kernel_post_test_configs[256];
163 boolean_t xnupost_should_run_test(uint32_t test_num);
164 
165 kern_return_t
xnupost_parse_config()166 xnupost_parse_config()
167 {
168 	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
169 		return parse_config_retval;
170 	}
171 	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
172 
173 	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
174 		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
175 	}
176 
177 	if (kernel_post_args != 0) {
178 		parse_config_retval = KERN_SUCCESS;
179 		goto out;
180 	}
181 	parse_config_retval = KERN_NOT_SUPPORTED;
182 out:
183 	return parse_config_retval;
184 }
185 
186 boolean_t
xnupost_should_run_test(uint32_t test_num)187 xnupost_should_run_test(uint32_t test_num)
188 {
189 	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
190 		int64_t begin = 0, end = 999999;
191 		char * b = kernel_post_test_configs;
192 		while (*b) {
193 			get_range_bounds(b, &begin, &end);
194 			if (test_num >= begin && test_num <= end) {
195 				return TRUE;
196 			}
197 
198 			/* skip to the next "," */
199 			while (*b != ',') {
200 				if (*b == '\0') {
201 					return FALSE;
202 				}
203 				b++;
204 			}
205 			/* skip past the ',' */
206 			b++;
207 		}
208 		return FALSE;
209 	}
210 	return TRUE;
211 }
212 
213 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)214 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
215 {
216 	if (KERN_SUCCESS != xnupost_parse_config()) {
217 		return KERN_FAILURE;
218 	}
219 
220 	xnupost_test_t testp;
221 	for (uint32_t i = 0; i < test_count; i++) {
222 		testp = &test_list[i];
223 		if (testp->xt_test_num == 0) {
224 			assert(total_post_tests_count < UINT16_MAX);
225 			testp->xt_test_num = (uint16_t)++total_post_tests_count;
226 		}
227 		/* make sure the boot-arg based test run list is honored */
228 		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
229 			testp->xt_config |= XT_CONFIG_IGNORE;
230 			if (xnupost_should_run_test(testp->xt_test_num)) {
231 				testp->xt_config &= ~(XT_CONFIG_IGNORE);
232 				testp->xt_config |= XT_CONFIG_RUN;
233 				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
234 			}
235 		}
236 		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
237 		    testp->xt_config);
238 	}
239 
240 	return KERN_SUCCESS;
241 }
242 
/*
 * Execute every test in test_list under the T_* harness.
 *
 * Does nothing unless POSTARGS_RUN_TESTS was set via the kernPOST
 * boot-arg.  For each test: the panic widget is cleared, the harness
 * result state is reset, tests that expect a panic are skipped when no
 * external controller is attached, and XT_CONFIG_IGNORE tests are
 * skipped outright.  A test that made no T_* calls is resolved from its
 * plain return value instead.
 *
 * Returns KERN_SUCCESS; per-test outcomes are recorded in each
 * xnupost_test entry (xt_retval, xt_test_actions, timestamps).
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* fresh panic-widget and harness state for every test */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		/* end time defaults to begin time in case the test is skipped */
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		/* a test "passes" when the harness result matches its expectation */
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
307 
308 kern_return_t
kernel_list_tests()309 kernel_list_tests()
310 {
311 	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
312 }
313 
314 kern_return_t
kernel_do_post()315 kernel_do_post()
316 {
317 	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
318 }
319 
320 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)321 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
322 {
323 	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
324 		return KERN_RESOURCE_SHORTAGE;
325 	}
326 
327 	xt_panic_widgets.xtp_context_p = context;
328 	xt_panic_widgets.xtp_func      = funcp;
329 	xt_panic_widgets.xtp_func_name = funcname;
330 	xt_panic_widgets.xtp_outval_p  = outval;
331 
332 	return KERN_SUCCESS;
333 }
334 
/*
 * Disarm the panic-widget slot by zeroing all of its fields, making it
 * available for the next xnupost_register_panic_widget() call.
 */
void
xnupost_reset_panic_widgets()
{
	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}
340 
/*
 * Panic/assertion interception hook for XNUPOST.
 *
 * Called from the debugger-stop path with the panic string.  If a panic
 * widget is registered it is invoked to classify the stop; its verdict
 * decides whether the kernel returns from the panic or proceeds into
 * kdb_stop.
 *
 * Returns KERN_SUCCESS to request a return from the panic/assertion,
 * KERN_FAILURE to continue into kdb_stop, and KERN_INVALID_CAPABILITY
 * when kernPOST is not configured or no widget is registered.
 */
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval         = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	/* map the widget's verdict onto the panic path's continue/return choice */
	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
387 
388 xt_panic_return_t
_xt_generic_assert_check(const char * s,void * str_to_match,void ** outval)389 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
390 {
391 	xt_panic_return_t ret = XT_PANIC_UNRELATED;
392 
393 	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
394 		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
395 		ret = XT_RET_W_SUCCESS;
396 	}
397 
398 	if (outval) {
399 		*outval = (void *)(uintptr_t)ret;
400 	}
401 	return ret;
402 }
403 
404 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)405 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
406 {
407 	uint32_t i = 0;
408 	xnupost_test_t testp;
409 	for (; i < test_count; i++) {
410 		testp                  = &test_list[i];
411 		testp->xt_begin_time   = 0;
412 		testp->xt_end_time     = 0;
413 		testp->xt_test_actions = XT_ACTION_NONE;
414 		testp->xt_retval       = -1;
415 	}
416 	return KERN_SUCCESS;
417 }
418 
419 
/*
 * POST: basic zone-allocator sanity check.
 *
 * Creates a destructible zone of uint64_t elements, verifies the fresh
 * zone has no free elements, performs one zalloc/zfree round trip, and
 * emits a sample T_PERF datapoint (current thread count).
 *
 * Returns KERN_SUCCESS; the T_* assertions resolve the real outcome.
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* a freshly created zone should not have free elements yet */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
443 
/*
 * qsort() comparator ordering uint64_t values ascending.
 * Returns -1, 0 or 1 for less-than, equal and greater-than.
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t lhs = *(const uint64_t *)a;
	const uint64_t rhs = *(const uint64_t *)b;

	/* (lhs > rhs) - (lhs < rhs) yields exactly -1/0/1 without branches */
	return (lhs > rhs) - (lhs < rhs);
}
460 
/*
 * Population count: number of bits set in a 64-bit value.
 * Delegates to the compiler builtin.
 */
static int
count_bits(uint64_t value)
{
	return __builtin_popcountll(value);
}
470 
kern_return_t
RandomULong_test()
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 *  a. RandomULong works
 *  b. The generated numbers match the following entropy criteria:
 *     For a thousand iterations, verify:
 *          1. mean entropy > 12 bits
 *          2. min entropy > 4 bits
 *          3. No Duplicate
 *          4. No incremental/decremental pattern in a window of 3
 *          5. No Zero
 *          6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
/* ceil(CONF_WINDOW_SIZE / 2); the trailing ">> 0" is a no-op */
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 * "Bit entropy" here is the Hamming distance between consecutive samples.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window (clamped to the end of the sample array)
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 * (sorting first makes duplicates adjacent)
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
607 
608 
/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/*
 * Sample fixed-layout record used to exercise custom kcdata type
 * registration; its field layout is described by test_disk_io_stats_def.
 */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));
617 
/*
 * Subtype descriptors mirroring struct sample_disk_io_stats, one entry
 * per field; offsets are in units of uint64_t slots from the start of
 * the packed struct.
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		/* 4-element array: size encodes both count and element size */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
648 
/*
 * POST: exercise the kcdata (kernel crashdata) buffer API.
 *
 * Covers: negative-argument handling of kcdata_memory_static_init and
 * kcdata_get_memory_addr, buffer begin/end header layout, used-bytes
 * accounting, zero-size and oversize item allocation, the
 * uint32/uint64-with-description conveniences, KCDATA_TYPE_ARRAY
 * creation, and custom type registration.
 *
 * Returns KERN_SUCCESS; T_ASSERT macros resolve the real outcome.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* poison the length first so we can verify init rewrites it */
	test_kc_data.kcd_length   = 0xdeadbeef;

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* item flags pack the type in the high 32 bits and count in the low */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
759 
760 /*
761  *  kern_return_t
762  *  kcdata_api_assert_tests()
763  *  {
764  *       kern_return_t retval       = 0;
765  *       void * assert_check_retval = NULL;
766  *       test_kc_data2.kcd_length   = 0xdeadbeef;
767  *       mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
768  *       T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
769  *
770  *       retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
771  *                                          KCFLAG_USE_MEMCOPY);
772  *
773  *       T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
774  *
775  *       retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
776  *       T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
777  *
778  *       // this will assert
779  *       retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
780  *       T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
781  *       T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
782  *
783  *       return KERN_SUCCESS;
784  *  }
785  */
786 
787 #if defined(__arm64__)
788 
789 #include <arm/pmap.h>
790 
791 #define MAX_PMAP_OBJECT_ELEMENT 100000
792 
793 extern struct vm_object pmap_object_store; /* store pt pages */
794 extern unsigned long gPhysBase, gPhysSize, first_avail;
795 
796 /*
797  * Define macros to transverse the pmap object structures and extract
798  * physical page number with information from low global only
799  * This emulate how Astris extracts information from coredump
800  */
#if defined(__arm64__)

/*
 * Unpack a 32-bit packed vm_page pointer into a full kernel address,
 * using only fields published in lowGlo (as Astris must do when parsing
 * a coredump).  A pointer with lgPmapMemFromArrayMask set is an index
 * into the vm_pages array; otherwise it is a packed address relative to
 * lgPmapMemPackedBaseAddr, shifted by lgPmapMemPackedShift.
 * A zero packed pointer unpacks to 0 (queue terminator).
 */
static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	return (p & lowGlo.lgPmapMemFromArrayMask)
	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif
819 
/* the first element of a queue is reached via the head's next pointer */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* the queue is circular: iteration ends when we are back at the head */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

/* walk every element on the queue, chaining through the link field at lgPmapMemChainOffset */
#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* physical page number -> physical address, using the page shift from lowGlo */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
829 
830 static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)831 astris_vm_page_get_phys_page(uintptr_t m)
832 {
833 	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
834 	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
835 	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
836 }
837 
/*
 * Verify that the lowGlo layout matches the live kernel structures and
 * that the pmap page queue can be walked coredump-style (see the
 * astris_* helpers above): every page found must map to a physical
 * address inside [gPhysBase, gPhysBase + gPhysSize).
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	/* the lowGlo static region must lie within physical memory, before first_avail */
	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	/* layout version and magic the coredump consumer expects */
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	/* packed-pointer parameters used by astris_vm_page_unpack_ptr() */
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* guard against a corrupt queue that never terminates */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* the queue must contain at least one page table page */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
879 #endif /* defined(__arm64__) */
880 
/*
 * Per-thread arguments for thread_lock_unlock_kernel_primitive().
 */
struct ts_kern_prim_test_args {
	int *end_barrier;       /* incremented when the thread is done */
	int *notify_b;          /* counter bumped before taking the test lock */
	int *wait_event_b;      /* counter waited on before taking the test lock */
	int before_num;         /* value wait_event_b must reach */
	int *notify_a;          /* counter bumped after taking the test lock */
	int *wait_event_a;      /* counter waited on after taking the test lock */
	int after_num;          /* value wait_event_a must reach */
	int priority_to_check;  /* expected pushed sched_pri; 0 skips the check */
};
891 
/*
 * Block the calling thread until *var reaches num (NULL var: nothing to
 * wait for).
 *
 * Classic assert_wait()/thread_block() pattern: the wait is asserted
 * first and the condition re-checked before actually blocking, so a
 * wake_threads() between the outer check and assert_wait() is not lost.
 * If the condition became true in that window the pending wait is
 * cancelled with clear_wait() instead of blocking.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			/* re-check: a waker may have advanced var before assert_wait() */
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				/* condition already met; cancel the asserted wait */
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
908 
909 static void
wake_threads(int * var)910 wake_threads(
911 	int* var)
912 {
913 	if (var) {
914 		os_atomic_inc(var, relaxed);
915 		thread_wakeup((event_t) var);
916 	}
917 }
918 
919 extern void IOSleep(int);
920 
/*
 * Body of the turnstile kernel-primitive test threads.
 *
 * Each thread optionally waits for 'before_num' peers, takes the test
 * turnstile lock, signals/waits around the acquisition, and — when
 * priority_to_check is set (the owner) — verifies that its sched_pri
 * was pushed up to that value by the waiters blocked on the turnstile.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	/* give the waiters time to block on the turnstile and push us */
	IOSleep(100);

	if (info->priority_to_check) {
		/* sample our own sched_pri under the thread lock */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
954 
955 kern_return_t
ts_kernel_primitive_test(void)956 ts_kernel_primitive_test(void)
957 {
958 	thread_t owner, thread1, thread2;
959 	struct ts_kern_prim_test_args targs[2] = {};
960 	kern_return_t result;
961 	int end_barrier = 0;
962 	int owner_locked = 0;
963 	int waiters_ready = 0;
964 
965 	T_LOG("Testing turnstile kernel primitive");
966 
967 	targs[0].notify_b = NULL;
968 	targs[0].wait_event_b = NULL;
969 	targs[0].before_num = 0;
970 	targs[0].notify_a = &owner_locked;
971 	targs[0].wait_event_a = &waiters_ready;
972 	targs[0].after_num = 2;
973 	targs[0].priority_to_check = 90;
974 	targs[0].end_barrier = &end_barrier;
975 
976 	// Start owner with priority 80
977 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
978 	T_ASSERT(result == KERN_SUCCESS, "Starting owner");
979 
980 	targs[1].notify_b = &waiters_ready;
981 	targs[1].wait_event_b = &owner_locked;
982 	targs[1].before_num = 1;
983 	targs[1].notify_a = NULL;
984 	targs[1].wait_event_a = NULL;
985 	targs[1].after_num = 0;
986 	targs[1].priority_to_check = 0;
987 	targs[1].end_barrier = &end_barrier;
988 
989 	// Start waiters with priority 85 and 90
990 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
991 	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
992 
993 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
994 	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
995 
996 	wait_threads(&end_barrier, 3);
997 
998 	return KERN_SUCCESS;
999 }
1000 
/* prim_type selector: which lock flavor the primitive_* helpers use */
#define MTX_LOCK 0
#define RW_LOCK 1

#define NUM_THREADS 4

/*
 * State shared by all threads of one synchronization test.
 */
struct synch_test_common {
	unsigned int nthreads;  /* number of entries in threads[] */
	thread_t *threads;      /* slots published by start_threads() */
	int max_pri;            /* highest priority handed out (capped at 95) */
	int test_done;          /* count of finished threads (wake/wait counter) */
};
1012 
1013 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1014 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1015 {
1016 	info->nthreads = nthreads;
1017 	info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1018 	if (!info->threads) {
1019 		return ENOMEM;
1020 	}
1021 
1022 	return KERN_SUCCESS;
1023 }
1024 
/*
 * Release the threads[] array allocated by init_synch_test_common().
 */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree_type(thread_t, info->nthreads, info->threads);
}
1030 
/*
 * Start info->nthreads kernel threads running func, at priorities
 * 75, 80, 85, ... (5 apart), publishing each thread_t into
 * info->threads[] with a release store.  The expected maximum priority
 * is recorded in info->max_pri, capped at 95.  If sleep_after_first is
 * set, sleep 100ms after launching the first thread so it reaches the
 * primitive first.
 */
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	/* highest priority handed out, capped at 95 */
	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		/* publish the slot for wait_for_waiters()/exclude_current_waiter() */
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}
1062 
1063 static unsigned int
get_max_pri(struct synch_test_common * info)1064 get_max_pri(struct synch_test_common * info)
1065 {
1066 	return info->max_pri;
1067 }
1068 
/*
 * Block until every test thread has called notify_waiter().
 */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1074 
/*
 * Signal the thread blocked in wait_all_thread() that this test thread
 * is done.
 */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1080 
1081 static void
wait_for_waiters(struct synch_test_common * info)1082 wait_for_waiters(struct synch_test_common *info)
1083 {
1084 	uint i, j;
1085 	thread_t thread;
1086 
1087 	for (i = 0; i < info->nthreads; i++) {
1088 		j = 0;
1089 		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
1090 			if (j % 100 == 0) {
1091 				IOSleep(10);
1092 			}
1093 			j++;
1094 		}
1095 
1096 		if (info->threads[i] != current_thread()) {
1097 			j = 0;
1098 			do {
1099 				thread = os_atomic_load(&info->threads[i], relaxed);
1100 				if (thread == (thread_t) 1) {
1101 					break;
1102 				}
1103 
1104 				if (!(thread->state & TH_RUN)) {
1105 					break;
1106 				}
1107 
1108 				if (j % 100 == 0) {
1109 					IOSleep(100);
1110 				}
1111 				j++;
1112 
1113 				if (thread->started == FALSE) {
1114 					continue;
1115 				}
1116 			} while (thread->state & TH_RUN);
1117 		}
1118 	}
1119 }
1120 
1121 static void
exclude_current_waiter(struct synch_test_common * info)1122 exclude_current_waiter(struct synch_test_common *info)
1123 {
1124 	uint i, j;
1125 
1126 	for (i = 0; i < info->nthreads; i++) {
1127 		j = 0;
1128 		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
1129 			if (j % 100 == 0) {
1130 				IOSleep(10);
1131 			}
1132 			j++;
1133 		}
1134 
1135 		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
1136 			os_atomic_store(&info->threads[i], (thread_t)1, release);
1137 			return;
1138 		}
1139 	}
1140 }
1141 
/*
 * Shared state for the sleep_with_inheritor / gate tests.
 * 'head' must stay first: the thread bodies cast this struct to
 * struct synch_test_common.
 */
struct info_sleep_inheritor_test {
	struct synch_test_common head;  /* common thread bookkeeping */
	lck_mtx_t mtx_lock;             /* used when prim_type == MTX_LOCK */
	lck_rw_t rw_lock;               /* used when prim_type == RW_LOCK */
	decl_lck_mtx_gate_data(, gate); /* embedded gate */
	boolean_t gate_closed;
	int prim_type;                  /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;         /* highest waiter pri seen by thread_steal_work() */
	int synch_value;                /* target for the synch rendezvous counter */
	int synch;                      /* wake/wait rendezvous counter */
	int value;                      /* per-test scratch counter */
	int handoff_failure;            /* wakeups that found no waiter (expected once) */
	thread_t thread_inheritor;      /* current push target / sleep event */
	bool use_alloc_gate;            /* use alloc_gate instead of the embedded gate */
	gate_t *alloc_gate;             /* gate from primitive_gate_alloc() */
	struct obj_cached **obj_cache;
	kern_apfs_reflock_data(, reflock);
	int reflock_protected_status;
};
1163 
1164 static void
primitive_lock(struct info_sleep_inheritor_test * info)1165 primitive_lock(struct info_sleep_inheritor_test *info)
1166 {
1167 	switch (info->prim_type) {
1168 	case MTX_LOCK:
1169 		lck_mtx_lock(&info->mtx_lock);
1170 		break;
1171 	case RW_LOCK:
1172 		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1173 		break;
1174 	default:
1175 		panic("invalid type %d", info->prim_type);
1176 	}
1177 }
1178 
1179 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1180 primitive_unlock(struct info_sleep_inheritor_test *info)
1181 {
1182 	switch (info->prim_type) {
1183 	case MTX_LOCK:
1184 		lck_mtx_unlock(&info->mtx_lock);
1185 		break;
1186 	case RW_LOCK:
1187 		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1188 		break;
1189 	default:
1190 		panic("invalid type %d", info->prim_type);
1191 	}
1192 }
1193 
1194 static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test * info)1195 primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
1196 {
1197 	wait_result_t ret = KERN_SUCCESS;
1198 	switch (info->prim_type) {
1199 	case MTX_LOCK:
1200 		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1201 		break;
1202 	case RW_LOCK:
1203 		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1204 		break;
1205 	default:
1206 		panic("invalid type %d", info->prim_type);
1207 	}
1208 
1209 	return ret;
1210 }
1211 
1212 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1213 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1214 {
1215 	switch (info->prim_type) {
1216 	case MTX_LOCK:
1217 	case RW_LOCK:
1218 		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1219 		break;
1220 	default:
1221 		panic("invalid type %d", info->prim_type);
1222 	}
1223 }
1224 
1225 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1226 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1227 {
1228 	switch (info->prim_type) {
1229 	case MTX_LOCK:
1230 	case RW_LOCK:
1231 		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1232 		break;
1233 	default:
1234 		panic("invalid type %d", info->prim_type);
1235 	}
1236 	return;
1237 }
1238 
1239 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1240 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1241 {
1242 	switch (info->prim_type) {
1243 	case MTX_LOCK:
1244 	case RW_LOCK:
1245 		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1246 		break;
1247 	default:
1248 		panic("invalid type %d", info->prim_type);
1249 	}
1250 	return;
1251 }
1252 
1253 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1254 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1255 {
1256 	gate_t *gate = &info->gate;
1257 	if (info->use_alloc_gate == true) {
1258 		gate = info->alloc_gate;
1259 	}
1260 	kern_return_t ret = KERN_SUCCESS;
1261 	switch (info->prim_type) {
1262 	case MTX_LOCK:
1263 		ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1264 		break;
1265 	case RW_LOCK:
1266 		ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1267 		break;
1268 	default:
1269 		panic("invalid type %d", info->prim_type);
1270 	}
1271 	return ret;
1272 }
1273 
1274 static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test * info)1275 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1276 {
1277 	gate_t *gate = &info->gate;
1278 	if (info->use_alloc_gate == true) {
1279 		gate = info->alloc_gate;
1280 	}
1281 	gate_wait_result_t ret = GATE_OPENED;
1282 	switch (info->prim_type) {
1283 	case MTX_LOCK:
1284 		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1285 		break;
1286 	case RW_LOCK:
1287 		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1288 		break;
1289 	default:
1290 		panic("invalid type %d", info->prim_type);
1291 	}
1292 	return ret;
1293 }
1294 
1295 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1296 primitive_gate_open(struct info_sleep_inheritor_test *info)
1297 {
1298 	gate_t *gate = &info->gate;
1299 	if (info->use_alloc_gate == true) {
1300 		gate = info->alloc_gate;
1301 	}
1302 	switch (info->prim_type) {
1303 	case MTX_LOCK:
1304 		lck_mtx_gate_open(&info->mtx_lock, gate);
1305 		break;
1306 	case RW_LOCK:
1307 		lck_rw_gate_open(&info->rw_lock, gate);
1308 		break;
1309 	default:
1310 		panic("invalid type %d", info->prim_type);
1311 	}
1312 }
1313 
1314 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1315 primitive_gate_close(struct info_sleep_inheritor_test *info)
1316 {
1317 	gate_t *gate = &info->gate;
1318 	if (info->use_alloc_gate == true) {
1319 		gate = info->alloc_gate;
1320 	}
1321 
1322 	switch (info->prim_type) {
1323 	case MTX_LOCK:
1324 		lck_mtx_gate_close(&info->mtx_lock, gate);
1325 		break;
1326 	case RW_LOCK:
1327 		lck_rw_gate_close(&info->rw_lock, gate);
1328 		break;
1329 	default:
1330 		panic("invalid type %d", info->prim_type);
1331 	}
1332 }
1333 
1334 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1335 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1336 {
1337 	gate_t *gate = &info->gate;
1338 	if (info->use_alloc_gate == true) {
1339 		gate = info->alloc_gate;
1340 	}
1341 
1342 	switch (info->prim_type) {
1343 	case MTX_LOCK:
1344 		lck_mtx_gate_steal(&info->mtx_lock, gate);
1345 		break;
1346 	case RW_LOCK:
1347 		lck_rw_gate_steal(&info->rw_lock, gate);
1348 		break;
1349 	default:
1350 		panic("invalid type %d", info->prim_type);
1351 	}
1352 }
1353 
1354 static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test * info,int flags)1355 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1356 {
1357 	gate_t *gate = &info->gate;
1358 	if (info->use_alloc_gate == true) {
1359 		gate = info->alloc_gate;
1360 	}
1361 
1362 	kern_return_t ret = KERN_SUCCESS;
1363 	switch (info->prim_type) {
1364 	case MTX_LOCK:
1365 		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1366 		break;
1367 	case RW_LOCK:
1368 		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1369 		break;
1370 	default:
1371 		panic("invalid type %d", info->prim_type);
1372 	}
1373 	return ret;
1374 }
1375 
1376 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1377 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1378 {
1379 	gate_t *gate = &info->gate;
1380 	if (info->use_alloc_gate == true) {
1381 		gate = info->alloc_gate;
1382 	}
1383 
1384 	switch (info->prim_type) {
1385 	case MTX_LOCK:
1386 		lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1387 		break;
1388 	case RW_LOCK:
1389 		lck_rw_gate_assert(&info->rw_lock, gate, type);
1390 		break;
1391 	default:
1392 		panic("invalid type %d", info->prim_type);
1393 	}
1394 }
1395 
1396 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1397 primitive_gate_init(struct info_sleep_inheritor_test *info)
1398 {
1399 	switch (info->prim_type) {
1400 	case MTX_LOCK:
1401 		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1402 		break;
1403 	case RW_LOCK:
1404 		lck_rw_gate_init(&info->rw_lock, &info->gate);
1405 		break;
1406 	default:
1407 		panic("invalid type %d", info->prim_type);
1408 	}
1409 }
1410 
1411 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1412 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1413 {
1414 	switch (info->prim_type) {
1415 	case MTX_LOCK:
1416 		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1417 		break;
1418 	case RW_LOCK:
1419 		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1420 		break;
1421 	default:
1422 		panic("invalid type %d", info->prim_type);
1423 	}
1424 }
1425 
1426 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1427 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1428 {
1429 	gate_t *gate;
1430 	switch (info->prim_type) {
1431 	case MTX_LOCK:
1432 		gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1433 		break;
1434 	case RW_LOCK:
1435 		gate = lck_rw_gate_alloc_init(&info->rw_lock);
1436 		break;
1437 	default:
1438 		panic("invalid type %d", info->prim_type);
1439 	}
1440 	info->alloc_gate = gate;
1441 }
1442 
1443 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1444 primitive_gate_free(struct info_sleep_inheritor_test *info)
1445 {
1446 	T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1447 
1448 	switch (info->prim_type) {
1449 	case MTX_LOCK:
1450 		lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1451 		break;
1452 	case RW_LOCK:
1453 		lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1454 		break;
1455 	default:
1456 		panic("invalid type %d", info->prim_type);
1457 	}
1458 	info->alloc_gate = NULL;
1459 }
1460 
/*
 * Use sleep_with_inheritor/wakeup_one_with_inheritor as a handoff-style
 * mutex: the first thread through becomes thread_inheritor (the
 * "owner"), later arrivals sleep pushing on it.  Each wakeup hands
 * ownership to exactly one woken thread; the very last wakeup finds no
 * waiter, which is counted in handoff_failure and must happen only once.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first thread: take ownership without sleeping */
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	IOSleep(100);
	info->value++;

	primitive_lock(info);

	/* we must be the current owner before handing off */
	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* no waiter to hand off to: allowed only once, at the end */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* drop the reference on the woken inheritor */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* all priority promotions must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1513 
/*
 * One thread becomes the inheritor and verifies it is pushed to the
 * maximum priority of all the sleeping threads; every other thread just
 * sleeps with the inheritor as push target and is woken all at once.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first thread in: become the inheritor */
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* wait for all other threads to reach their sleep */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* every sleeper pushes on us: we should run at the max priority */
		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* all priority promotions must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1553 
/*
 * Exercise change_sleep_inheritor(): the first thread becomes the
 * inheritor; a second "stealer" thread takes over the inheritorship via
 * primitive_change_sleep_inheritor() and verifies it then receives the
 * push from the remaining sleepers.  exclude_current_waiter() removes
 * the inheritor and the stealer from the set wait_for_waiters() polls.
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first thread: initial inheritor */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* only wake if the stealer has not already taken over */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* second thread: steal the inheritorship */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* we should now carry the highest sleeping priority */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* remaining threads: record the max sleeping priority and sleep */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* all priority promotions must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1614 
/*
 * All threads decrement value; the last one through (value == 0) wakes
 * everybody, the others sleep with NO inheritor (thread_inheritor set
 * to NULL), so no priority push takes place.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* last thread through: release everyone */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* sleep without an inheritor: nobody gets pushed */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* all priority promotions must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1641 
1642 static void
thread_mtx_work(void * args,__unused wait_result_t wr)1643 thread_mtx_work(
1644 	void *args,
1645 	__unused wait_result_t wr)
1646 {
1647 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1648 	uint my_pri = current_thread()->sched_pri;
1649 	int i;
1650 	u_int8_t rand;
1651 	unsigned int mod_rand;
1652 	uint max_pri;
1653 
1654 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1655 
1656 	for (i = 0; i < 10; i++) {
1657 		lck_mtx_lock(&info->mtx_lock);
1658 		if (info->thread_inheritor == NULL) {
1659 			info->thread_inheritor = current_thread();
1660 			lck_mtx_unlock(&info->mtx_lock);
1661 
1662 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1663 
1664 			wait_threads(&info->synch, info->synch_value - 1);
1665 			wait_for_waiters((struct synch_test_common *)info);
1666 			max_pri = get_max_pri((struct synch_test_common *) info);
1667 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1668 
1669 			os_atomic_store(&info->synch, 0, relaxed);
1670 
1671 			lck_mtx_lock(&info->mtx_lock);
1672 			info->thread_inheritor = NULL;
1673 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1674 			lck_mtx_unlock(&info->mtx_lock);
1675 			continue;
1676 		}
1677 
1678 		read_random(&rand, sizeof(rand));
1679 		mod_rand = rand % 2;
1680 
1681 		wake_threads(&info->synch);
1682 		switch (mod_rand) {
1683 		case 0:
1684 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1685 			lck_mtx_unlock(&info->mtx_lock);
1686 			break;
1687 		case 1:
1688 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1689 			break;
1690 		default:
1691 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1692 		}
1693 	}
1694 
1695 	/*
1696 	 * spin here to stop using the lock as mutex
1697 	 */
1698 	wake_threads(&info->synch);
1699 	wait_threads(&info->synch, info->synch_value);
1700 
1701 	for (i = 0; i < 10; i++) {
1702 		/* read_random might sleep so read it before acquiring the mtx as spin */
1703 		read_random(&rand, sizeof(rand));
1704 
1705 		lck_mtx_lock_spin(&info->mtx_lock);
1706 		if (info->thread_inheritor == NULL) {
1707 			info->thread_inheritor = current_thread();
1708 			lck_mtx_unlock(&info->mtx_lock);
1709 
1710 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1711 			wait_for_waiters((struct synch_test_common *)info);
1712 			max_pri = get_max_pri((struct synch_test_common *) info);
1713 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1714 
1715 			lck_mtx_lock_spin(&info->mtx_lock);
1716 			info->thread_inheritor = NULL;
1717 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1718 			lck_mtx_unlock(&info->mtx_lock);
1719 			continue;
1720 		}
1721 
1722 		mod_rand = rand % 2;
1723 		switch (mod_rand) {
1724 		case 0:
1725 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1726 			lck_mtx_unlock(&info->mtx_lock);
1727 			break;
1728 		case 1:
1729 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1730 			lck_mtx_unlock(&info->mtx_lock);
1731 			break;
1732 		default:
1733 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1734 		}
1735 	}
1736 	assert(current_thread()->kern_promotion_schedpri == 0);
1737 	notify_waiter((struct synch_test_common *)info);
1738 
1739 	thread_terminate_self();
1740 }
1741 
/*
 * Worker for test_rw_lock: exercises lck_rw_sleep_with_inheritor with every
 * LCK_SLEEP_* rw variant. The first thread to find thread_inheritor == NULL
 * upgrades to exclusive, installs itself as inheritor, waits for everybody
 * else to block, verifies it inherited the max waiter priority, then wakes
 * them all. Every other thread sleeps with a randomly chosen sleep flavor.
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		/* Always enter shared; upgrade only if we might become inheritor. */
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				/* Upgrade succeeded: recheck under exclusive, as
				 * another upgrader may have won in between. */
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					/* All waiters push on us: we must run at the max waiter pri. */
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
				/* Lost the race: fall through to the sleep paths below,
				 * still holding the lock exclusive (type tracks that). */
			} else {
				/* Failed upgrade drops the shared hold entirely: retry. */
				goto try_again;
			}
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			/* DEFAULT: lock is re-acquired in the same mode on wakeup. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			/* UNLOCK: lock is not re-acquired, so no unlock here. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			/* SHARED: wakes up holding the lock shared, whatever it was before. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			/* EXCLUSIVE: wakes up holding the lock exclusive. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/* No promotion may leak past the test. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1818 
/*
 * States of a cache entry:
 *  - UNUSED: entry is allocated but holds no live object (buff is "I am groot").
 *  - REAL: entry holds a fully initialized object.
 *  - PLACEHOLDER: an initializer holds the reflock and is populating the entry;
 *    other threads must wait on the reflock and never observe this state
 *    while holding a reference.
 */
#define OBJ_STATE_UNUSED        0
#define OBJ_STATE_REAL          1
#define OBJ_STATE_PLACEHOLDER   2

/* Big enough for "I am groot" / "I am %d" plus the NUL terminator. */
#define OBJ_BUFF_SIZE 11
/* One slot of the test cache; refcount/lock semantics come from the reflock. */
struct obj_cached {
	int obj_id;                             /* object identity, 0 when unused */
	int obj_state;                          /* one of the OBJ_STATE_* values */
	struct kern_apfs_reflock *obj_refcount; /* combined refcount + lock */
	char obj_buff[OBJ_BUFF_SIZE];           /* payload checked by the readers */
};

/* Two slots but up to three object ids, so the cache-full path is exercised. */
#define CACHE_SIZE 2
#define USE_CACHE_ROUNDS 15

#define REFCOUNT_REFLOCK_ROUNDS 15
1835 
1836 /*
1837  * For the reflock cache test the cache is allocated
1838  * and its pointer is saved in obj_cache.
1839  * The lock for the cache is going to be one of the exclusive
1840  * locks already present in struct info_sleep_inheritor_test.
1841  */
1842 
1843 static struct obj_cached *
alloc_init_cache_entry(void)1844 alloc_init_cache_entry(void)
1845 {
1846 	struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1847 	cache_entry->obj_id = 0;
1848 	cache_entry->obj_state = OBJ_STATE_UNUSED;
1849 	cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
1850 	snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
1851 	return cache_entry;
1852 }
1853 
1854 static void
init_cache(struct info_sleep_inheritor_test * info)1855 init_cache(struct info_sleep_inheritor_test *info)
1856 {
1857 	struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1858 
1859 	int i;
1860 	for (i = 0; i < CACHE_SIZE; i++) {
1861 		obj_cache[i] = alloc_init_cache_entry();
1862 	}
1863 
1864 	info->obj_cache = obj_cache;
1865 }
1866 
1867 static void
check_cache_empty(struct info_sleep_inheritor_test * info)1868 check_cache_empty(struct info_sleep_inheritor_test *info)
1869 {
1870 	struct obj_cached **obj_cache = info->obj_cache;
1871 
1872 	int i, ret;
1873 	for (i = 0; i < CACHE_SIZE; i++) {
1874 		if (obj_cache[i] != NULL) {
1875 			T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1876 			T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1877 			ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1878 			T_ASSERT(ret == 0, "checked buff correctly emptied");
1879 		}
1880 	}
1881 }
1882 
1883 static void
free_cache(struct info_sleep_inheritor_test * info)1884 free_cache(struct info_sleep_inheritor_test *info)
1885 {
1886 	struct obj_cached **obj_cache = info->obj_cache;
1887 
1888 	int i;
1889 	for (i = 0; i < CACHE_SIZE; i++) {
1890 		if (obj_cache[i] != NULL) {
1891 			kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1892 			obj_cache[i]->obj_refcount = NULL;
1893 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1894 			obj_cache[i] = NULL;
1895 		}
1896 	}
1897 
1898 	kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1899 	info->obj_cache = NULL;
1900 }
1901 
1902 static struct obj_cached *
find_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info)1903 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1904 {
1905 	struct obj_cached **obj_cache = info->obj_cache;
1906 	int i;
1907 	for (i = 0; i < CACHE_SIZE; i++) {
1908 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1909 			return obj_cache[i];
1910 		}
1911 	}
1912 	return NULL;
1913 }
1914 
1915 static bool
free_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info,struct obj_cached * expected)1916 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, struct obj_cached *expected)
1917 {
1918 	struct obj_cached **obj_cache = info->obj_cache;
1919 	int i;
1920 	for (i = 0; i < CACHE_SIZE; i++) {
1921 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1922 			assert(obj_cache[i] == expected);
1923 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1924 			obj_cache[i] = NULL;
1925 			return true;
1926 		}
1927 	}
1928 	return false;
1929 }
1930 
1931 static struct obj_cached *
find_empty_spot_in_cache(struct info_sleep_inheritor_test * info)1932 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1933 {
1934 	struct obj_cached **obj_cache = info->obj_cache;
1935 	int i;
1936 	for (i = 0; i < CACHE_SIZE; i++) {
1937 		if (obj_cache[i] == NULL) {
1938 			obj_cache[i] = alloc_init_cache_entry();
1939 			return obj_cache[i];
1940 		}
1941 		if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1942 			return obj_cache[i];
1943 		}
1944 	}
1945 	return NULL;
1946 }
1947 
/*
 * Look up (or create) the object obj_id in the cache and take a reference
 * on it, returning its buffer through *buff.
 *
 * Returns 0 on success (caller must balance with put_obj_cache()),
 * -1 when the cache is full of in-use objects.
 *
 * Locking protocol: the cache lock (primitive_lock) protects the slot
 * array; the per-entry reflock serializes init/teardown of the entry and
 * lets waiters push their priority onto the thread holding it.
 */
static int
get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	kern_apfs_reflock_t refcount = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;

try_again:
	primitive_lock(info);
	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
		/* Found an allocated object on the cache with same id */

		/*
		 * copy the pointer to obj_refcount as obj might
		 * get deallocated after primitive_unlock()
		 */
		refcount = obj->obj_refcount;
		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
			/*
			 * Got a ref, let's check the state
			 */
			switch (obj->obj_state) {
			case OBJ_STATE_UNUSED:
				/* Recycled slot with our id: we must initialize it. */
				goto init;
			case OBJ_STATE_REAL:
				/* Fully initialized: just hand out the buffer. */
				goto done;
			case OBJ_STATE_PLACEHOLDER:
				/* Impossible while holding a ref: the initializer holds
				 * the reflock, so try_get_ref would have failed. */
				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
			default:
				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
			}
		} else {
			/*
			 * Didn't get a ref.
			 * This means either an obj_put() of the last ref is ongoing
			 * or an init of the object is happening.
			 * In both cases wait for that to finish and retry.
			 * While waiting, the thread that is holding the reflock
			 * will get a priority at least as high as this thread's.
			 */
			primitive_unlock(info);
			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			goto try_again;
		}
	} else {
		/* Look for a spot on the cache where we can save the object */

		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
			/*
			 * Sadness: cache is full, and everything in the cache is
			 * used.
			 */
			primitive_unlock(info);
			return -1;
		} else {
			/*
			 * copy the pointer to obj_refcount as obj might
			 * get deallocated after primitive_unlock()
			 */
			refcount = obj->obj_refcount;
			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
				/*
				 * Got a ref on an OBJ_STATE_UNUSED obj.
				 * Recycle time: claim the slot for our id.
				 */
				obj->obj_id = obj_id;
				goto init;
			} else {
				/*
				 * This could happen if the obj_put() has just changed the
				 * state to OBJ_STATE_UNUSED, but not unlocked the reflock yet.
				 */
				primitive_unlock(info);
				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
				goto try_again;
			}
		}
	}
init:
	assert(obj->obj_id == obj_id);
	assert(obj->obj_state == OBJ_STATE_UNUSED);
	/*
	 * We already got a ref on the object, but we need
	 * to initialize it. Mark it as
	 * OBJ_STATE_PLACEHOLDER and take the obj reflock.
	 * In this way all threads waiting for this init
	 * to finish will push on this thread.
	 */
	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
	/* Cannot fail: we hold a ref and the state is UNUSED, so nobody
	 * else can be holding or contending for the lock. */
	assert(ret == true);
	obj->obj_state = OBJ_STATE_PLACEHOLDER;
	primitive_unlock(info);

	//let's pretend we are populating the obj
	IOSleep(10);
	/*
	 * obj will not be deallocated while I hold a ref.
	 * So it is safe to access it.
	 */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);

	primitive_lock(info);
	/* The entry must still be in the cache and still ours. */
	obj2 = find_id_in_cache(obj_id, info);
	assert(obj == obj2);
	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);

	obj->obj_state = OBJ_STATE_REAL;
	kern_apfs_reflock_unlock(refcount);

done:
	*buff = obj->obj_buff;
	primitive_unlock(info);
	return 0;
}
2063 
/*
 * Drop one reference on the cached object obj_id (taken via get_obj_cache()).
 * The thread that releases the last reference flushes the entry back to the
 * UNUSED state while holding the reflock, so concurrent get_obj_cache()
 * callers wait (and push their priority) on it; when 'free' is set it also
 * removes the entry from the cache and frees the reflock.
 */
static void
put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_t refcount = NULL;

	primitive_lock(info);
	obj = find_id_in_cache(obj_id, info);
	primitive_unlock(info);

	/*
	 * Nobody should have been able to remove obj_id
	 * from the cache: we still hold a reference on it.
	 */
	assert(obj != NULL);
	assert(obj->obj_state == OBJ_STATE_REAL);

	refcount = obj->obj_refcount;

	/*
	 * This should never fail, as either the reflock
	 * was acquired when the state was OBJ_STATE_UNUSED to init,
	 * or by a put that reached zero. And if the latter
	 * happened, subsequent reflock_get_ref() had to wait for the
	 * transition back to OBJ_STATE_REAL.
	 */
	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
	assert(ret == true);
	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
		/* Not the last reference: nothing to tear down. */
		return;
	}

	/*
	 * Note: nobody at this point will be able to get a ref or a lock on
	 * refcount.
	 * All people waiting on refcount will push on this thread.
	 */

	//let's pretend we are flushing the obj somewhere.
	IOSleep(10);
	/* Restore the sentinel checked by check_cache_empty(). */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");

	primitive_lock(info);
	obj->obj_state = OBJ_STATE_UNUSED;
	if (free) {
		obj2 = find_id_in_cache(obj_id, info);
		assert(obj == obj2);

		ret = free_id_in_cache(obj_id, info, obj);
		assert(ret == true);
	}
	primitive_unlock(info);

	/* Unlock after the entry left the cache (or went UNUSED), so waiters
	 * re-observe a consistent state when they retry. */
	kern_apfs_reflock_unlock(refcount);

	if (free) {
		kern_apfs_reflock_free(refcount);
	}
}
2125 
/*
 * Worker for test_cache_reflock: each thread picks an object id in
 * [1, CACHE_SIZE + 1] — more ids than slots, so the cache-full retry path
 * of get_obj_cache() is exercised — then repeatedly gets the object,
 * verifies its buffer contents stay stable while the ref is held, and
 * puts it back, freeing the entry on every other round.
 */
static void
thread_use_cache(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	int my_obj;

	primitive_lock(info);
	/* info->value starts at nthreads and each thread decrements it once,
	 * so my_obj lands in [1, CACHE_SIZE + 1]. */
	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
	primitive_unlock(info);

	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
	/*
	 * This is the string I would expect to see
	 * on my_obj buff.
	 */
	char my_string[OBJ_BUFF_SIZE];
	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);

	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
		char *buff;
		while (get_obj_cache(my_obj, info, &buff) == -1) {
			/*
			 * Cache is full, wait.
			 */
			IOSleep(10);
		}
		/* Buffer must hold our string both right after get and after a
		 * delay, while other threads churn the cache. */
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		IOSleep(10);
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		/* Free the entry on even rounds to exercise both put paths. */
		put_obj_cache(my_obj, info, (i % 2 == 0));
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2169 
/*
 * Worker for test_refcount_reflock: hammers get_ref/put_ref on the shared
 * reflock, verifying the reflock_protected_status invariant — the thread
 * that performs the 0->1 ref transition (OUT_LOCKED on get) sets status to 1
 * under the lock, and the thread that performs 1->0 (OUT_LOCKED on put)
 * resets it to 0. Any thread holding a plain ref must observe status == 1.
 */
static void
thread_refcount_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_in_flags_t in_flags;

	T_LOG("Thread %p started", current_thread());
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
		/* Alternate between waiting and not waiting when the lock is held. */
		if ((i % 2) == 0) {
			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
		}
		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
		if (ret == true) {
			/* got reference, check if we did 0->1 */
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 0, "status init check");
				info->reflock_protected_status = 1;
				kern_apfs_reflock_unlock(&info->reflock);
			} else {
				/* Somebody else did 0->1 before us and must have set it. */
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
			}
			/* release the reference and check if we did 1->0 */
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
				info->reflock_protected_status = 0;
				kern_apfs_reflock_unlock(&info->reflock);
			}
		} else {
			/* didn't get a reference */
			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
				/* We declared intent to wait, so we must actually wait. */
				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2221 
/*
 * Worker for test_force_reflock: one thread (the first to bump info->value)
 * locks the reflock with KERN_APFS_REFLOCK_IN_ALLOW_FORCE and holds it;
 * every other thread takes and drops references with
 * KERN_APFS_REFLOCK_IN_FORCE, which must succeed even while the lock is held.
 */
static void
thread_force_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	/* First thread to increment value from 0 becomes the locker. */
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* Keep the lock held while the others force refs through it. */
		IOSleep(100);
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			/* FORCE variants must never fail, locked or not. */
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2262 
/*
 * Worker for test_lock_reflock: one thread (the first to bump info->value)
 * takes the reflock with default flags, sets the protected status, and holds
 * the lock for a while. The other threads try plain get_ref: while the lock
 * is held the try must fail; once it succeeds, the status must already be
 * back to 0 (i.e. refs are only granted after the unlock).
 */
static void
thread_lock_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	/* First thread to increment value from 0 becomes the locker. */
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
		info->reflock_protected_status = 1;
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		IOSleep(100);
		/* Status goes back to 0 before anybody can get a ref. */
		info->reflock_protected_status = 0;
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
			if (ret == true) {
				/* A ref was granted, so the lock must have been dropped. */
				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
				break;
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2308 
2309 static void
test_cache_reflock(struct info_sleep_inheritor_test * info)2310 test_cache_reflock(struct info_sleep_inheritor_test *info)
2311 {
2312 	info->synch = 0;
2313 	info->synch_value = info->head.nthreads;
2314 
2315 	info->value = info->head.nthreads;
2316 	/*
2317 	 * Use the mtx as cache lock
2318 	 */
2319 	info->prim_type = MTX_LOCK;
2320 
2321 	init_cache(info);
2322 
2323 	start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2324 	wait_all_thread((struct synch_test_common *)info);
2325 
2326 	check_cache_empty(info);
2327 	free_cache(info);
2328 }
2329 
2330 static void
test_refcount_reflock(struct info_sleep_inheritor_test * info)2331 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2332 {
2333 	info->synch = 0;
2334 	info->synch_value = info->head.nthreads;
2335 	kern_apfs_reflock_init(&info->reflock);
2336 	info->reflock_protected_status = 0;
2337 
2338 	start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2339 	wait_all_thread((struct synch_test_common *)info);
2340 
2341 	kern_apfs_reflock_destroy(&info->reflock);
2342 
2343 	T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2344 }
2345 
2346 static void
test_force_reflock(struct info_sleep_inheritor_test * info)2347 test_force_reflock(struct info_sleep_inheritor_test *info)
2348 {
2349 	info->synch = 0;
2350 	info->synch_value = info->head.nthreads;
2351 	kern_apfs_reflock_init(&info->reflock);
2352 	info->value = 0;
2353 
2354 	start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2355 	wait_all_thread((struct synch_test_common *)info);
2356 
2357 	kern_apfs_reflock_destroy(&info->reflock);
2358 }
2359 
2360 static void
test_lock_reflock(struct info_sleep_inheritor_test * info)2361 test_lock_reflock(struct info_sleep_inheritor_test *info)
2362 {
2363 	info->synch = 0;
2364 	info->synch_value = info->head.nthreads;
2365 	kern_apfs_reflock_init(&info->reflock);
2366 	info->value = 0;
2367 
2368 	start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2369 	wait_all_thread((struct synch_test_common *)info);
2370 
2371 	kern_apfs_reflock_destroy(&info->reflock);
2372 }
2373 
2374 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)2375 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2376 {
2377 	info->prim_type = prim_type;
2378 	info->synch = 0;
2379 	info->synch_value = info->head.nthreads;
2380 
2381 	info->thread_inheritor = NULL;
2382 
2383 	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2384 	wait_all_thread((struct synch_test_common *)info);
2385 }
2386 
2387 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)2388 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2389 {
2390 	info->prim_type = prim_type;
2391 
2392 	info->synch = 0;
2393 	info->synch_value = info->head.nthreads;
2394 	info->value = 0;
2395 	info->handoff_failure = 0;
2396 	info->thread_inheritor = NULL;
2397 
2398 	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2399 	wait_all_thread((struct synch_test_common *)info);
2400 
2401 	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2402 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2403 }
2404 
2405 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2406 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2407 {
2408 	info->prim_type = prim_type;
2409 
2410 	info->thread_inheritor = NULL;
2411 	info->steal_pri = 0;
2412 	info->synch = 0;
2413 	info->synch_value = info->head.nthreads;
2414 
2415 	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2416 	wait_all_thread((struct synch_test_common *)info);
2417 }
2418 
2419 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2420 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2421 {
2422 	info->prim_type = prim_type;
2423 	info->synch = 0;
2424 	info->synch_value = info->head.nthreads;
2425 
2426 	info->thread_inheritor = NULL;
2427 	info->value = info->head.nthreads;
2428 
2429 	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2430 	wait_all_thread((struct synch_test_common *)info);
2431 }
2432 
2433 static void
test_rw_lock(struct info_sleep_inheritor_test * info)2434 test_rw_lock(struct info_sleep_inheritor_test *info)
2435 {
2436 	info->thread_inheritor = NULL;
2437 	info->value = info->head.nthreads;
2438 	info->synch = 0;
2439 	info->synch_value = info->head.nthreads;
2440 
2441 	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2442 	wait_all_thread((struct synch_test_common *)info);
2443 }
2444 
2445 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)2446 test_mtx_lock(struct info_sleep_inheritor_test *info)
2447 {
2448 	info->thread_inheritor = NULL;
2449 	info->value = info->head.nthreads;
2450 	info->synch = 0;
2451 	info->synch_value = info->head.nthreads;
2452 
2453 	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2454 	wait_all_thread((struct synch_test_common *)info);
2455 }
2456 
/*
 * Entry point for the sleep-with-inheritor test suite: sets up the shared
 * mtx/rw locks, runs every sub-test in sequence, and tears everything down.
 * Returns KERN_SUCCESS; failures are reported through T_ASSERT.
 */
kern_return_t
ts_kernel_sleep_inheritor_test(void)
{
	struct info_sleep_inheritor_test info = {};

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with mtx sleep");
	test_change_sleep_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with rw sleep");
	test_change_sleep_inheritor(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, RW_LOCK);

	/*
	 * Testing mtx locking combinations
	 */
	T_LOG("Testing mtx locking combinations");
	test_mtx_lock(&info);

	/*
	 * Testing rw locking combinations
	 */
	T_LOG("Testing rw locking combinations");
	test_rw_lock(&info);

	/*
	 * Testing reflock / cond_sleep_with_inheritor
	 */
	T_LOG("Test cache reflock + cond_sleep_with_inheritor");
	test_cache_reflock(&info);
	T_LOG("Test force reflock + cond_sleep_with_inheritor");
	test_force_reflock(&info);
	T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
	test_refcount_reflock(&info);
	T_LOG("Test lock reflock + cond_sleep_with_inheritor");
	test_lock_reflock(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
2557 
/*
 * Gate worker exercising gate_steal: the first thread closes the gate, one
 * other thread steals it (and must inherit the highest waiter priority),
 * and the rest wait on the gate. The closer only reopens if it still owns
 * the gate (i.e. it wasn't stolen). Closer and stealer exclude themselves
 * from the waiter accounting since they are not blocked on the gate.
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* First thread: close the gate and become its holder. */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		/* - 2: neither the closer nor the stealer post on synch. */
		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		/* Only open if the gate was not stolen from us meanwhile. */
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* Second thread: steal the gate from the current holder. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* As gate keeper we must run at the max priority pushed by
			 * the threads blocked on the gate. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* Everybody else: record our priority and block on the gate. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* No promotion may leak past the test. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2617 
/*
 * Gate worker exercising gate_free: one thread closes the gate, waits for
 * all the others to block on it, then opens and frees it. Waiters must be
 * woken with GATE_OPENED.
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* We own the gate; everybody else will block on it. */
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		primitive_gate_open(info);
		/* Free while waiters are draining: must be safe. */
		primitive_gate_free(info);
	} else {
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2653 
/*
 * Gate worker using the gate as a hand-off mutex: each thread either
 * closes the gate or waits to receive it via handoff (GATE_HANDOFF),
 * increments the shared value while holding it, then hands it off to the
 * next waiter. Exactly one handoff is expected to find no waiters.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		/* Gate already closed: wait for it to be handed to us. */
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	IOSleep(100);
	/* Safe without the lock: the closed gate serializes this increment. */
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* Only the last thread through should find no waiters. */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2701 
/*
 * Test body for the gate push test.
 *
 * Only the single thread that manages to close the gate performs the
 * "work"; every other thread blocks in primitive_gate_wait() and
 * re-checks on wakeup. While the waiters are blocked, the gate holder
 * asserts it inherited the maximum waiter priority.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			/* We own the gate: do the work while everybody else waits. */
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			/* Wait until all other threads are blocked on the gate ... */
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			/* ... then check the waiters pushed their priority on us. */
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			/* Gate already closed by the worker: announce ourselves, wait, re-check. */
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* All priority pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2743 
2744 static void
test_gate_push(struct info_sleep_inheritor_test * info,int prim_type)2745 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2746 {
2747 	info->prim_type = prim_type;
2748 	info->use_alloc_gate = false;
2749 
2750 	primitive_gate_init(info);
2751 	info->work_to_do = TRUE;
2752 	info->synch = 0;
2753 	info->synch_value = NUM_THREADS;
2754 
2755 	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2756 	wait_all_thread((struct synch_test_common *)info);
2757 
2758 	primitive_gate_destroy(info);
2759 }
2760 
2761 static void
test_gate_handoff(struct info_sleep_inheritor_test * info,int prim_type)2762 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2763 {
2764 	info->prim_type = prim_type;
2765 	info->use_alloc_gate = false;
2766 
2767 	primitive_gate_init(info);
2768 
2769 	info->synch = 0;
2770 	info->synch_value = NUM_THREADS;
2771 	info->value = 0;
2772 	info->handoff_failure = 0;
2773 
2774 	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2775 	wait_all_thread((struct synch_test_common *)info);
2776 
2777 	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2778 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2779 
2780 	primitive_gate_destroy(info);
2781 }
2782 
2783 static void
test_gate_steal(struct info_sleep_inheritor_test * info,int prim_type)2784 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2785 {
2786 	info->prim_type = prim_type;
2787 	info->use_alloc_gate = false;
2788 
2789 	primitive_gate_init(info);
2790 
2791 	info->synch = 0;
2792 	info->synch_value = NUM_THREADS;
2793 	info->thread_inheritor = NULL;
2794 	info->steal_pri = 0;
2795 
2796 	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2797 	wait_all_thread((struct synch_test_common *)info);
2798 
2799 	primitive_gate_destroy(info);
2800 }
2801 
2802 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2803 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2804 {
2805 	(void)info;
2806 	(void) prim_type;
2807 	info->prim_type = prim_type;
2808 	info->use_alloc_gate = true;
2809 
2810 	primitive_gate_alloc(info);
2811 
2812 	info->synch = 0;
2813 	info->synch_value = NUM_THREADS;
2814 
2815 	start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2816 	wait_all_thread((struct synch_test_common *)info);
2817 
2818 	T_ASSERT(info->alloc_gate == NULL, "gate free");
2819 	info->use_alloc_gate = false;
2820 }
2821 
2822 kern_return_t
ts_kernel_gate_test(void)2823 ts_kernel_gate_test(void)
2824 {
2825 	struct info_sleep_inheritor_test info = {};
2826 
2827 	T_LOG("Testing gate primitive");
2828 
2829 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2830 
2831 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2832 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2833 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2834 
2835 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2836 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2837 
2838 	/*
2839 	 * Testing the priority inherited by the keeper
2840 	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2841 	 */
2842 	T_LOG("Testing gate push, mtx");
2843 	test_gate_push(&info, MTX_LOCK);
2844 
2845 	T_LOG("Testing gate push, rw");
2846 	test_gate_push(&info, RW_LOCK);
2847 
2848 	/*
2849 	 * Testing the handoff
2850 	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2851 	 */
2852 	T_LOG("Testing gate handoff, mtx");
2853 	test_gate_handoff(&info, MTX_LOCK);
2854 
2855 	T_LOG("Testing gate handoff, rw");
2856 	test_gate_handoff(&info, RW_LOCK);
2857 
2858 	/*
2859 	 * Testing the steal
2860 	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2861 	 */
2862 	T_LOG("Testing gate steal, mtx");
2863 	test_gate_steal(&info, MTX_LOCK);
2864 
2865 	T_LOG("Testing gate steal, rw");
2866 	test_gate_steal(&info, RW_LOCK);
2867 
2868 	/*
2869 	 * Testing the alloc/free
2870 	 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2871 	 */
2872 	T_LOG("Testing gate alloc/free, mtx");
2873 	test_gate_alloc_free(&info, MTX_LOCK);
2874 
2875 	T_LOG("Testing gate alloc/free, rw");
2876 	test_gate_alloc_free(&info, RW_LOCK);
2877 
2878 	destroy_synch_test_common((struct synch_test_common *)&info);
2879 
2880 	lck_attr_free(lck_attr);
2881 	lck_grp_attr_free(lck_grp_attr);
2882 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2883 	lck_grp_free(lck_grp);
2884 
2885 	return KERN_SUCCESS;
2886 }
2887 
#define NUM_THREAD_CHAIN 6

/*
 * Shared state for the turnstile chain tests: NUM_THREAD_CHAIN threads
 * build chains of blocked threads using per-thread gates and/or
 * sleep-with-inheritor events, all serialized by a single mutex.
 */
struct turnstile_chain_test {
	struct synch_test_common head;  /* common bookkeeping: threads[], nthreads, waiter accounting */
	lck_mtx_t mtx_lock;             /* mutex the gates and sleeps synchronize on */
	int synch_value;                /* thread count threshold used with wait_threads() */
	int synch;                      /* first rendezvous counter */
	int synch2;                     /* second rendezvous counter (used by the gate chains) */
	gate_t gates[NUM_THREAD_CHAIN]; /* one gate per participating thread */
};
2898 
/*
 * Test body mixing gates and sleep_with_inheritor on the same mutex.
 *
 * Even-indexed threads first close their own gate. Then: thread 0 waits
 * for everybody, asserts it received the max priority push, and opens
 * gate 0; each odd thread i waits on gate i-1 and then wakes the event of
 * even thread i+1; each even thread i (i > 0) sleeps on thread i-1's
 * event with thread i-1 as inheritor and, once woken, opens its own gate.
 * The wakeups thus ripple down the whole chain, alternating gate and
 * sleep-with-inheritor hops, and every thread checks it inherited the
 * maximum priority while it was the blocker.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Rendezvous: make sure every even thread has closed its gate. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to block, then start the ripple. */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Find our position i in the chain and the events linking us to it. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd thread: block on the previous (even) thread's gate. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Wake the next (even) thread sleeping on our event, keeping the push. */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				/* Only the tail of the chain has nobody left to wake. */
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Woken: release the waiter blocked on our gate. */
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* All priority pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2994 
/*
 * Test body for a pure gate chain.
 *
 * Every thread closes its own gate. Thread 0 waits for everyone to
 * block, asserts it received the max priority push, and opens gate 0;
 * every other thread i blocks on gate i-1 and, once woken, opens its own
 * gate i, propagating the wakeup (and the priority push) down the chain.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Find our index i and close our own gate. */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	/* Rendezvous: make sure every thread has closed its gate. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to block, then start the ripple. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Block on the previous thread's gate ... */
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* ... then release the thread blocked on ours. */
		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* All priority pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3057 
/*
 * Test body for a sleep_with_inheritor chain.
 *
 * Each thread (except the first) sleeps on the previous thread's event
 * with that thread as inheritor. Thread 0 waits for everyone to block,
 * asserts it received the max priority push, then wakes thread 1; every
 * woken thread in turn wakes the next one, rippling down the chain.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to block, then start the ripple. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake the next thread without transferring the push. */
		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* Find our position i, the previous thread (our inheritor) and events. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		/* Sleep on the previous thread's event, pushing on it as inheritor. */
		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Woken: pass the wakeup down the chain, keeping the push. */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			/* Only the tail of the chain has nobody left to wake. */
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* All priority pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3135 
3136 static void
test_sleep_chain(struct turnstile_chain_test * info)3137 test_sleep_chain(struct turnstile_chain_test *info)
3138 {
3139 	info->synch = 0;
3140 	info->synch_value = info->head.nthreads;
3141 
3142 	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
3143 	wait_all_thread((struct synch_test_common *)info);
3144 }
3145 
3146 static void
test_gate_chain(struct turnstile_chain_test * info)3147 test_gate_chain(struct turnstile_chain_test *info)
3148 {
3149 	info->synch = 0;
3150 	info->synch2 = 0;
3151 	info->synch_value = info->head.nthreads;
3152 
3153 	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
3154 	wait_all_thread((struct synch_test_common *)info);
3155 }
3156 
3157 static void
test_sleep_gate_chain(struct turnstile_chain_test * info)3158 test_sleep_gate_chain(struct turnstile_chain_test *info)
3159 {
3160 	info->synch = 0;
3161 	info->synch2 = 0;
3162 	info->synch_value = info->head.nthreads;
3163 
3164 	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
3165 	wait_all_thread((struct synch_test_common *)info);
3166 }
3167 
3168 kern_return_t
ts_kernel_turnstile_chain_test(void)3169 ts_kernel_turnstile_chain_test(void)
3170 {
3171 	struct turnstile_chain_test info = {};
3172 	int i;
3173 
3174 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
3175 	lck_attr_t* lck_attr = lck_attr_alloc_init();
3176 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
3177 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
3178 
3179 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
3180 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3181 		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
3182 	}
3183 
3184 	T_LOG("Testing sleep chain, lck");
3185 	test_sleep_chain(&info);
3186 
3187 	T_LOG("Testing gate chain, lck");
3188 	test_gate_chain(&info);
3189 
3190 	T_LOG("Testing sleep and gate chain, lck");
3191 	test_sleep_gate_chain(&info);
3192 
3193 	destroy_synch_test_common((struct synch_test_common *)&info);
3194 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3195 		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
3196 	}
3197 	lck_attr_free(lck_attr);
3198 	lck_grp_attr_free(lck_grp_attr);
3199 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
3200 	lck_grp_free(lck_grp);
3201 
3202 	return KERN_SUCCESS;
3203 }
3204 
3205 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)3206 ts_kernel_timingsafe_bcmp_test(void)
3207 {
3208 	int i, buf_size;
3209 	char *buf = NULL;
3210 
3211 	// empty
3212 	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3213 	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3214 	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3215 
3216 	// equal
3217 	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3218 
3219 	// unequal
3220 	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3221 	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3222 	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3223 	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3224 
3225 	// all possible bitwise differences
3226 	for (i = 1; i < 256; i += 1) {
3227 		unsigned char a = 0;
3228 		unsigned char b = (unsigned char)i;
3229 
3230 		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3231 	}
3232 
3233 	// large
3234 	buf_size = 1024 * 16;
3235 	buf = kalloc_data(buf_size, Z_WAITOK);
3236 	T_EXPECT_NOTNULL(buf, "kalloc of buf");
3237 
3238 	read_random(buf, buf_size);
3239 	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3240 	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3241 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3242 
3243 	memcpy(buf + 128, buf, 128);
3244 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3245 
3246 	kfree_data(buf, buf_size);
3247 
3248 	return KERN_SUCCESS;
3249 }
3250 
/*
 * POST test exercising the %hx, %hhx and %llx length modifiers in the
 * kernel printf with a fixed, recognizable argument pattern.
 *
 * @return KERN_SUCCESS unconditionally (emits T_PASS).
 */
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	T_PASS("kprintf_hhx_test passed");
	return KERN_SUCCESS;
}
3263