xref: /xnu-12377.1.9/osfmk/tests/kernel_tests.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <machine/static_if.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_object_internal.h>
48 #include <vm/vm_protos.h>
49 #include <vm/vm_iokit.h>
50 #include <string.h>
51 #include <kern/kern_apfs_reflock.h>
52 
53 #if !(DEVELOPMENT || DEBUG)
54 #error "Testing is not enabled on RELEASE configurations"
55 #endif
56 
57 #include <tests/xnupost.h>
58 
59 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
60 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
61 
62 uint32_t total_post_tests_count = 0;
63 void xnupost_reset_panic_widgets(void);
64 
65 /* test declarations */
66 kern_return_t zalloc_test(void);
67 kern_return_t RandomULong_test(void);
68 kern_return_t kcdata_api_test(void);
69 kern_return_t ts_kernel_primitive_test(void);
70 kern_return_t ts_kernel_sleep_inheritor_test(void);
71 kern_return_t ts_kernel_gate_test(void);
72 kern_return_t ts_kernel_turnstile_chain_test(void);
73 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
74 
75 #if __ARM_VFP__
76 extern kern_return_t vfp_state_test(void);
77 #endif
78 
79 extern kern_return_t kprintf_hhx_test(void);
80 
81 #if defined(__arm64__)
82 kern_return_t pmap_coredump_test(void);
83 #endif
84 
85 extern kern_return_t console_serial_test(void);
86 extern kern_return_t console_serial_parallel_log_tests(void);
87 extern kern_return_t test_printf(void);
88 extern kern_return_t test_os_log(void);
89 extern kern_return_t test_os_log_handles(void);
90 extern kern_return_t test_os_log_parallel(void);
91 extern kern_return_t bitmap_post_test(void);
92 extern kern_return_t counter_tests(void);
93 #if ML_IO_TIMEOUTS_ENABLED
94 extern kern_return_t ml_io_timeout_test(void);
95 #endif
96 
97 #ifdef __arm64__
98 extern kern_return_t arm64_backtrace_test(void);
99 extern kern_return_t arm64_munger_test(void);
100 #if __ARM_PAN_AVAILABLE__
101 extern kern_return_t arm64_pan_test(void);
102 #endif
103 #if defined(HAS_APPLE_PAC)
104 extern kern_return_t arm64_ropjop_test(void);
105 #endif /* defined(HAS_APPLE_PAC) */
106 #if CONFIG_SPTM
107 extern kern_return_t arm64_panic_lockdown_test(void);
108 #endif /* CONFIG_SPTM */
109 #if HAS_SPECRES
110 extern kern_return_t specres_test(void);
111 #endif /* HAS_SPECRES */
112 #if BTI_ENFORCED
113 kern_return_t arm64_bti_test(void);
114 #endif /* BTI_ENFORCED */
115 extern kern_return_t arm64_speculation_guard_test(void);
116 extern kern_return_t arm64_aie_test(void);
117 #endif /* __arm64__ */
118 
119 extern kern_return_t test_thread_call(void);
120 
/*
 * Single global slot for the currently-registered panic widget.
 * xnupost_register_panic_widget() fills it in, xnupost_process_kdb_stop()
 * invokes it, and xnupost_reset_panic_widgets() clears it between tests.
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
125 
/*
 * Master table of kernel power-on self tests. Entries are conditionally
 * compiled per architecture/feature; xnupost_list_tests() assigns test
 * numbers in table order at boot.
 */
struct xnupost_test kernel_post_tests[] = {
	XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	XNUPOST_TEST_CONFIG_BASIC(test_printf),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_handles),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	XNUPOST_TEST_CONFIG_BASIC(arm64_backtrace_test),
	XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
#if __ARM_PAN_AVAILABLE__
	XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#if CONFIG_SPTM
	XNUPOST_TEST_CONFIG_BASIC(arm64_panic_lockdown_test),
#endif /* CONFIG_SPTM */
	XNUPOST_TEST_CONFIG_BASIC(arm64_speculation_guard_test),
#endif /* __arm64__ */
	XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	//XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	XNUPOST_TEST_CONFIG_BASIC(counter_tests),
#if ML_IO_TIMEOUTS_ENABLED
	XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
#endif
#if HAS_SPECRES
	XNUPOST_TEST_CONFIG_BASIC(specres_test),
#endif
};

/* Element count of the table above (xnupost_test_data_t is the element
 * struct type, not the pointer typedef, so the division is correct). */
uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
176 
/* Bits parsed out of the "kernPOST" boot-arg (see xnupost_parse_config). */
#define POSTARGS_RUN_TESTS 0x1            /* run the POST suite at boot */
#define POSTARGS_CONTROLLER_AVAILABLE 0x2 /* external controller can catch expected panics */
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4  /* honor "kernPOST_config" runlist */
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
/* Cached result of xnupost_parse_config(); KERN_INVALID_CAPABILITY means "not yet parsed". */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
/* Raw "kernPOST_config" boot-arg string: comma-separated test-number ranges. */
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
186 
187 kern_return_t
xnupost_parse_config()188 xnupost_parse_config()
189 {
190 	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
191 		return parse_config_retval;
192 	}
193 	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
194 
195 	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
196 		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
197 	}
198 
199 	if (kernel_post_args != 0) {
200 		parse_config_retval = KERN_SUCCESS;
201 		goto out;
202 	}
203 	parse_config_retval = KERN_NOT_SUPPORTED;
204 out:
205 	return parse_config_retval;
206 }
207 
208 boolean_t
xnupost_should_run_test(uint32_t test_num)209 xnupost_should_run_test(uint32_t test_num)
210 {
211 	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
212 		int64_t begin = 0, end = 999999;
213 		char * b = kernel_post_test_configs;
214 		while (*b) {
215 			get_range_bounds(b, &begin, &end);
216 			if (test_num >= begin && test_num <= end) {
217 				return TRUE;
218 			}
219 
220 			/* skip to the next "," */
221 			while (*b != ',') {
222 				if (*b == '\0') {
223 					return FALSE;
224 				}
225 				b++;
226 			}
227 			/* skip past the ',' */
228 			b++;
229 		}
230 		return FALSE;
231 	}
232 	return TRUE;
233 }
234 
235 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)236 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
237 {
238 	if (KERN_SUCCESS != xnupost_parse_config()) {
239 		return KERN_FAILURE;
240 	}
241 
242 	xnupost_test_t testp;
243 	for (uint32_t i = 0; i < test_count; i++) {
244 		testp = &test_list[i];
245 		if (testp->xt_test_num == 0) {
246 			assert(total_post_tests_count < UINT16_MAX);
247 			testp->xt_test_num = (uint16_t)++total_post_tests_count;
248 		}
249 		/* make sure the boot-arg based test run list is honored */
250 		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
251 			testp->xt_config |= XT_CONFIG_IGNORE;
252 			if (xnupost_should_run_test(testp->xt_test_num)) {
253 				testp->xt_config &= ~(XT_CONFIG_IGNORE);
254 				testp->xt_config |= XT_CONFIG_RUN;
255 				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
256 			}
257 		}
258 		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
259 		    testp->xt_config);
260 	}
261 
262 	return KERN_SUCCESS;
263 }
264 
/*
 * Execute every test in test_list under the T_* ktest harness.
 * Skips tests that expect a panic when no controller is present, and tests
 * marked XT_CONFIG_IGNORE. A test with no explicit T_* result is judged by
 * its return value. Records begin/end timestamps and pass/fail actions on
 * each entry. Returns KERN_SUCCESS (also when POST is disabled by boot-arg).
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	/* POST only runs when the kernPOST boot-arg requested it. */
	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* Clear any panic widget left over from the previous test. */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		/* End time defaults to begin time in case the test is skipped. */
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		/* A test "passes" when its result matches the expectation, which
		 * for panic tests may itself be a failure state. */
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
329 
/* Convenience wrapper: list/number the built-in kernel POST test table. */
kern_return_t
kernel_list_tests()
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}
335 
/* Convenience wrapper: run the built-in kernel POST test table. */
kern_return_t
kernel_do_post()
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}
341 
342 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)343 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
344 {
345 	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
346 		return KERN_RESOURCE_SHORTAGE;
347 	}
348 
349 	xt_panic_widgets.xtp_context_p = context;
350 	xt_panic_widgets.xtp_func      = funcp;
351 	xt_panic_widgets.xtp_func_name = funcname;
352 	xt_panic_widgets.xtp_outval_p  = outval;
353 
354 	return KERN_SUCCESS;
355 }
356 
357 void
xnupost_reset_panic_widgets()358 xnupost_reset_panic_widgets()
359 {
360 	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
361 }
362 
/*
 * Hook called from the panic/debugger stop path with the panic string.
 * If a panic widget is registered, dispatch to it and translate its verdict:
 * KERN_SUCCESS means "return from kdb_stop" (resume), KERN_FAILURE means
 * "continue into the debugger/panic". Returns KERN_INVALID_CAPABILITY when
 * POST is not active or no widget is registered.
 */
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval         = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	/* Widget name for logging; may be unset if registration was partial. */
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		/* Panic not caused by (or not claimed by) the widget: let it proceed. */
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
409 
/*
 * Generic panic-widget handler: report XT_RET_W_SUCCESS when the panic
 * string s contains the substring str_to_match, XT_PANIC_UNRELATED
 * otherwise. The verdict is also stored through *outval (as an integer
 * smuggled in a pointer) when outval is non-NULL.
 */
xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
	xt_panic_return_t ret = XT_PANIC_UNRELATED;

	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
		ret = XT_RET_W_SUCCESS;
	}

	if (outval) {
		*outval = (void *)(uintptr_t)ret;
	}
	return ret;
}
425 
426 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)427 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
428 {
429 	uint32_t i = 0;
430 	xnupost_test_t testp;
431 	for (; i < test_count; i++) {
432 		testp                  = &test_list[i];
433 		testp->xt_begin_time   = 0;
434 		testp->xt_end_time     = 0;
435 		testp->xt_test_actions = XT_ACTION_NONE;
436 		testp->xt_retval       = -1;
437 	}
438 	return KERN_SUCCESS;
439 }
440 
441 
/*
 * Smoke test for the zone allocator: create a destructible zone for
 * uint64_t elements, verify it starts empty, allocate and free one
 * element, and emit a sample perfdata metric.
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* A freshly created zone should have no free elements yet. */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
465 
466 /*
467  * Function used for comparison by qsort()
468  */
469 static int
compare_numbers_ascending(const void * a,const void * b)470 compare_numbers_ascending(const void * a, const void * b)
471 {
472 	const uint64_t x = *(const uint64_t *)a;
473 	const uint64_t y = *(const uint64_t *)b;
474 	if (x < y) {
475 		return -1;
476 	} else if (x > y) {
477 		return 1;
478 	} else {
479 		return 0;
480 	}
481 }
482 
483 /*
484  * Function to count number of bits that are set in a number.
485  * It uses Side Addition using Magic Binary Numbers
486  */
487 static int
count_bits(uint64_t number)488 count_bits(uint64_t number)
489 {
490 	return __builtin_popcountll(number);
491 }
492 
kern_return_t
RandomULong_test()
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 *  a. RandomULong works
 *  b. The generated numbers match the following entropy criteria:
 *     For a thousand iterations, verify:
 *          1. mean entropy > 12 bits
 *          2. min entropy > 4 bits
 *          3. No Duplicate
 *          4. No incremental/decremental pattern in a window of 3
 *          5. No Zero
 *          6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
/* ceil(CONF_WINDOW_SIZE / 2).
 * NOTE(review): the expansion lacks outer parentheses, so at the use site
 * "trend > CONF_WINDOW_TREND_LIMIT" parses as "(trend > limit) >> 0"
 * because '>' binds tighter than '>>'. Harmless only because the shift
 * count is 0 — confirm before reusing this macro elsewhere. */
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	/* "Bit entropy" here = Hamming distance between consecutive samples. */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		/* trend counts rises minus falls across the window. */
		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 */
	/* Sort so duplicates become adjacent; this destroys generation order,
	 * which is fine since it is the last check. */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
629 
630 
/* KCDATA kernel api tests */
/* Shared kcdata descriptor used by kcdata_api_test(). */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/* Example packed payload whose layout is described by test_disk_io_stats_def
 * below; used to exercise kcdata_add_type_definition(). */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));
639 
/* Field-by-field kcdata subtype description of struct sample_disk_io_stats;
 * offsets are expressed in multiples of sizeof(uint64_t) to match the
 * packed layout above. */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		/* 4-element array field: size packs count and element size. */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
670 
/*
 * Exercise the kcdata API end to end: negative-parameter checks for
 * kcdata_memory_static_init() and kcdata_get_memory_addr(), a successful
 * buffer init with header/trailer verification, convenience adders,
 * array creation, and custom type definitions. The checks are sequential
 * and build on the same buffer, so order matters.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* Poison the length so we can verify init rewrites it. */
	test_kc_data.kcd_length   = 0xdeadbeef;

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	/* mem[0..3] is the 16-byte begin item, mem[4..7] the end item. */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* Array item flags encode the element type in the high 32 bits and the
	 * element count in the low 32 bits. */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	/* "data" doubles as the type name string here ("sample_disk_io_stats"). */
	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
781 
782 /*
783  *  kern_return_t
784  *  kcdata_api_assert_tests()
785  *  {
786  *       kern_return_t retval       = 0;
787  *       void * assert_check_retval = NULL;
788  *       test_kc_data2.kcd_length   = 0xdeadbeef;
789  *       mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
790  *       T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
791  *
792  *       retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
793  *                                          KCFLAG_USE_MEMCOPY);
794  *
795  *       T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
796  *
797  *       retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
798  *       T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
799  *
800  *       // this will assert
801  *       retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
802  *       T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
803  *       T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
804  *
805  *       return KERN_SUCCESS;
806  *  }
807  */
808 
809 #if defined(__arm64__)
810 
811 #include <arm/pmap.h>
812 
813 #define MAX_PMAP_OBJECT_ELEMENT 100000
814 
815 extern struct vm_object pmap_object_store; /* store pt pages */
816 extern unsigned long gPhysBase, gPhysSize, first_avail;
817 
818 /*
819  * Define macros to transverse the pmap object structures and extract
820  * physical page number with information from low global only
821  * This emulate how Astris extracts information from coredump
822  */
823 #if defined(__arm64__)
824 
/*
 * Unpack a 32-bit packed vm_page pointer the way Astris does from a
 * coredump, using only lowGlo fields: pages inside the vm_pages array are
 * addressed by index from lgPmapMemStartAddr; all others are base+shift
 * packed from lgPmapMemPackedBaseAddr. A zero packed value is NULL.
 */
static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	return (p & lowGlo.lgPmapMemFromArrayMask)
	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}
836 
// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

/* first element of the queue: unpack the head's next pointer */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* iteration stops when the walk returns to the queue head */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

/* walk every vm_page in the queue using only lowGlo-derived offsets */
#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* page number -> physical address, using the page shift from lowGlo */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
851 
852 static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)853 astris_vm_page_get_phys_page(uintptr_t m)
854 {
855 	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
856 	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
857 	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
858 }
859 
/*
 * Verify that the low-memory globals ("lowGlo") describe a pmap/vm_page
 * layout matching the running kernel, so Astris can walk the pmap object
 * from a coredump.  Then walk every page of pmap_object_store using only
 * the lowGlo-derived astris_* macros and check each physical address is
 * within [gPhysBase, gPhysBase + gPhysSize).
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	/* the static region advertised by lowGlo must sit below first_avail */
	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	/* layout version and magic that the coredump consumer expects */
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	/* pointer-packing parameters must match the kernel's packing scheme */
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	/* walk the pmap object's page list exactly the way Astris would */
	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* guard against a corrupted or cyclic page list */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* the pmap object must contain at least one page */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
901 #endif /* defined(__arm64__) */
902 
/*
 * Per-thread arguments for thread_lock_unlock_kernel_primitive(),
 * set up by ts_kernel_primitive_test().
 */
struct ts_kern_prim_test_args {
	int *end_barrier;      /* incremented when the thread finishes */
	int *notify_b;         /* counter bumped after the "before" wait */
	int *wait_event_b;     /* counter waited on before taking the lock */
	int before_num;        /* value wait_event_b must reach */
	int *notify_a;         /* counter bumped right after taking the lock */
	int *wait_event_a;     /* counter waited on while holding the lock */
	int after_num;         /* value wait_event_a must reach */
	int priority_to_check; /* expected boosted sched_pri; 0 skips the check */
};
913 
/*
 * Block the calling thread until *var (incremented by wake_threads())
 * reaches num.  Uses the assert_wait()/thread_block() pattern,
 * re-checking the counter after assert_wait() so a wakeup that lands
 * between the first load and the wait registration is not lost.
 * A NULL var means there is nothing to wait for.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			/* re-check: the count may have reached num after assert_wait() */
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				/* condition already satisfied: cancel the pending wait */
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
930 
931 static void
wake_threads(int * var)932 wake_threads(
933 	int* var)
934 {
935 	if (var) {
936 		os_atomic_inc(var, relaxed);
937 		thread_wakeup((event_t) var);
938 	}
939 }
940 
941 extern void IOSleep(int);
942 
/*
 * Body of each thread started by ts_kernel_primitive_test().
 *
 * Protocol driven by the ts_kern_prim_test_args fields:
 *  - wait until wait_event_b reaches before_num, then bump notify_b;
 *  - take the turnstile test lock; bump notify_a and wait for
 *    wait_event_a to reach after_num while holding it;
 *  - if priority_to_check is set, verify the turnstile boosted this
 *    thread's sched_pri to exactly that value;
 *  - drop the lock, bump end_barrier and terminate.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	/* give the other threads time to block on the lock and push on us */
	IOSleep(100);

	if (info->priority_to_check) {
		/* sample sched_pri under the thread lock at splsched */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
976 
977 kern_return_t
ts_kernel_primitive_test(void)978 ts_kernel_primitive_test(void)
979 {
980 	thread_t owner, thread1, thread2;
981 	struct ts_kern_prim_test_args targs[2] = {};
982 	kern_return_t result;
983 	int end_barrier = 0;
984 	int owner_locked = 0;
985 	int waiters_ready = 0;
986 
987 	T_LOG("Testing turnstile kernel primitive");
988 
989 	targs[0].notify_b = NULL;
990 	targs[0].wait_event_b = NULL;
991 	targs[0].before_num = 0;
992 	targs[0].notify_a = &owner_locked;
993 	targs[0].wait_event_a = &waiters_ready;
994 	targs[0].after_num = 2;
995 	targs[0].priority_to_check = 90;
996 	targs[0].end_barrier = &end_barrier;
997 
998 	// Start owner with priority 80
999 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
1000 	T_ASSERT(result == KERN_SUCCESS, "Starting owner");
1001 
1002 	targs[1].notify_b = &waiters_ready;
1003 	targs[1].wait_event_b = &owner_locked;
1004 	targs[1].before_num = 1;
1005 	targs[1].notify_a = NULL;
1006 	targs[1].wait_event_a = NULL;
1007 	targs[1].after_num = 0;
1008 	targs[1].priority_to_check = 0;
1009 	targs[1].end_barrier = &end_barrier;
1010 
1011 	// Start waiters with priority 85 and 90
1012 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
1013 	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
1014 
1015 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
1016 	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
1017 
1018 	wait_threads(&end_barrier, 3);
1019 
1020 	return KERN_SUCCESS;
1021 }
1022 
/* primitive flavor selector for the sleep_with_inheritor / gate tests */
#define MTX_LOCK 0
#define RW_LOCK 1

#define NUM_THREADS 4

/*
 * State shared by all workers of a synchronization test; embedded as the
 * first member of each specific test's info struct so workers can cast.
 */
struct synch_test_common {
	unsigned int nthreads; /* number of worker threads */
	thread_t *threads;     /* worker thread pointers, published by start_threads() */
	int max_pri;           /* highest priority among the workers */
	int test_done;         /* count of workers that called notify_waiter() */
};
1034 
1035 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1036 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1037 {
1038 	info->nthreads = nthreads;
1039 	info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1040 	if (!info->threads) {
1041 		return ENOMEM;
1042 	}
1043 
1044 	return KERN_SUCCESS;
1045 }
1046 
/* Free the threads array allocated by init_synch_test_common(). */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree_type(thread_t, info->nthreads, info->threads);
}
1052 
/*
 * Spawn info->nthreads kernel threads running func at priorities
 * 75, 80, 85, ... and record the clamped (<= 95) maximum in
 * info->max_pri.  Thread pointers are published into info->threads
 * with release ordering so workers can observe each other.  If
 * sleep_after_first is set, pause after launching the first thread
 * so it reliably wins any "first to run" race in the worker body.
 */
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	/* highest priority any worker will run at, clamped to 95 */
	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}
1084 
/*
 * Highest scheduler priority among the workers launched by
 * start_threads() (computed there, clamped to 95).
 */
static unsigned int
get_max_pri(struct synch_test_common * info)
{
	return info->max_pri;
}
1090 
/* Block until all nthreads workers have called notify_waiter(). */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1096 
/* Signal that the calling worker is done; pairs with wait_all_thread(). */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1102 
/*
 * Best-effort wait for every other worker to have stopped running (i.e.
 * blocked), so the caller can then check its own inherited priority.
 * For each slot in info->threads[]: first poll until start_threads()
 * has published the thread pointer, then poll until the thread leaves
 * TH_RUN.  Slots overwritten with the sentinel (thread_t)1 by
 * exclude_current_waiter() are skipped.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		/* wait for start_threads() to publish this slot */
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					/* slot opted out via exclude_current_waiter() */
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				/* ignore TH_RUN until the thread has actually started */
				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1142 
/*
 * Replace the calling thread's entry in info->threads[] with the
 * sentinel (thread_t)1 so wait_for_waiters() no longer waits for it
 * to block.
 */
static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		/* wait for start_threads() to publish this slot */
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
1163 
/*
 * Shared state for the sleep_with_inheritor and gate tests.
 * head must remain the first member so workers can cast to
 * struct synch_test_common.
 */
struct info_sleep_inheritor_test {
	struct synch_test_common head;
	lck_mtx_t mtx_lock;              /* used when prim_type == MTX_LOCK */
	lck_rw_t rw_lock;                /* used when prim_type == RW_LOCK */
	decl_lck_mtx_gate_data(, gate);  /* embedded gate */
	boolean_t gate_closed;
	int prim_type;                   /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;          /* highest priority seen by the stealer */
	int synch_value;                 /* target for the synch counter */
	int synch;                       /* rendezvous counter (wake/wait_threads) */
	int value;
	int handoff_failure;             /* times a wakeup found no successor */
	thread_t thread_inheritor;       /* current inheritor / pseudo lock owner */
	bool use_alloc_gate;             /* true: use alloc_gate instead of gate */
	gate_t *alloc_gate;              /* heap-allocated gate, if any */
	struct obj_cached **obj_cache;
	kern_apfs_reflock_data(, reflock);
	int reflock_protected_status;
};
1185 
1186 static void
primitive_lock(struct info_sleep_inheritor_test * info)1187 primitive_lock(struct info_sleep_inheritor_test *info)
1188 {
1189 	switch (info->prim_type) {
1190 	case MTX_LOCK:
1191 		lck_mtx_lock(&info->mtx_lock);
1192 		break;
1193 	case RW_LOCK:
1194 		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1195 		break;
1196 	default:
1197 		panic("invalid type %d", info->prim_type);
1198 	}
1199 }
1200 
1201 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1202 primitive_unlock(struct info_sleep_inheritor_test *info)
1203 {
1204 	switch (info->prim_type) {
1205 	case MTX_LOCK:
1206 		lck_mtx_unlock(&info->mtx_lock);
1207 		break;
1208 	case RW_LOCK:
1209 		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1210 		break;
1211 	default:
1212 		panic("invalid type %d", info->prim_type);
1213 	}
1214 }
1215 
1216 static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test * info)1217 primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
1218 {
1219 	wait_result_t ret = KERN_SUCCESS;
1220 	switch (info->prim_type) {
1221 	case MTX_LOCK:
1222 		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1223 		break;
1224 	case RW_LOCK:
1225 		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1226 		break;
1227 	default:
1228 		panic("invalid type %d", info->prim_type);
1229 	}
1230 
1231 	return ret;
1232 }
1233 
1234 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1235 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1236 {
1237 	switch (info->prim_type) {
1238 	case MTX_LOCK:
1239 	case RW_LOCK:
1240 		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1241 		break;
1242 	default:
1243 		panic("invalid type %d", info->prim_type);
1244 	}
1245 }
1246 
1247 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1248 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1249 {
1250 	switch (info->prim_type) {
1251 	case MTX_LOCK:
1252 	case RW_LOCK:
1253 		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1254 		break;
1255 	default:
1256 		panic("invalid type %d", info->prim_type);
1257 	}
1258 	return;
1259 }
1260 
1261 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1262 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1263 {
1264 	switch (info->prim_type) {
1265 	case MTX_LOCK:
1266 	case RW_LOCK:
1267 		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1268 		break;
1269 	default:
1270 		panic("invalid type %d", info->prim_type);
1271 	}
1272 	return;
1273 }
1274 
1275 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1276 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1277 {
1278 	gate_t *gate = &info->gate;
1279 	if (info->use_alloc_gate == true) {
1280 		gate = info->alloc_gate;
1281 	}
1282 	kern_return_t ret = KERN_SUCCESS;
1283 	switch (info->prim_type) {
1284 	case MTX_LOCK:
1285 		ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1286 		break;
1287 	case RW_LOCK:
1288 		ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1289 		break;
1290 	default:
1291 		panic("invalid type %d", info->prim_type);
1292 	}
1293 	return ret;
1294 }
1295 
1296 static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test * info)1297 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1298 {
1299 	gate_t *gate = &info->gate;
1300 	if (info->use_alloc_gate == true) {
1301 		gate = info->alloc_gate;
1302 	}
1303 	gate_wait_result_t ret = GATE_OPENED;
1304 	switch (info->prim_type) {
1305 	case MTX_LOCK:
1306 		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1307 		break;
1308 	case RW_LOCK:
1309 		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1310 		break;
1311 	default:
1312 		panic("invalid type %d", info->prim_type);
1313 	}
1314 	return ret;
1315 }
1316 
1317 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1318 primitive_gate_open(struct info_sleep_inheritor_test *info)
1319 {
1320 	gate_t *gate = &info->gate;
1321 	if (info->use_alloc_gate == true) {
1322 		gate = info->alloc_gate;
1323 	}
1324 	switch (info->prim_type) {
1325 	case MTX_LOCK:
1326 		lck_mtx_gate_open(&info->mtx_lock, gate);
1327 		break;
1328 	case RW_LOCK:
1329 		lck_rw_gate_open(&info->rw_lock, gate);
1330 		break;
1331 	default:
1332 		panic("invalid type %d", info->prim_type);
1333 	}
1334 }
1335 
1336 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1337 primitive_gate_close(struct info_sleep_inheritor_test *info)
1338 {
1339 	gate_t *gate = &info->gate;
1340 	if (info->use_alloc_gate == true) {
1341 		gate = info->alloc_gate;
1342 	}
1343 
1344 	switch (info->prim_type) {
1345 	case MTX_LOCK:
1346 		lck_mtx_gate_close(&info->mtx_lock, gate);
1347 		break;
1348 	case RW_LOCK:
1349 		lck_rw_gate_close(&info->rw_lock, gate);
1350 		break;
1351 	default:
1352 		panic("invalid type %d", info->prim_type);
1353 	}
1354 }
1355 
1356 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1357 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1358 {
1359 	gate_t *gate = &info->gate;
1360 	if (info->use_alloc_gate == true) {
1361 		gate = info->alloc_gate;
1362 	}
1363 
1364 	switch (info->prim_type) {
1365 	case MTX_LOCK:
1366 		lck_mtx_gate_steal(&info->mtx_lock, gate);
1367 		break;
1368 	case RW_LOCK:
1369 		lck_rw_gate_steal(&info->rw_lock, gate);
1370 		break;
1371 	default:
1372 		panic("invalid type %d", info->prim_type);
1373 	}
1374 }
1375 
1376 static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test * info,int flags)1377 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1378 {
1379 	gate_t *gate = &info->gate;
1380 	if (info->use_alloc_gate == true) {
1381 		gate = info->alloc_gate;
1382 	}
1383 
1384 	kern_return_t ret = KERN_SUCCESS;
1385 	switch (info->prim_type) {
1386 	case MTX_LOCK:
1387 		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1388 		break;
1389 	case RW_LOCK:
1390 		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1391 		break;
1392 	default:
1393 		panic("invalid type %d", info->prim_type);
1394 	}
1395 	return ret;
1396 }
1397 
1398 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1399 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1400 {
1401 	gate_t *gate = &info->gate;
1402 	if (info->use_alloc_gate == true) {
1403 		gate = info->alloc_gate;
1404 	}
1405 
1406 	switch (info->prim_type) {
1407 	case MTX_LOCK:
1408 		lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1409 		break;
1410 	case RW_LOCK:
1411 		lck_rw_gate_assert(&info->rw_lock, gate, type);
1412 		break;
1413 	default:
1414 		panic("invalid type %d", info->prim_type);
1415 	}
1416 }
1417 
1418 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1419 primitive_gate_init(struct info_sleep_inheritor_test *info)
1420 {
1421 	switch (info->prim_type) {
1422 	case MTX_LOCK:
1423 		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1424 		break;
1425 	case RW_LOCK:
1426 		lck_rw_gate_init(&info->rw_lock, &info->gate);
1427 		break;
1428 	default:
1429 		panic("invalid type %d", info->prim_type);
1430 	}
1431 }
1432 
1433 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1434 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1435 {
1436 	switch (info->prim_type) {
1437 	case MTX_LOCK:
1438 		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1439 		break;
1440 	case RW_LOCK:
1441 		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1442 		break;
1443 	default:
1444 		panic("invalid type %d", info->prim_type);
1445 	}
1446 }
1447 
1448 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1449 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1450 {
1451 	gate_t *gate;
1452 	switch (info->prim_type) {
1453 	case MTX_LOCK:
1454 		gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1455 		break;
1456 	case RW_LOCK:
1457 		gate = lck_rw_gate_alloc_init(&info->rw_lock);
1458 		break;
1459 	default:
1460 		panic("invalid type %d", info->prim_type);
1461 	}
1462 	info->alloc_gate = gate;
1463 }
1464 
1465 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1466 primitive_gate_free(struct info_sleep_inheritor_test *info)
1467 {
1468 	T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1469 
1470 	switch (info->prim_type) {
1471 	case MTX_LOCK:
1472 		lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1473 		break;
1474 	case RW_LOCK:
1475 		lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1476 		break;
1477 	default:
1478 		panic("invalid type %d", info->prim_type);
1479 	}
1480 	info->alloc_gate = NULL;
1481 }
1482 
/*
 * Worker that uses sleep_with_inheritor / wakeup_one_with_inheritor as
 * a hand-off style mutex: the first thread to see thread_inheritor NULL
 * becomes the owner; every other thread sleeps pushing on the current
 * owner.  Each wakeup_one_with_inheritor() stores the woken thread back
 * into thread_inheritor (with a reference the waker must drop); the one
 * thread that finds no successor records the single expected
 * handoff_failure.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first in: become the owner/inheritor */
		info->thread_inheritor = current_thread();
	} else {
		/* an owner exists: sleep pushing on it */
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	/* "critical section": only the current inheritor gets here at a time */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	/* hand off to one sleeper; the woken thread lands in thread_inheritor */
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* no sleeper left to wake: must happen at most once */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* drop the reference returned by wakeup_one_with_inheritor */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* any priority push must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1535 
/*
 * Worker for the basic sleep_with_inheritor push test: the first thread
 * becomes the inheritor, waits (outside the lock) for all the others to
 * block pushing on it, asserts its sched_pri was boosted to the maximum
 * waiter priority, then wakes everyone.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first to run: become the inheritor everyone pushes on */
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* wait until every other worker has announced itself... */
		wait_threads(&info->synch, info->synch_value - 1);

		/* ...and has actually blocked, so the push is fully applied */
		wait_for_waiters((struct synch_test_common *)info);

		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* announce, then sleep pushing on the inheritor */
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* any priority push must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1575 
/*
 * Worker for the change_sleep_inheritor test: the first thread becomes
 * the inheritor; the first would-be sleeper instead "steals"
 * inheritorship with change_sleep_inheritor() and verifies it gets
 * pushed to the highest priority among the remaining waiters
 * (info->steal_pri); the rest sleep and push on the thief.  Both the
 * original inheritor and the thief exclude themselves from
 * wait_for_waiters() via exclude_current_waiter().
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first to run: initial inheritor, soon to be replaced */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* only wake if the thief has not already taken over */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* first sleeper: steal inheritorship instead of sleeping */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* steal_pri tracks the highest waiter priority seen */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* regular waiter: record priority, then push on the thief */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* any priority push must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1636 
/*
 * Worker for the NULL-inheritor case: each thread decrements
 * info->value; all but the last sleep with thread_inheritor set to
 * NULL (so no one is pushed), and the last thread wakes everybody.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* last one in: release all the sleepers */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* sleep with no inheritor: nobody receives a push */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* any priority push must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1663 
1664 static void
thread_mtx_work(void * args,__unused wait_result_t wr)1665 thread_mtx_work(
1666 	void *args,
1667 	__unused wait_result_t wr)
1668 {
1669 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1670 	uint my_pri = current_thread()->sched_pri;
1671 	int i;
1672 	u_int8_t rand;
1673 	unsigned int mod_rand;
1674 	uint max_pri;
1675 
1676 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1677 
1678 	for (i = 0; i < 10; i++) {
1679 		lck_mtx_lock(&info->mtx_lock);
1680 		if (info->thread_inheritor == NULL) {
1681 			info->thread_inheritor = current_thread();
1682 			lck_mtx_unlock(&info->mtx_lock);
1683 
1684 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1685 
1686 			wait_threads(&info->synch, info->synch_value - 1);
1687 			wait_for_waiters((struct synch_test_common *)info);
1688 			max_pri = get_max_pri((struct synch_test_common *) info);
1689 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1690 
1691 			os_atomic_store(&info->synch, 0, relaxed);
1692 
1693 			lck_mtx_lock(&info->mtx_lock);
1694 			info->thread_inheritor = NULL;
1695 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1696 			lck_mtx_unlock(&info->mtx_lock);
1697 			continue;
1698 		}
1699 
1700 		read_random(&rand, sizeof(rand));
1701 		mod_rand = rand % 2;
1702 
1703 		wake_threads(&info->synch);
1704 		switch (mod_rand) {
1705 		case 0:
1706 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1707 			lck_mtx_unlock(&info->mtx_lock);
1708 			break;
1709 		case 1:
1710 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1711 			break;
1712 		default:
1713 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1714 		}
1715 	}
1716 
1717 	/*
1718 	 * spin here to stop using the lock as mutex
1719 	 */
1720 	wake_threads(&info->synch);
1721 	wait_threads(&info->synch, info->synch_value);
1722 
1723 	for (i = 0; i < 10; i++) {
1724 		/* read_random might sleep so read it before acquiring the mtx as spin */
1725 		read_random(&rand, sizeof(rand));
1726 
1727 		lck_mtx_lock_spin(&info->mtx_lock);
1728 		if (info->thread_inheritor == NULL) {
1729 			info->thread_inheritor = current_thread();
1730 			lck_mtx_unlock(&info->mtx_lock);
1731 
1732 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1733 			wait_for_waiters((struct synch_test_common *)info);
1734 			max_pri = get_max_pri((struct synch_test_common *) info);
1735 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1736 
1737 			lck_mtx_lock_spin(&info->mtx_lock);
1738 			info->thread_inheritor = NULL;
1739 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1740 			lck_mtx_unlock(&info->mtx_lock);
1741 			continue;
1742 		}
1743 
1744 		mod_rand = rand % 2;
1745 		switch (mod_rand) {
1746 		case 0:
1747 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1748 			lck_mtx_unlock(&info->mtx_lock);
1749 			break;
1750 		case 1:
1751 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1752 			lck_mtx_unlock(&info->mtx_lock);
1753 			break;
1754 		default:
1755 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1756 		}
1757 	}
1758 	assert(current_thread()->kern_promotion_schedpri == 0);
1759 	notify_waiter((struct synch_test_common *)info);
1760 
1761 	thread_terminate_self();
1762 }
1763 
/*
 * Thread body used by test_rw_lock().
 *
 * Exercises lck_rw_sleep_with_inheritor() with every LCK_SLEEP_* variant.
 * The first thread to observe thread_inheritor == NULL elects itself
 * inheritor: it upgrades to the exclusive lock, waits until all the other
 * threads are blocked pushing on it, asserts that its sched_pri was boosted
 * to the max priority among the waiters, then wakes everybody. All other
 * threads randomly pick one of four sleep/re-lock flag combinations.
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				/* Re-check under the exclusive lock: someone may have won the race. */
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					/*
					 * Every other thread is now sleeping with this thread as
					 * inheritor; the push must equal the max waiter priority.
					 */
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				/*
				 * A failed shared-to-exclusive upgrade drops the lock
				 * entirely, so restart the whole acquisition.
				 */
				goto try_again;
			}
		}

		/* Waiter path: pick one of the four sleep flavors at random. */
		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			/* Sleep and re-acquire in the mode held on entry; unlock explicitly. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			/* LCK_SLEEP_UNLOCK: the lock stays dropped on wakeup, no unlock here. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			/* Wake up holding the lock shared. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			/* Wake up holding the lock exclusive. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/* Any inheritance push must be gone by the time the thread exits. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1840 
/* Lifecycle states of a cache slot (struct obj_cached). */
#define OBJ_STATE_UNUSED        0       /* initialized but not bound to a live object */
#define OBJ_STATE_REAL          1       /* fully initialized, buff holds "I am <id>" */
#define OBJ_STATE_PLACEHOLDER   2       /* being initialized; waiters push on the initializer */

#define OBJ_BUFF_SIZE 11                /* fits "I am groot" plus the NUL terminator */
/*
 * One cache slot. obj_refcount (a kern_apfs_reflock) both counts
 * references to the slot and serializes the UNUSED -> PLACEHOLDER ->
 * REAL init transition and the last-ref teardown.
 */
struct obj_cached {
	int obj_id;
	int obj_state;
	struct kern_apfs_reflock *obj_refcount;
	char obj_buff[OBJ_BUFF_SIZE];
};

#define CACHE_SIZE 2                    /* number of slots in the test cache */
#define USE_CACHE_ROUNDS 15             /* get/put iterations per thread in thread_use_cache() */

#define REFCOUNT_REFLOCK_ROUNDS 15      /* iterations for the refcount/force/lock reflock tests */

/*
 * For the reflock cache test the cache is allocated
 * and its pointer is saved in obj_cache.
 * The lock for the cache is going to be one of the exclusive
 * locks already present in struct info_sleep_inheritor_test.
 */
1864 
1865 static struct obj_cached *
alloc_init_cache_entry(void)1866 alloc_init_cache_entry(void)
1867 {
1868 	struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1869 	cache_entry->obj_id = 0;
1870 	cache_entry->obj_state = OBJ_STATE_UNUSED;
1871 	cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
1872 	snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
1873 	return cache_entry;
1874 }
1875 
1876 static void
init_cache(struct info_sleep_inheritor_test * info)1877 init_cache(struct info_sleep_inheritor_test *info)
1878 {
1879 	struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1880 
1881 	int i;
1882 	for (i = 0; i < CACHE_SIZE; i++) {
1883 		obj_cache[i] = alloc_init_cache_entry();
1884 	}
1885 
1886 	info->obj_cache = obj_cache;
1887 }
1888 
1889 static void
check_cache_empty(struct info_sleep_inheritor_test * info)1890 check_cache_empty(struct info_sleep_inheritor_test *info)
1891 {
1892 	struct obj_cached **obj_cache = info->obj_cache;
1893 
1894 	int i, ret;
1895 	for (i = 0; i < CACHE_SIZE; i++) {
1896 		if (obj_cache[i] != NULL) {
1897 			T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1898 			T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1899 			ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1900 			T_ASSERT(ret == 0, "checked buff correctly emptied");
1901 		}
1902 	}
1903 }
1904 
1905 static void
free_cache(struct info_sleep_inheritor_test * info)1906 free_cache(struct info_sleep_inheritor_test *info)
1907 {
1908 	struct obj_cached **obj_cache = info->obj_cache;
1909 
1910 	int i;
1911 	for (i = 0; i < CACHE_SIZE; i++) {
1912 		if (obj_cache[i] != NULL) {
1913 			kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1914 			obj_cache[i]->obj_refcount = NULL;
1915 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1916 			obj_cache[i] = NULL;
1917 		}
1918 	}
1919 
1920 	kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1921 	info->obj_cache = NULL;
1922 }
1923 
1924 static struct obj_cached *
find_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info)1925 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1926 {
1927 	struct obj_cached **obj_cache = info->obj_cache;
1928 	int i;
1929 	for (i = 0; i < CACHE_SIZE; i++) {
1930 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1931 			return obj_cache[i];
1932 		}
1933 	}
1934 	return NULL;
1935 }
1936 
1937 static bool
free_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info,__assert_only struct obj_cached * expected)1938 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, __assert_only struct obj_cached *expected)
1939 {
1940 	struct obj_cached **obj_cache = info->obj_cache;
1941 	int i;
1942 	for (i = 0; i < CACHE_SIZE; i++) {
1943 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1944 			assert(obj_cache[i] == expected);
1945 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1946 			obj_cache[i] = NULL;
1947 			return true;
1948 		}
1949 	}
1950 	return false;
1951 }
1952 
1953 static struct obj_cached *
find_empty_spot_in_cache(struct info_sleep_inheritor_test * info)1954 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1955 {
1956 	struct obj_cached **obj_cache = info->obj_cache;
1957 	int i;
1958 	for (i = 0; i < CACHE_SIZE; i++) {
1959 		if (obj_cache[i] == NULL) {
1960 			obj_cache[i] = alloc_init_cache_entry();
1961 			return obj_cache[i];
1962 		}
1963 		if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1964 			return obj_cache[i];
1965 		}
1966 	}
1967 	return NULL;
1968 }
1969 
/*
 * Look up (or create) the cache entry for obj_id and take a reference on it.
 *
 * On success returns 0 and sets *buff to the entry's payload, which stays
 * valid until the matching put_obj_cache() because a reference is held.
 * Returns -1 when the cache is full and every slot is in use.
 *
 * If the entry has to be (re)initialized, the slot is published as
 * OBJ_STATE_PLACEHOLDER while this thread holds the reflock, so concurrent
 * getters block in kern_apfs_reflock_wait_for_unlock() (pushing their
 * priority on this thread) instead of observing a half-built object.
 */
static int
get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	kern_apfs_reflock_t refcount = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;

try_again:
	primitive_lock(info);
	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
		/* Found an allocated object on the cache with same id */

		/*
		 * copy the pointer to obj_refcount as obj might
		 * get deallocated after primitive_unlock()
		 */
		refcount = obj->obj_refcount;
		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
			/*
			 * Got a ref, let's check the state
			 */
			switch (obj->obj_state) {
			case OBJ_STATE_UNUSED:
				goto init;
			case OBJ_STATE_REAL:
				goto done;
			case OBJ_STATE_PLACEHOLDER:
				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
			default:
				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
			}
		} else {
			/*
			 * Didn't get a ref.
			 * This means or an obj_put() of the last ref is ongoing
			 * or a init of the object is happening.
			 * Both cases wait for that to finish and retry.
			 * While waiting the thread that is holding the reflock
			 * will get a priority at least as the one of this thread.
			 */
			primitive_unlock(info);
			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			goto try_again;
		}
	} else {
		/* Look for a spot on the cache where we can save the object */

		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
			/*
			 * Sadness cache is full, and everyting in the cache is
			 * used.
			 */
			primitive_unlock(info);
			return -1;
		} else {
			/*
			 * copy the pointer to obj_refcount as obj might
			 * get deallocated after primitive_unlock()
			 */
			refcount = obj->obj_refcount;
			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
				/*
				 * Got a ref on a OBJ_STATE_UNUSED obj.
				 * Recicle time.
				 */
				obj->obj_id = obj_id;
				goto init;
			} else {
				/*
				 * This could happen if the obj_put() has just changed the
				 * state to OBJ_STATE_UNUSED, but not unlocked the reflock yet.
				 */
				primitive_unlock(info);
				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
				goto try_again;
			}
		}
	}
init:
	assert(obj->obj_id == obj_id);
	assert(obj->obj_state == OBJ_STATE_UNUSED);
	/*
	 * We already got a ref on the object, but we need
	 * to initialize it. Mark it as
	 * OBJ_STATE_PLACEHOLDER and get the obj_reflock.
	 * In this way all thread waiting for this init
	 * to finish will push on this thread.
	 */
	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
	/* Cannot fail: we hold a ref and nobody else can be initializing. */
	assert(ret == true);
	obj->obj_state = OBJ_STATE_PLACEHOLDER;
	primitive_unlock(info);

	//let's pretend we are populating the obj
	IOSleep(10);
	/*
	 * obj will not be deallocated while I hold a ref.
	 * So it is safe to access it.
	 */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);

	primitive_lock(info);
	/* The slot must still be ours: nobody can free it while we hold a ref. */
	obj2 = find_id_in_cache(obj_id, info);
	assert(obj == obj2);
	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);

	obj->obj_state = OBJ_STATE_REAL;
	kern_apfs_reflock_unlock(refcount);

done:
	*buff = obj->obj_buff;
	primitive_unlock(info);
	return 0;
}
2085 
/*
 * Drop the reference obtained with get_obj_cache().
 *
 * If this was the last reference, the reflock is acquired atomically with
 * the 1->0 transition (KERN_APFS_REFLOCK_IN_LOCK_IF_LAST): the object is
 * "flushed", reset to OBJ_STATE_UNUSED, and — when free is true — removed
 * from the cache, with the reflock freed only after it has been unlocked.
 */
static void
put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_t refcount = NULL;

	primitive_lock(info);
	obj = find_id_in_cache(obj_id, info);
	primitive_unlock(info);

	/*
	 * Nobody should have been able to remove obj_id
	 * from the cache.
	 */
	assert(obj != NULL);
	assert(obj->obj_state == OBJ_STATE_REAL);

	/* Keep a copy: the slot may be freed below while we still need the reflock. */
	refcount = obj->obj_refcount;

	/*
	 * This should never fail, as or the reflock
	 * was acquired when the state was OBJ_STATE_UNUSED to init,
	 * or from a put that reached zero. And if the latter
	 * happened subsequent reflock_get_ref() will had to wait to transition
	 * to OBJ_STATE_REAL.
	 */
	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
	assert(ret == true);
	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
		/* Not the last reference: nothing to tear down. */
		return;
	}

	/*
	 * Note: nobody at this point will be able to get a ref or a lock on
	 * refcount.
	 * All people waiting on refcount will push on this thread.
	 */

	//let's pretend we are flushing the obj somewhere.
	IOSleep(10);
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");

	primitive_lock(info);
	obj->obj_state = OBJ_STATE_UNUSED;
	if (free) {
		obj2 = find_id_in_cache(obj_id, info);
		assert(obj == obj2);

		ret = free_id_in_cache(obj_id, info, obj);
		assert(ret == true);
	}
	primitive_unlock(info);

	/* Wake everybody blocked in kern_apfs_reflock_wait_for_unlock(). */
	kern_apfs_reflock_unlock(refcount);

	if (free) {
		kern_apfs_reflock_free(refcount);
	}
}
2147 
/*
 * Thread body for test_cache_reflock().
 *
 * Each thread derives an object id in [1, CACHE_SIZE + 1] — one more
 * distinct id than there are cache slots, so the cache can legitimately
 * fill up and get_obj_cache() can return -1. It then loops getting the
 * object, verifying the payload twice around a sleep (the held reference
 * must keep the buffer stable), and putting it back, asking for the slot
 * to be freed on every other round.
 */
static void
thread_use_cache(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	int my_obj;

	/* info->value starts at nthreads; each thread claims a distinct value. */
	primitive_lock(info);
	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
	primitive_unlock(info);

	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
	/*
	 * This is the string I would expect to see
	 * on my_obj buff.
	 */
	char my_string[OBJ_BUFF_SIZE];
	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);

	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
		char *buff;
		while (get_obj_cache(my_obj, info, &buff) == -1) {
			/*
			 * Cache is full, wait.
			 */
			IOSleep(10);
		}
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		IOSleep(10);
		/* The buffer must not have changed while we held the ref. */
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		put_obj_cache(my_obj, info, (i % 2 == 0));
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2191 
/*
 * Thread body for test_refcount_reflock().
 *
 * Repeatedly takes and drops a reference on info->reflock. A get that
 * performs the 0->1 transition also acquires the reflock
 * (KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST reported via OUT_LOCKED) and sets
 * reflock_protected_status; the put that performs the matching 1->0
 * transition (IN_LOCK_IF_LAST) acquires it again and clears the status.
 * On even rounds a failed get waits for the current holder to unlock
 * before retrying.
 */
static void
thread_refcount_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_in_flags_t in_flags;

	T_LOG("Thread %p started", current_thread());
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
		if ((i % 2) == 0) {
			/* On even rounds, be willing to wait if the reflock is held. */
			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
		}
		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
		if (ret == true) {
			/* got reference, check if we did 0->1 */
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 0, "status init check");
				info->reflock_protected_status = 1;
				kern_apfs_reflock_unlock(&info->reflock);
			} else {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
			}
			/* release the reference and check if we did 1->0 */
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
				info->reflock_protected_status = 0;
				kern_apfs_reflock_unlock(&info->reflock);
			}
		} else {
			/* didn't get a reference */
			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2243 
/*
 * Thread body for test_force_reflock().
 *
 * Exactly one thread (the first to bump info->value from 0) becomes the
 * "locker": it closes the reflock with KERN_APFS_REFLOCK_IN_ALLOW_FORCE
 * and holds it across the start barrier. Every other thread performs
 * get/put pairs with KERN_APFS_REFLOCK_IN_FORCE, which are asserted to
 * succeed even while the reflock may still be held by the locker.
 */
static void
thread_force_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* Keep the reflock held while the others force refs through it. */
		IOSleep(100);
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2284 
/*
 * Thread body for test_lock_reflock().
 *
 * One thread takes the reflock with KERN_APFS_REFLOCK_IN_DEFAULT and
 * sets reflock_protected_status while holding it. The other threads try
 * plain get/put pairs: whenever a get succeeds the status must read as
 * unlocked (0), i.e. a default get must not succeed while the reflock is
 * held.
 */
static void
thread_lock_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* First thread in becomes the lock holder. */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
		info->reflock_protected_status = 1;
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		IOSleep(100);
		info->reflock_protected_status = 0;
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
			if (ret == true) {
				/* Got a ref, so the reflock cannot be held right now. */
				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
				break;
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2330 
2331 static void
test_cache_reflock(struct info_sleep_inheritor_test * info)2332 test_cache_reflock(struct info_sleep_inheritor_test *info)
2333 {
2334 	info->synch = 0;
2335 	info->synch_value = info->head.nthreads;
2336 
2337 	info->value = info->head.nthreads;
2338 	/*
2339 	 * Use the mtx as cache lock
2340 	 */
2341 	info->prim_type = MTX_LOCK;
2342 
2343 	init_cache(info);
2344 
2345 	start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2346 	wait_all_thread((struct synch_test_common *)info);
2347 
2348 	check_cache_empty(info);
2349 	free_cache(info);
2350 }
2351 
2352 static void
test_refcount_reflock(struct info_sleep_inheritor_test * info)2353 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2354 {
2355 	info->synch = 0;
2356 	info->synch_value = info->head.nthreads;
2357 	kern_apfs_reflock_init(&info->reflock);
2358 	info->reflock_protected_status = 0;
2359 
2360 	start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2361 	wait_all_thread((struct synch_test_common *)info);
2362 
2363 	kern_apfs_reflock_destroy(&info->reflock);
2364 
2365 	T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2366 }
2367 
2368 static void
test_force_reflock(struct info_sleep_inheritor_test * info)2369 test_force_reflock(struct info_sleep_inheritor_test *info)
2370 {
2371 	info->synch = 0;
2372 	info->synch_value = info->head.nthreads;
2373 	kern_apfs_reflock_init(&info->reflock);
2374 	info->value = 0;
2375 
2376 	start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2377 	wait_all_thread((struct synch_test_common *)info);
2378 
2379 	kern_apfs_reflock_destroy(&info->reflock);
2380 }
2381 
2382 static void
test_lock_reflock(struct info_sleep_inheritor_test * info)2383 test_lock_reflock(struct info_sleep_inheritor_test *info)
2384 {
2385 	info->synch = 0;
2386 	info->synch_value = info->head.nthreads;
2387 	kern_apfs_reflock_init(&info->reflock);
2388 	info->value = 0;
2389 
2390 	start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2391 	wait_all_thread((struct synch_test_common *)info);
2392 
2393 	kern_apfs_reflock_destroy(&info->reflock);
2394 }
2395 
2396 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)2397 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2398 {
2399 	info->prim_type = prim_type;
2400 	info->synch = 0;
2401 	info->synch_value = info->head.nthreads;
2402 
2403 	info->thread_inheritor = NULL;
2404 
2405 	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2406 	wait_all_thread((struct synch_test_common *)info);
2407 }
2408 
2409 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)2410 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2411 {
2412 	info->prim_type = prim_type;
2413 
2414 	info->synch = 0;
2415 	info->synch_value = info->head.nthreads;
2416 	info->value = 0;
2417 	info->handoff_failure = 0;
2418 	info->thread_inheritor = NULL;
2419 
2420 	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2421 	wait_all_thread((struct synch_test_common *)info);
2422 
2423 	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2424 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2425 }
2426 
2427 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2428 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2429 {
2430 	info->prim_type = prim_type;
2431 
2432 	info->thread_inheritor = NULL;
2433 	info->steal_pri = 0;
2434 	info->synch = 0;
2435 	info->synch_value = info->head.nthreads;
2436 
2437 	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2438 	wait_all_thread((struct synch_test_common *)info);
2439 }
2440 
2441 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2442 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2443 {
2444 	info->prim_type = prim_type;
2445 	info->synch = 0;
2446 	info->synch_value = info->head.nthreads;
2447 
2448 	info->thread_inheritor = NULL;
2449 	info->value = info->head.nthreads;
2450 
2451 	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2452 	wait_all_thread((struct synch_test_common *)info);
2453 }
2454 
2455 static void
test_rw_lock(struct info_sleep_inheritor_test * info)2456 test_rw_lock(struct info_sleep_inheritor_test *info)
2457 {
2458 	info->thread_inheritor = NULL;
2459 	info->value = info->head.nthreads;
2460 	info->synch = 0;
2461 	info->synch_value = info->head.nthreads;
2462 
2463 	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2464 	wait_all_thread((struct synch_test_common *)info);
2465 }
2466 
2467 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)2468 test_mtx_lock(struct info_sleep_inheritor_test *info)
2469 {
2470 	info->thread_inheritor = NULL;
2471 	info->value = info->head.nthreads;
2472 	info->synch = 0;
2473 	info->synch_value = info->head.nthreads;
2474 
2475 	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2476 	wait_all_thread((struct synch_test_common *)info);
2477 }
2478 
2479 kern_return_t
ts_kernel_sleep_inheritor_test(void)2480 ts_kernel_sleep_inheritor_test(void)
2481 {
2482 	struct info_sleep_inheritor_test info = {};
2483 
2484 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2485 
2486 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2487 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2488 	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
2489 
2490 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2491 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2492 
2493 	/*
2494 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2495 	 */
2496 	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
2497 	test_sleep_with_wake_all(&info, MTX_LOCK);
2498 
2499 	/*
2500 	 * Testing rw_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2501 	 */
2502 	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
2503 	test_sleep_with_wake_all(&info, RW_LOCK);
2504 
2505 	/*
2506 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
2507 	 */
2508 	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
2509 	test_sleep_with_wake_one(&info, MTX_LOCK);
2510 
2511 	/*
2512 	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
2513 	 */
2514 	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
2515 	test_sleep_with_wake_one(&info, RW_LOCK);
2516 
2517 	/*
2518 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2519 	 * and change_sleep_inheritor
2520 	 */
2521 	T_LOG("Testing change_sleep_inheritor with mxt sleep");
2522 	test_change_sleep_inheritor(&info, MTX_LOCK);
2523 
2524 	/*
2525 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2526 	 * and change_sleep_inheritor
2527 	 */
2528 	T_LOG("Testing change_sleep_inheritor with rw sleep");
2529 	test_change_sleep_inheritor(&info, RW_LOCK);
2530 
2531 	/*
2532 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2533 	 * with inheritor NULL
2534 	 */
2535 	T_LOG("Testing inheritor NULL");
2536 	test_no_inheritor(&info, MTX_LOCK);
2537 
2538 	/*
2539 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2540 	 * with inheritor NULL
2541 	 */
2542 	T_LOG("Testing inheritor NULL");
2543 	test_no_inheritor(&info, RW_LOCK);
2544 
2545 	/*
2546 	 * Testing mtx locking combinations
2547 	 */
2548 	T_LOG("Testing mtx locking combinations");
2549 	test_mtx_lock(&info);
2550 
2551 	/*
2552 	 * Testing rw locking combinations
2553 	 */
2554 	T_LOG("Testing rw locking combinations");
2555 	test_rw_lock(&info);
2556 
2557 	/*
2558 	 * Testing reflock / cond_sleep_with_inheritor
2559 	 */
2560 	T_LOG("Test cache reflock + cond_sleep_with_inheritor");
2561 	test_cache_reflock(&info);
2562 	T_LOG("Test force reflock + cond_sleep_with_inheritor");
2563 	test_force_reflock(&info);
2564 	T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
2565 	test_refcount_reflock(&info);
2566 	T_LOG("Test lock reflock + cond_sleep_with_inheritor");
2567 	test_lock_reflock(&info);
2568 
2569 	destroy_synch_test_common((struct synch_test_common *)&info);
2570 
2571 	lck_attr_free(lck_attr);
2572 	lck_grp_attr_free(lck_grp_attr);
2573 	lck_rw_destroy(&info.rw_lock, lck_grp);
2574 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2575 	lck_grp_free(lck_grp);
2576 
2577 	return KERN_SUCCESS;
2578 }
2579 
/*
 * Thread body exercising gate close/steal/wait interactions.
 *
 * The first thread closes the gate and becomes the holder. The first
 * contender steals it (recording its priority in steal_pri, which later
 * waiters raise to the max waiter priority); the remaining threads block
 * in primitive_gate_wait(). The stealer asserts it inherited the highest
 * waiter priority before opening the gate. Closer and stealer exclude
 * themselves from the waiter accounting, hence the synch_value - 2
 * barriers.
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* First thread in: close the gate and hold it. */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		/* Only open if the gate was not stolen from us meanwhile. */
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* First contender: steal the gate from the holder. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* As gate holder, we must have inherited the max waiter priority. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* Plain waiter: publish our priority and block on the gate. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* Any inheritance push must be gone by the time the thread exits. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2639 
/*
 * Thread body exercising gate free with waiters present.
 *
 * Exactly one thread wins primitive_gate_try_close() and becomes the
 * holder; every other thread blocks in primitive_gate_wait(). Once all
 * waiters are queued, the holder opens the gate (waking them with
 * GATE_OPENED) and immediately frees it.
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* Holder path: wait for everyone else to block on the gate. */
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		primitive_gate_open(info);
		primitive_gate_free(info);
	} else {
		/* Waiter path: the gate must look closed, then wait for the open. */
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2675 
/*
 * Thread body using a gate as a mutex with explicit handoff.
 *
 * Each thread either closes the gate itself or waits and receives it via
 * GATE_HANDOFF. While "holding the mutex" it increments info->value, then
 * hands the gate to the next waiter. A holder that finds no waiter
 * (KERN_NOT_WAITING) records the single expected handoff failure and
 * opens the gate with GATE_HANDOFF_OPEN_IF_NO_WAITERS instead.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		/* Gate already closed: wait to receive it through a handoff. */
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* Critical section: the gate serializes access to info->value. */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* Must happen exactly once, for the last thread through. */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2723 
/*
 * Worker for test_gate_push(): only the one thread that manages to close
 * the gate performs the "work"; everybody else waits on the gate (pushing
 * their priority on the keeper) and re-checks work_to_do when woken.
 * Verifies the keeper is boosted to the max priority of the waiters.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			/* Keeper: do the work outside the lock while others push. */
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			/* Reset the barrier counter for the next scenario run. */
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			/* Waiter: block on the gate, then re-check under the lock. */
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* No kernel promotion may linger once the gate interaction is done. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2765 
/*
 * Runs the gate priority-push scenario (thread_just_one_do_work) with the
 * embedded gate; prim_type selects the mutex- or rw-lock-based primitive.
 */
static void
test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;
	info->use_alloc_gate = false;

	primitive_gate_init(info);
	info->work_to_do = TRUE;
	info->synch = 0;
	info->synch_value = NUM_THREADS;

	/* TRUE: start the worker threads with different priorities. */
	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}
2782 
/*
 * Runs the gate handoff scenario (thread_gate_like_mutex): every thread
 * must increment info->value exactly once under gate ownership, and
 * exactly one handoff (the last one) is expected to find no waiter.
 */
static void
test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;
	info->use_alloc_gate = false;

	primitive_gate_init(info);

	info->synch = 0;
	info->synch_value = NUM_THREADS;
	info->value = 0;
	info->handoff_failure = 0;

	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
	wait_all_thread((struct synch_test_common *)info);

	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
	T_ASSERT(info->handoff_failure == 1, "handoff failures");

	primitive_gate_destroy(info);
}
2804 
/*
 * Runs the gate steal scenario (thread_gate_aggressive): a higher-priority
 * thread steals gate ownership from the current keeper; steal_pri tracks
 * the highest priority seen among the contenders.
 */
static void
test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;
	info->use_alloc_gate = false;

	primitive_gate_init(info);

	info->synch = 0;
	info->synch_value = NUM_THREADS;
	info->thread_inheritor = NULL;
	info->steal_pri = 0;

	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}
2823 
2824 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2825 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2826 {
2827 	(void)info;
2828 	(void) prim_type;
2829 	info->prim_type = prim_type;
2830 	info->use_alloc_gate = true;
2831 
2832 	primitive_gate_alloc(info);
2833 
2834 	info->synch = 0;
2835 	info->synch_value = NUM_THREADS;
2836 
2837 	start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2838 	wait_all_thread((struct synch_test_common *)info);
2839 
2840 	T_ASSERT(info->alloc_gate == NULL, "gate free");
2841 	info->use_alloc_gate = false;
2842 }
2843 
2844 kern_return_t
ts_kernel_gate_test(void)2845 ts_kernel_gate_test(void)
2846 {
2847 	struct info_sleep_inheritor_test info = {};
2848 
2849 	T_LOG("Testing gate primitive");
2850 
2851 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2852 
2853 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2854 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2855 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2856 
2857 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2858 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2859 
2860 	/*
2861 	 * Testing the priority inherited by the keeper
2862 	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2863 	 */
2864 	T_LOG("Testing gate push, mtx");
2865 	test_gate_push(&info, MTX_LOCK);
2866 
2867 	T_LOG("Testing gate push, rw");
2868 	test_gate_push(&info, RW_LOCK);
2869 
2870 	/*
2871 	 * Testing the handoff
2872 	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2873 	 */
2874 	T_LOG("Testing gate handoff, mtx");
2875 	test_gate_handoff(&info, MTX_LOCK);
2876 
2877 	T_LOG("Testing gate handoff, rw");
2878 	test_gate_handoff(&info, RW_LOCK);
2879 
2880 	/*
2881 	 * Testing the steal
2882 	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2883 	 */
2884 	T_LOG("Testing gate steal, mtx");
2885 	test_gate_steal(&info, MTX_LOCK);
2886 
2887 	T_LOG("Testing gate steal, rw");
2888 	test_gate_steal(&info, RW_LOCK);
2889 
2890 	/*
2891 	 * Testing the alloc/free
2892 	 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2893 	 */
2894 	T_LOG("Testing gate alloc/free, mtx");
2895 	test_gate_alloc_free(&info, MTX_LOCK);
2896 
2897 	T_LOG("Testing gate alloc/free, rw");
2898 	test_gate_alloc_free(&info, RW_LOCK);
2899 
2900 	destroy_synch_test_common((struct synch_test_common *)&info);
2901 
2902 	lck_attr_free(lck_attr);
2903 	lck_grp_attr_free(lck_grp_attr);
2904 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2905 	lck_grp_free(lck_grp);
2906 
2907 	return KERN_SUCCESS;
2908 }
2909 
/* Number of threads participating in each turnstile chain scenario. */
#define NUM_THREAD_CHAIN 6

/* Shared state for the turnstile chain tests (one gate per thread). */
struct turnstile_chain_test {
	struct synch_test_common head;   /* common thread bookkeeping (must be first) */
	lck_mtx_t mtx_lock;              /* lock protecting the gates */
	int synch_value;                 /* number of participants to rendezvous */
	int synch;                       /* main barrier counter */
	int synch2;                      /* secondary barrier counter */
	gate_t gates[NUM_THREAD_CHAIN];  /* one gate per chain position */
};
2920 
/*
 * Worker for test_sleep_gate_chain(): builds a turnstile chain that mixes
 * gates and sleep_with_inheritor. Even-indexed threads close their own
 * gate and sleep with thread i-1 as inheritor; odd-indexed threads wait
 * on the gate closed by thread i-1. Thread 0 is the head of the chain and
 * is expected to inherit the max priority of all participants.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Barrier: all even gates must be closed before anyone starts waiting. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for all waiters, then verify the push. */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Locate our slot: wait behind thread i-1, wake thread i+1 later. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd thread: block on the gate closed by thread i-1. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Wake the next thread in the chain without transferring push. */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* No kernel promotion may linger once the chain is torn down. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3016 
/*
 * Worker for test_gate_chain(): every thread closes its own gate, then
 * each thread i > 0 waits on the gate of thread i-1, forming a turnstile
 * chain. Thread 0 (chain head) must inherit the max priority of all
 * participants before it opens its gate and unwinds the chain.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Every thread closes the gate matching its own chain position. */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	/* Barrier: all gates closed before anyone starts waiting. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Chain head: wait for everyone, verify the push, open gate 0. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Wait on the previous thread's gate, then open our own. */
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* No kernel promotion may linger once the chain is torn down. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3079 
/*
 * Worker for test_sleep_chain(): builds a pure sleep_with_inheritor
 * turnstile chain — each thread i > 0 sleeps on an event owned by thread
 * i-1, naming it as inheritor. Thread 0 (chain head) must inherit the max
 * priority of all participants, then the chain unwinds one wakeup at a
 * time without transferring the push.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Chain head: wait for all sleepers, verify the inherited push. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake the next link without handing over the push. */
		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* Locate our slot: sleep behind thread i-1, wake thread i+1 later. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* The last thread in the chain has nobody left to wake. */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* No kernel promotion may linger once the chain is torn down. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3157 
/* Runs the sleep_with_inheritor chain scenario (thread_sleep_chain_work). */
static void
test_sleep_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
3167 
/* Runs the gate-only chain scenario (thread_gate_chain_work). */
static void
test_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
3178 
/* Runs the mixed sleep+gate chain scenario (thread_sleep_gate_chain_work). */
static void
test_sleep_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
3189 
/*
 * POST entry point for turnstile chain propagation: runs the sleep-only,
 * gate-only and mixed chain scenarios on NUM_THREAD_CHAIN threads sharing
 * one mutex and one gate per thread.
 *
 * @return KERN_SUCCESS (individual checks T_ASSERT on failure).
 */
kern_return_t
ts_kernel_turnstile_chain_test(void)
{
	struct turnstile_chain_test info = {};
	int i;

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	/* One gate per chain position, all protected by the same mutex. */
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
	}

	T_LOG("Testing sleep chain, lck");
	test_sleep_chain(&info);

	T_LOG("Testing gate chain, lck");
	test_gate_chain(&info);

	T_LOG("Testing sleep and gate chain, lck");
	test_sleep_gate_chain(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
	}
	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
3226 
3227 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)3228 ts_kernel_timingsafe_bcmp_test(void)
3229 {
3230 	int i, buf_size;
3231 	char *buf = NULL;
3232 
3233 	// empty
3234 	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3235 	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3236 	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3237 
3238 	// equal
3239 	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3240 
3241 	// unequal
3242 	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3243 	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3244 	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3245 	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3246 
3247 	// all possible bitwise differences
3248 	for (i = 1; i < 256; i += 1) {
3249 		unsigned char a = 0;
3250 		unsigned char b = (unsigned char)i;
3251 
3252 		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3253 	}
3254 
3255 	// large
3256 	buf_size = 1024 * 16;
3257 	buf = kalloc_data(buf_size, Z_WAITOK);
3258 	T_EXPECT_NOTNULL(buf, "kalloc of buf");
3259 
3260 	read_random(buf, buf_size);
3261 	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3262 	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3263 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3264 
3265 	memcpy(buf + 128, buf, 128);
3266 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3267 
3268 	kfree_data(buf, buf_size);
3269 
3270 	return KERN_SUCCESS;
3271 }
3272 
/*
 * POST smoke test for the %hx / %hhx / %llx printf length modifiers:
 * only checks that the format machinery accepts them without misparsing
 * (output is not captured or compared).
 */
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	T_PASS("kprintf_hhx_test passed");
	return KERN_SUCCESS;
}
3285 
/*
 * Static-if keys exercised by static_if_tests(): two start enabled and two
 * disabled; the *_to_* keys are flipped by static_if_tests_setup().
 */
static STATIC_IF_KEY_DEFINE_TRUE(key_true);
static STATIC_IF_KEY_DEFINE_TRUE(key_true_to_false);
static STATIC_IF_KEY_DEFINE_FALSE(key_false);
static STATIC_IF_KEY_DEFINE_FALSE(key_false_to_true);
3290 
/*
 * Static-if init hook: flips the two transition keys so static_if_tests()
 * can verify both the enabled->disabled and disabled->enabled paths.
 */
__static_if_init_func
static void
static_if_tests_setup(const char *args __unused)
{
	static_if_key_disable(key_true_to_false);
	static_if_key_enable(key_false_to_true);
}
STATIC_IF_INIT(static_if_tests_setup);
3299 
3300 static void
static_if_tests(void)3301 static_if_tests(void)
3302 {
3303 	int n = 0;
3304 
3305 	if (static_if(key_true)) {
3306 		n++;
3307 	}
3308 	if (probable_static_if(key_true)) {
3309 		n++;
3310 	}
3311 	if (improbable_static_if(key_true)) {
3312 		n++;
3313 	}
3314 	if (n != 3) {
3315 		panic("should still be enabled [n == %d, expected %d]", n, 3);
3316 	}
3317 
3318 	if (static_if(key_true_to_false)) {
3319 		n++;
3320 	}
3321 	if (probable_static_if(key_true_to_false)) {
3322 		n++;
3323 	}
3324 	if (improbable_static_if(key_true_to_false)) {
3325 		n++;
3326 	}
3327 	if (n != 3) {
3328 		panic("should now be disabled [n == %d, expected %d]", n, 3);
3329 	}
3330 
3331 	if (static_if(key_false)) {
3332 		n++;
3333 	}
3334 	if (probable_static_if(key_false)) {
3335 		n++;
3336 	}
3337 	if (improbable_static_if(key_false)) {
3338 		n++;
3339 	}
3340 	if (n != 3) {
3341 		panic("should still be disabled [n == %d, expected %d]", n, 3);
3342 	}
3343 
3344 	if (static_if(key_false_to_true)) {
3345 		n++;
3346 	}
3347 	if (probable_static_if(key_false_to_true)) {
3348 		n++;
3349 	}
3350 	if (improbable_static_if(key_false_to_true)) {
3351 		n++;
3352 	}
3353 	if (n != 6) {
3354 		panic("should now be disabled [n == %d, expected %d]", n, 3);
3355 	}
3356 }
3357 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, static_if_tests);
3358 
3359 #if __BUILDING_XNU_LIB_UNITTEST__
3360 /* these functions are used for testing the unittest mocking framework and interposing */
3361 
/*
 * Trivial __mockable targets for the unittest mocking/interposing
 * framework: each returns a distinct constant so a test can tell whether
 * the real function or a mock was invoked.
 */
__mockable size_t
kernel_func1(__unused int a, __unused char b)
{
	return 1000;
}
__mockable size_t
kernel_func2(__unused int a, __unused char b)
{
	return 2000;
}
__mockable size_t
kernel_func3(__unused int a, __unused char b)
{
	return 3000;
}
__mockable size_t
kernel_func4(__unused int a, __unused char b)
{
	return 4000;
}
__mockable size_t
kernel_func5(__unused int a, __unused char b)
{
	return 5000;
}
/* Records the 'a' argument so tests can observe that func6 was called. */
int kernel_func6_was_called = 0;
/* Void __mockable target: side effect only (sets the flag above). */
__mockable void
kernel_func6(__unused int a, __unused char b)
{
	printf("in void func6");
	kernel_func6_was_called = a;
}
/* Value-returning __mockable target (distinct constant, see func1-5). */
__mockable size_t
kernel_func7(__unused int a, __unused char b)
{
	return 7000;
}
/* Records the 'a' argument so tests can observe that func8 was called. */
int kernel_func8_was_called = 0;
/* Void __mockable target: side effect only (sets the flag above). */
__mockable void
kernel_func8(__unused int a, __unused char b)
{
	printf("in void func8");
	kernel_func8_was_called = a;
}
3406 
3407 #endif /* __BUILDING_XNU_LIB_UNITTEST__ */
3408