xref: /xnu-12377.41.6/osfmk/tests/kernel_tests.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <machine/static_if.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_object_internal.h>
48 #include <vm/vm_protos.h>
49 #include <vm/vm_iokit.h>
50 #include <string.h>
51 #include <kern/kern_apfs_reflock.h>
52 
53 #if !(DEVELOPMENT || DEBUG)
54 #error "Testing is not enabled on RELEASE configurations"
55 #endif
56 
57 #include <tests/xnupost.h>
58 
59 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
60 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
61 
62 uint32_t total_post_tests_count = 0;
63 void xnupost_reset_panic_widgets(void);
64 
65 /* test declarations */
66 kern_return_t zalloc_test(void);
67 kern_return_t RandomULong_test(void);
68 kern_return_t kcdata_api_test(void);
69 kern_return_t ts_kernel_primitive_test(void);
70 kern_return_t ts_kernel_sleep_inheritor_test(void);
71 kern_return_t ts_kernel_gate_test(void);
72 kern_return_t ts_kernel_turnstile_chain_test(void);
73 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
74 
75 #if __ARM_VFP__
76 extern kern_return_t vfp_state_test(void);
77 #endif
78 
79 extern kern_return_t kprintf_hhx_test(void);
80 
81 #if defined(__arm64__)
82 kern_return_t pmap_coredump_test(void);
83 #endif
84 
85 extern kern_return_t console_serial_test(void);
86 extern kern_return_t console_serial_parallel_log_tests(void);
87 extern kern_return_t test_printf(void);
88 extern kern_return_t test_os_log(void);
89 extern kern_return_t test_os_log_handles(void);
90 extern kern_return_t test_os_log_parallel(void);
91 extern kern_return_t bitmap_post_test(void);
92 extern kern_return_t counter_tests(void);
93 #if ML_IO_TIMEOUTS_ENABLED
94 extern kern_return_t ml_io_timeout_test(void);
95 #endif
96 
97 #ifdef __arm64__
98 extern kern_return_t arm64_backtrace_test(void);
99 extern kern_return_t arm64_munger_test(void);
100 #if __ARM_PAN_AVAILABLE__
101 extern kern_return_t arm64_pan_test(void);
102 #endif
103 #if defined(HAS_APPLE_PAC)
104 extern kern_return_t arm64_ropjop_test(void);
105 #endif /* defined(HAS_APPLE_PAC) */
106 #if CONFIG_SPTM
107 extern kern_return_t arm64_panic_lockdown_test(void);
108 #endif /* CONFIG_SPTM */
109 #if HAS_MTE
110 extern kern_return_t mte_test(void);
111 extern kern_return_t mte_copyio_recovery_handler_test(void);
112 #endif /* HAS_MTE */
113 #if HAS_SPECRES
114 extern kern_return_t specres_test(void);
115 #endif /* HAS_SPECRES */
116 #if BTI_ENFORCED
117 kern_return_t arm64_bti_test(void);
118 #endif /* BTI_ENFORCED */
119 extern kern_return_t arm64_speculation_guard_test(void);
120 extern kern_return_t arm64_aie_test(void);
121 #endif /* __arm64__ */
122 
123 extern kern_return_t test_thread_call(void);
124 
/*
 * Single global panic-widget registration slot. Armed by
 * xnupost_register_panic_widget(), consumed in xnupost_process_kdb_stop(),
 * and cleared via xnupost_reset_panic_widgets() before each test runs.
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
129 
/*
 * Master table of kernel POST tests. Entries are numbered and filtered
 * against the boot-arg runlist by xnupost_list_tests() and executed by
 * xnupost_run_tests(). Architecture- and feature-specific tests are
 * compiled in conditionally.
 */
struct xnupost_test kernel_post_tests[] = {
	XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	XNUPOST_TEST_CONFIG_BASIC(test_printf),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_handles),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	XNUPOST_TEST_CONFIG_BASIC(arm64_backtrace_test),
	XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
#if __ARM_PAN_AVAILABLE__
	XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#if CONFIG_SPTM
	XNUPOST_TEST_CONFIG_BASIC(arm64_panic_lockdown_test),
#endif /* CONFIG_SPTM */
	XNUPOST_TEST_CONFIG_BASIC(arm64_speculation_guard_test),
#endif /* __arm64__ */
	XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	//XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	XNUPOST_TEST_CONFIG_BASIC(counter_tests),
#if ML_IO_TIMEOUTS_ENABLED
	XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
#endif
#if HAS_MTE
	XNUPOST_TEST_CONFIG_BASIC(mte_test),
	XNUPOST_TEST_CONFIG_BASIC(mte_copyio_recovery_handler_test),
#endif
#if HAS_SPECRES
	XNUPOST_TEST_CONFIG_BASIC(specres_test),
#endif
};
182 
/* Element count of kernel_post_tests (xnupost_test_data_t is the element type). */
uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);

/* Flag bits parsed from the "kernPOST" boot-arg into kernel_post_args. */
#define POSTARGS_RUN_TESTS 0x1            /* run POST tests at boot */
#define POSTARGS_CONTROLLER_AVAILABLE 0x2 /* external controller can absorb expected panics */
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4  /* "kernPOST_config" provides a test-number runlist */
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
/* KERN_INVALID_CAPABILITY doubles as the "not yet parsed" sentinel for the cache below. */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256]; /* raw "kernPOST_config" boot-arg string */
boolean_t xnupost_should_run_test(uint32_t test_num);
194 
195 kern_return_t
xnupost_parse_config()196 xnupost_parse_config()
197 {
198 	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
199 		return parse_config_retval;
200 	}
201 	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
202 
203 	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
204 		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
205 	}
206 
207 	if (kernel_post_args != 0) {
208 		parse_config_retval = KERN_SUCCESS;
209 		goto out;
210 	}
211 	parse_config_retval = KERN_NOT_SUPPORTED;
212 out:
213 	return parse_config_retval;
214 }
215 
216 boolean_t
xnupost_should_run_test(uint32_t test_num)217 xnupost_should_run_test(uint32_t test_num)
218 {
219 	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
220 		int64_t begin = 0, end = 999999;
221 		char * b = kernel_post_test_configs;
222 		while (*b) {
223 			get_range_bounds(b, &begin, &end);
224 			if (test_num >= begin && test_num <= end) {
225 				return TRUE;
226 			}
227 
228 			/* skip to the next "," */
229 			while (*b != ',') {
230 				if (*b == '\0') {
231 					return FALSE;
232 				}
233 				b++;
234 			}
235 			/* skip past the ',' */
236 			b++;
237 		}
238 		return FALSE;
239 	}
240 	return TRUE;
241 }
242 
243 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)244 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
245 {
246 	if (KERN_SUCCESS != xnupost_parse_config()) {
247 		return KERN_FAILURE;
248 	}
249 
250 	xnupost_test_t testp;
251 	for (uint32_t i = 0; i < test_count; i++) {
252 		testp = &test_list[i];
253 		if (testp->xt_test_num == 0) {
254 			assert(total_post_tests_count < UINT16_MAX);
255 			testp->xt_test_num = (uint16_t)++total_post_tests_count;
256 		}
257 		/* make sure the boot-arg based test run list is honored */
258 		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
259 			testp->xt_config |= XT_CONFIG_IGNORE;
260 			if (xnupost_should_run_test(testp->xt_test_num)) {
261 				testp->xt_config &= ~(XT_CONFIG_IGNORE);
262 				testp->xt_config |= XT_CONFIG_RUN;
263 				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
264 			}
265 		}
266 		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
267 		    testp->xt_config);
268 	}
269 
270 	return KERN_SUCCESS;
271 }
272 
/*
 * Execute every test in test_list, honoring the kernPOST boot-args.
 * Per-test outcomes are recorded in each entry's xt_retval and
 * xt_test_actions fields; the function's own return value only reports
 * whether a run was attempted at all.
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	/* Tests only run when the kernPOST boot-arg requested them. */
	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* Disarm any panic widget left over from a previous test. */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		/* end time defaults to begin time in case the test is skipped */
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		/* compare against the per-test expected result (may be "expected to fail") */
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
337 
338 kern_return_t
kernel_list_tests()339 kernel_list_tests()
340 {
341 	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
342 }
343 
344 kern_return_t
kernel_do_post()345 kernel_do_post()
346 {
347 	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
348 }
349 
350 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)351 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
352 {
353 	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
354 		return KERN_RESOURCE_SHORTAGE;
355 	}
356 
357 	xt_panic_widgets.xtp_context_p = context;
358 	xt_panic_widgets.xtp_func      = funcp;
359 	xt_panic_widgets.xtp_func_name = funcname;
360 	xt_panic_widgets.xtp_outval_p  = outval;
361 
362 	return KERN_SUCCESS;
363 }
364 
365 void
xnupost_reset_panic_widgets()366 xnupost_reset_panic_widgets()
367 {
368 	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
369 }
370 
/*
 * Hook invoked on panic/assert (kdb stop). Hands the panic string to the
 * currently armed panic widget, and translates the widget's verdict into
 * whether the panic should be absorbed (KERN_SUCCESS: return from the
 * stop) or allowed to proceed (any other return).
 *
 * panic_s: the panic/assertion string being reported.
 */
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval         = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	/* No widget armed: this panic is not ours to handle. */
	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		/* test failed, but the panic itself is absorbed */
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		/* test case succeeded, but the widget wants the panic to proceed */
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
417 
418 xt_panic_return_t
_xt_generic_assert_check(const char * s,void * str_to_match,void ** outval)419 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
420 {
421 	xt_panic_return_t ret = XT_PANIC_UNRELATED;
422 
423 	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
424 		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
425 		ret = XT_RET_W_SUCCESS;
426 	}
427 
428 	if (outval) {
429 		*outval = (void *)(uintptr_t)ret;
430 	}
431 	return ret;
432 }
433 
434 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)435 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
436 {
437 	uint32_t i = 0;
438 	xnupost_test_t testp;
439 	for (; i < test_count; i++) {
440 		testp                  = &test_list[i];
441 		testp->xt_begin_time   = 0;
442 		testp->xt_end_time     = 0;
443 		testp->xt_test_actions = XT_ACTION_NONE;
444 		testp->xt_retval       = -1;
445 	}
446 	return KERN_SUCCESS;
447 }
448 
449 
/*
 * POST: smoke-test the zone allocator — create a destructible zone for
 * uint64_t elements, allocate one element, and free it again.
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* a freshly created zone should have no free elements yet */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
473 
474 /*
475  * Function used for comparison by qsort()
476  */
/*
 * qsort() comparator for uint64_t values, ascending order.
 * Returns a negative, zero, or positive value as *a is less than,
 * equal to, or greater than *b.
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t lhs = *(const uint64_t *)a;
	const uint64_t rhs = *(const uint64_t *)b;

	/* (lhs > rhs) - (lhs < rhs) yields exactly -1, 0, or 1 */
	return (lhs > rhs) - (lhs < rhs);
}
490 
491 /*
492  * Function to count number of bits that are set in a number.
493  * It uses Side Addition using Magic Binary Numbers
494  */
/*
 * Count the number of set bits (population count) in a 64-bit value.
 * Uses Kernighan's method: each iteration clears the lowest set bit,
 * so the loop runs once per set bit.
 */
static int
count_bits(uint64_t number)
{
	int set = 0;

	while (number != 0) {
		number &= (number - 1); /* clear the lowest set bit */
		set++;
	}
	return set;
}
500 
/*
 * POST: statistical sanity checks over read_random() output — see the
 * criteria list in the comment block below.
 */
kern_return_t
RandomULong_test()
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 *  a. RandomULong works
 *  b. The generated numbers match the following entropy criteria:
 *     For a thousand iterations, verify:
 *          1. mean entropy > 12 bits
 *          2. min entropy > 4 bits
 *          3. No Duplicate
 *          4. No incremental/decremental pattern in a window of 3
 *          5. No Zero
 *          6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
/* ceil(CONF_WINDOW_SIZE / 2); the trailing ">> 0" is a no-op */
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	/* bit entropy here = number of bits differing between consecutive samples */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		/* trend counts rises minus falls between adjacent samples in the window */
		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 */
	/* sort so duplicates become adjacent */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
637 
638 
/* KCDATA kernel api tests */
/* Shared kcdata descriptor reused by kcdata_api_test(). */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/*
 * Sample packed payload used to exercise kcdata custom type definitions;
 * its field layout is described by test_disk_io_stats_def below.
 */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));
647 
/*
 * Subtype descriptors for struct sample_disk_io_stats: one entry per
 * field, with offsets expressed in uint64_t-sized slots. The
 * io_priority_count entry is a 4-element array (KCS_SUBTYPE_PACK_SIZE).
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
678 
/*
 * POST: exercise the kcdata API — negative-path checks on
 * kcdata_memory_static_init()/kcdata_get_memory_addr(), then a
 * successful buffer init followed by item, array, and custom-type
 * additions, verifying the buffer layout after each step.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* poison the length so we can verify init overwrites it */
	test_kc_data.kcd_length   = 0xdeadbeef;

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	/* only the BEGIN and END item headers should be present at this point */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* array item flags encode (type << 32) | element count */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
789 
790 /*
791  *  kern_return_t
792  *  kcdata_api_assert_tests()
793  *  {
794  *       kern_return_t retval       = 0;
795  *       void * assert_check_retval = NULL;
796  *       test_kc_data2.kcd_length   = 0xdeadbeef;
797  *       mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
798  *       T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
799  *
800  *       retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
801  *                                          KCFLAG_USE_MEMCOPY);
802  *
803  *       T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
804  *
805  *       retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
806  *       T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
807  *
808  *       // this will assert
809  *       retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
810  *       T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
811  *       T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
812  *
813  *       return KERN_SUCCESS;
814  *  }
815  */
816 
817 #if defined(__arm64__)
818 
819 #include <arm/pmap.h>
820 
821 #define MAX_PMAP_OBJECT_ELEMENT 100000
822 
823 extern struct vm_object pmap_object_store; /* store pt pages */
824 extern unsigned long gPhysBase, gPhysSize, first_avail;
825 
826 /*
827  * Define macros to transverse the pmap object structures and extract
828  * physical page number with information from low global only
829  * This emulate how Astris extracts information from coredump
830  */
831 #if defined(__arm64__)
832 
833 static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)834 astris_vm_page_unpack_ptr(uintptr_t p)
835 {
836 	if (!p) {
837 		return (uintptr_t)0;
838 	}
839 
840 	return (p & lowGlo.lgPmapMemFromArrayMask)
841 	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
842 	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
843 }
844 
// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

/* the first element is reached exactly like any next link */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* iteration stops when the current element compares equal to the head */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

/*
 * Walk the pmap object's page list the way Astris would: follow the
 * packed next pointer stored lgPmapMemChainOffset bytes into each page.
 */
#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* page number -> physical address, using the page shift from lowGlo */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
859 
860 static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)861 astris_vm_page_get_phys_page(uintptr_t m)
862 {
863 	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
864 	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
865 	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
866 }
867 
/*
 * Verify that the low-memory globals (lowGlo) describe the pmap page
 * structures accurately enough for an external coredump consumer
 * (Astris) to walk them without kernel symbols: check the lowGlo
 * layout version/magic, the constants mirrored from the VM
 * packed-pointer scheme, then walk the entire pmap_object_store page
 * queue using only the astris_* emulation macros and validate that
 * every page's physical address falls inside [gPhysBase, gPhysBase +
 * gPhysSize).
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	/* static region described by lowGlo must sit inside physical memory */
	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	/* packed-pointer constants must match the live VM configuration */
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	/* hold the object lock shared while walking the page queue */
	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* bail out if the walk fails to terminate (corrupt chain) */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* the queue must contain at least one page */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
909 #endif /* defined(__arm64__) */
910 
/*
 * Per-thread arguments for thread_lock_unlock_kernel_primitive(),
 * used by ts_kernel_primitive_test(). The *_b counters synchronize
 * before the turnstile lock is taken; the *_a counters synchronize
 * after it is held.
 */
struct ts_kern_prim_test_args {
	int *end_barrier;        /* bumped (with wakeup) when the thread finishes */
	int *notify_b;           /* bumped just before taking the turnstile lock */
	int *wait_event_b;       /* counter waited on before taking the lock */
	int before_num;          /* value wait_event_b must reach */
	int *notify_a;           /* bumped right after the lock is acquired */
	int *wait_event_a;       /* counter waited on while holding the lock */
	int after_num;           /* value wait_event_a must reach */
	int priority_to_check;   /* if non-zero, expected sched_pri while holding the lock */
};
921 
/*
 * Block the calling thread until the counter *var (incremented by
 * wake_threads()) reaches num. A NULL var is a no-op.
 *
 * The counter is re-checked after assert_wait() registers the wait but
 * before blocking, so a wakeup racing with the registration is not
 * lost: if the target was reached in that window the pending wait is
 * cancelled with clear_wait() instead of blocking.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
938 
939 static void
wake_threads(int * var)940 wake_threads(
941 	int* var)
942 {
943 	if (var) {
944 		os_atomic_inc(var, relaxed);
945 		thread_wakeup((event_t) var);
946 	}
947 }
948 
949 extern void IOSleep(int);
950 
/*
 * Worker body for ts_kernel_primitive_test().
 *
 * Optionally waits for peers (wait_event_b / before_num), signals
 * notify_b, takes the sysctl turnstile test lock, signals notify_a,
 * waits for wait_event_a / after_num, and — when priority_to_check is
 * set — verifies that the waiters' priority was pushed onto this
 * thread while it held the lock.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	/* give the waiters time to block on the lock and push on us */
	IOSleep(100);

	if (info->priority_to_check) {
		/* sample our own sched_pri under the thread lock at splsched */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
984 
985 kern_return_t
ts_kernel_primitive_test(void)986 ts_kernel_primitive_test(void)
987 {
988 	thread_t owner, thread1, thread2;
989 	struct ts_kern_prim_test_args targs[2] = {};
990 	kern_return_t result;
991 	int end_barrier = 0;
992 	int owner_locked = 0;
993 	int waiters_ready = 0;
994 
995 	T_LOG("Testing turnstile kernel primitive");
996 
997 	targs[0].notify_b = NULL;
998 	targs[0].wait_event_b = NULL;
999 	targs[0].before_num = 0;
1000 	targs[0].notify_a = &owner_locked;
1001 	targs[0].wait_event_a = &waiters_ready;
1002 	targs[0].after_num = 2;
1003 	targs[0].priority_to_check = 90;
1004 	targs[0].end_barrier = &end_barrier;
1005 
1006 	// Start owner with priority 80
1007 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
1008 	T_ASSERT(result == KERN_SUCCESS, "Starting owner");
1009 
1010 	targs[1].notify_b = &waiters_ready;
1011 	targs[1].wait_event_b = &owner_locked;
1012 	targs[1].before_num = 1;
1013 	targs[1].notify_a = NULL;
1014 	targs[1].wait_event_a = NULL;
1015 	targs[1].after_num = 0;
1016 	targs[1].priority_to_check = 0;
1017 	targs[1].end_barrier = &end_barrier;
1018 
1019 	// Start waiters with priority 85 and 90
1020 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
1021 	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
1022 
1023 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
1024 	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
1025 
1026 	wait_threads(&end_barrier, 3);
1027 
1028 	return KERN_SUCCESS;
1029 }
1030 
/* info->prim_type selectors: which lock flavor a synch test exercises */
#define MTX_LOCK 0
#define RW_LOCK 1

/* worker-thread count for the synch tests (used by callers outside this excerpt) */
#define NUM_THREADS 4
1035 
/*
 * State shared by all synch tests: the worker-thread array plus
 * completion bookkeeping. Embedded as the first member of the
 * per-test info structures so they can be cast to this type.
 */
struct synch_test_common {
	unsigned int nthreads;  /* number of entries in threads[] */
	thread_t *threads;      /* slot is NULL until started, (thread_t)1 once excluded */
	int max_pri;            /* highest priority among started threads (capped at 95) */
	int test_done;          /* completion counter; see notify_waiter()/wait_all_thread() */
};
1042 
1043 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1044 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1045 {
1046 	info->nthreads = nthreads;
1047 	info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1048 	if (!info->threads) {
1049 		return ENOMEM;
1050 	}
1051 
1052 	return KERN_SUCCESS;
1053 }
1054 
1055 static void
destroy_synch_test_common(struct synch_test_common * info)1056 destroy_synch_test_common(struct synch_test_common *info)
1057 {
1058 	kfree_type(thread_t, info->nthreads, info->threads);
1059 }
1060 
/*
 * Spawn info->nthreads kernel threads running func, with priorities
 * 75, 80, 85, ... (5 apart). Records each thread in info->threads[]
 * and the expected maximum priority (capped at 95) in info->max_pri.
 * When sleep_after_first is set, sleeps 100ms after the first thread
 * so it runs ahead of the others.
 */
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	/* clear the slots; workers poll them via os_atomic_load */
	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		/* release-store so waiters see a fully initialized thread pointer */
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}
1092 
1093 static unsigned int
get_max_pri(struct synch_test_common * info)1094 get_max_pri(struct synch_test_common * info)
1095 {
1096 	return info->max_pri;
1097 }
1098 
1099 static void
wait_all_thread(struct synch_test_common * info)1100 wait_all_thread(struct synch_test_common * info)
1101 {
1102 	wait_threads(&info->test_done, info->nthreads);
1103 }
1104 
1105 static void
notify_waiter(struct synch_test_common * info)1106 notify_waiter(struct synch_test_common * info)
1107 {
1108 	wake_threads(&info->test_done);
1109 }
1110 
/*
 * Poll until every other worker thread has either been marked excluded
 * ((thread_t)1, see exclude_current_waiter()) or has gone off-core
 * (TH_RUN cleared), i.e. is blocked waiting. Used by the "inheritor"
 * thread before checking its pushed priority, so all waiters are
 * actually asleep when the check happens.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* wait for start_threads() to publish the slot */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				/* (thread_t)1 marks a thread excluded from the wait */
				if (thread == (thread_t) 1) {
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				/* don't trust TH_RUN until the thread has actually started */
				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1150 
1151 static void
exclude_current_waiter(struct synch_test_common * info)1152 exclude_current_waiter(struct synch_test_common *info)
1153 {
1154 	uint i, j;
1155 
1156 	for (i = 0; i < info->nthreads; i++) {
1157 		j = 0;
1158 		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
1159 			if (j % 100 == 0) {
1160 				IOSleep(10);
1161 			}
1162 			j++;
1163 		}
1164 
1165 		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
1166 			os_atomic_store(&info->threads[i], (thread_t)1, release);
1167 			return;
1168 		}
1169 	}
1170 }
1171 
/*
 * Shared state for the sleep-with-inheritor and gate tests. The
 * embedded synch_test_common must stay first so the structure can be
 * cast to it.
 */
struct info_sleep_inheritor_test {
	struct synch_test_common head;  /* must be first member */
	lck_mtx_t mtx_lock;             /* used when prim_type == MTX_LOCK */
	lck_rw_t rw_lock;               /* used when prim_type == RW_LOCK */
	decl_lck_mtx_gate_data(, gate); /* embedded gate, used unless use_alloc_gate */
	boolean_t gate_closed;          /* NOTE(review): not referenced in this excerpt */
	int prim_type;                  /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;           /* NOTE(review): not referenced in this excerpt */
	unsigned int max_pri;           /* NOTE(review): not set here; head.max_pri holds the cap */
	unsigned int steal_pri;         /* highest stealer priority, see thread_steal_work() */
	int synch_value;                /* target count for the synch barrier */
	int synch;                      /* barrier counter (wake_threads/wait_threads) */
	int value;                      /* per-test work counter */
	int handoff_failure;            /* failed-handoff count (see thread_inheritor_like_mutex) */
	thread_t thread_inheritor;      /* current inheritor; its address is also the sleep event */
	bool use_alloc_gate;            /* true: use alloc_gate instead of the embedded gate */
	gate_t *alloc_gate;             /* gate from primitive_gate_alloc(), NULL after free */
	struct obj_cached **obj_cache;  /* NOTE(review): not referenced in this excerpt */
	kern_apfs_reflock_data(, reflock); /* NOTE(review): not referenced in this excerpt */
	int reflock_protected_status;   /* NOTE(review): not referenced in this excerpt */
};
1193 
1194 static void
primitive_lock(struct info_sleep_inheritor_test * info)1195 primitive_lock(struct info_sleep_inheritor_test *info)
1196 {
1197 	switch (info->prim_type) {
1198 	case MTX_LOCK:
1199 		lck_mtx_lock(&info->mtx_lock);
1200 		break;
1201 	case RW_LOCK:
1202 		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1203 		break;
1204 	default:
1205 		panic("invalid type %d", info->prim_type);
1206 	}
1207 }
1208 
1209 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1210 primitive_unlock(struct info_sleep_inheritor_test *info)
1211 {
1212 	switch (info->prim_type) {
1213 	case MTX_LOCK:
1214 		lck_mtx_unlock(&info->mtx_lock);
1215 		break;
1216 	case RW_LOCK:
1217 		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1218 		break;
1219 	default:
1220 		panic("invalid type %d", info->prim_type);
1221 	}
1222 }
1223 
/*
 * Sleep on &info->thread_inheritor with the current inheritor as the
 * priority-push target, dropping and re-acquiring the test's lock
 * (LCK_SLEEP_DEFAULT) around the wait. Returns the wait result.
 */
static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
{
	wait_result_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}

	return ret;
}
1241 
1242 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1243 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1244 {
1245 	switch (info->prim_type) {
1246 	case MTX_LOCK:
1247 	case RW_LOCK:
1248 		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1249 		break;
1250 	default:
1251 		panic("invalid type %d", info->prim_type);
1252 	}
1253 }
1254 
1255 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1256 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1257 {
1258 	switch (info->prim_type) {
1259 	case MTX_LOCK:
1260 	case RW_LOCK:
1261 		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1262 		break;
1263 	default:
1264 		panic("invalid type %d", info->prim_type);
1265 	}
1266 	return;
1267 }
1268 
1269 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1270 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1271 {
1272 	switch (info->prim_type) {
1273 	case MTX_LOCK:
1274 	case RW_LOCK:
1275 		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1276 		break;
1277 	default:
1278 		panic("invalid type %d", info->prim_type);
1279 	}
1280 	return;
1281 }
1282 
1283 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1284 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1285 {
1286 	gate_t *gate = &info->gate;
1287 	if (info->use_alloc_gate == true) {
1288 		gate = info->alloc_gate;
1289 	}
1290 	kern_return_t ret = KERN_SUCCESS;
1291 	switch (info->prim_type) {
1292 	case MTX_LOCK:
1293 		ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1294 		break;
1295 	case RW_LOCK:
1296 		ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1297 		break;
1298 	default:
1299 		panic("invalid type %d", info->prim_type);
1300 	}
1301 	return ret;
1302 }
1303 
1304 static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test * info)1305 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1306 {
1307 	gate_t *gate = &info->gate;
1308 	if (info->use_alloc_gate == true) {
1309 		gate = info->alloc_gate;
1310 	}
1311 	gate_wait_result_t ret = GATE_OPENED;
1312 	switch (info->prim_type) {
1313 	case MTX_LOCK:
1314 		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1315 		break;
1316 	case RW_LOCK:
1317 		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1318 		break;
1319 	default:
1320 		panic("invalid type %d", info->prim_type);
1321 	}
1322 	return ret;
1323 }
1324 
1325 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1326 primitive_gate_open(struct info_sleep_inheritor_test *info)
1327 {
1328 	gate_t *gate = &info->gate;
1329 	if (info->use_alloc_gate == true) {
1330 		gate = info->alloc_gate;
1331 	}
1332 	switch (info->prim_type) {
1333 	case MTX_LOCK:
1334 		lck_mtx_gate_open(&info->mtx_lock, gate);
1335 		break;
1336 	case RW_LOCK:
1337 		lck_rw_gate_open(&info->rw_lock, gate);
1338 		break;
1339 	default:
1340 		panic("invalid type %d", info->prim_type);
1341 	}
1342 }
1343 
1344 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1345 primitive_gate_close(struct info_sleep_inheritor_test *info)
1346 {
1347 	gate_t *gate = &info->gate;
1348 	if (info->use_alloc_gate == true) {
1349 		gate = info->alloc_gate;
1350 	}
1351 
1352 	switch (info->prim_type) {
1353 	case MTX_LOCK:
1354 		lck_mtx_gate_close(&info->mtx_lock, gate);
1355 		break;
1356 	case RW_LOCK:
1357 		lck_rw_gate_close(&info->rw_lock, gate);
1358 		break;
1359 	default:
1360 		panic("invalid type %d", info->prim_type);
1361 	}
1362 }
1363 
1364 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1365 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1366 {
1367 	gate_t *gate = &info->gate;
1368 	if (info->use_alloc_gate == true) {
1369 		gate = info->alloc_gate;
1370 	}
1371 
1372 	switch (info->prim_type) {
1373 	case MTX_LOCK:
1374 		lck_mtx_gate_steal(&info->mtx_lock, gate);
1375 		break;
1376 	case RW_LOCK:
1377 		lck_rw_gate_steal(&info->rw_lock, gate);
1378 		break;
1379 	default:
1380 		panic("invalid type %d", info->prim_type);
1381 	}
1382 }
1383 
1384 static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test * info,int flags)1385 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1386 {
1387 	gate_t *gate = &info->gate;
1388 	if (info->use_alloc_gate == true) {
1389 		gate = info->alloc_gate;
1390 	}
1391 
1392 	kern_return_t ret = KERN_SUCCESS;
1393 	switch (info->prim_type) {
1394 	case MTX_LOCK:
1395 		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1396 		break;
1397 	case RW_LOCK:
1398 		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1399 		break;
1400 	default:
1401 		panic("invalid type %d", info->prim_type);
1402 	}
1403 	return ret;
1404 }
1405 
1406 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1407 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1408 {
1409 	gate_t *gate = &info->gate;
1410 	if (info->use_alloc_gate == true) {
1411 		gate = info->alloc_gate;
1412 	}
1413 
1414 	switch (info->prim_type) {
1415 	case MTX_LOCK:
1416 		lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1417 		break;
1418 	case RW_LOCK:
1419 		lck_rw_gate_assert(&info->rw_lock, gate, type);
1420 		break;
1421 	default:
1422 		panic("invalid type %d", info->prim_type);
1423 	}
1424 }
1425 
1426 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1427 primitive_gate_init(struct info_sleep_inheritor_test *info)
1428 {
1429 	switch (info->prim_type) {
1430 	case MTX_LOCK:
1431 		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1432 		break;
1433 	case RW_LOCK:
1434 		lck_rw_gate_init(&info->rw_lock, &info->gate);
1435 		break;
1436 	default:
1437 		panic("invalid type %d", info->prim_type);
1438 	}
1439 }
1440 
1441 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1442 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1443 {
1444 	switch (info->prim_type) {
1445 	case MTX_LOCK:
1446 		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1447 		break;
1448 	case RW_LOCK:
1449 		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1450 		break;
1451 	default:
1452 		panic("invalid type %d", info->prim_type);
1453 	}
1454 }
1455 
1456 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1457 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1458 {
1459 	gate_t *gate;
1460 	switch (info->prim_type) {
1461 	case MTX_LOCK:
1462 		gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1463 		break;
1464 	case RW_LOCK:
1465 		gate = lck_rw_gate_alloc_init(&info->rw_lock);
1466 		break;
1467 	default:
1468 		panic("invalid type %d", info->prim_type);
1469 	}
1470 	info->alloc_gate = gate;
1471 }
1472 
1473 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1474 primitive_gate_free(struct info_sleep_inheritor_test *info)
1475 {
1476 	T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1477 
1478 	switch (info->prim_type) {
1479 	case MTX_LOCK:
1480 		lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1481 		break;
1482 	case RW_LOCK:
1483 		lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1484 		break;
1485 	default:
1486 		panic("invalid type %d", info->prim_type);
1487 	}
1488 	info->alloc_gate = NULL;
1489 }
1490 
/*
 * Worker that uses sleep_with_inheritor/wakeup_one_with_inheritor like
 * a mutex: the first thread through becomes the inheritor (the "lock
 * holder"); everyone else sleeps pushing on it. On "unlock" a single
 * waiter is woken and becomes the new inheritor, whose reference
 * (returned by wakeup_one) must be dropped. Exactly one thread — the
 * last one, with no waiter left — is expected to see a handoff failure.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first in: become the inheritor ("acquire the mutex") */
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	/* simulate work done while "holding the mutex" */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	/* we must be the inheritor while doing the work */
	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* no waiter was left: only the last thread may hit this */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* drop the reference wakeup_one_with_inheritor returned */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* any priority push on us must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1543 
/*
 * Worker where a single inheritor does work while everyone else sleeps
 * pushing on it: the first thread through becomes the inheritor, waits
 * for all peers to block, verifies its priority was boosted to the
 * maximum worker priority, then wakes everyone.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first in: become the inheritor */
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* wait until every other thread has reached its sleep */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* all waiters push on us: we must run at the max worker pri */
		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* any priority push on us must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1583 
/*
 * Worker exercising change_sleep_inheritor(): the first thread becomes
 * the inheritor; the second "steals" inheritorship by retargeting the
 * sleepers' push onto itself, then verifies it runs at the highest
 * priority recorded among the stealing/waiting threads. Threads that
 * take over exclude themselves from wait_for_waiters() tracking.
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first in: become the inheritor, stop being tracked as a waiter */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* only wake if the stealer has not taken over in the meantime */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* second in: steal inheritorship from the first thread */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* the sleepers now push on us instead of the original inheritor */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* remaining threads: record the highest sleeper priority and sleep */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* any priority push on us must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1644 
/*
 * Worker exercising sleep_with_inheritor with a NULL inheritor: each
 * thread decrements info->value; all but the last clear the inheritor
 * and sleep (no priority push target), and the last thread through
 * wakes everyone.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* last thread: release all the sleepers */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* sleep with no inheritor: nobody gets a priority push */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* any priority push on us must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1671 
1672 static void
thread_mtx_work(void * args,__unused wait_result_t wr)1673 thread_mtx_work(
1674 	void *args,
1675 	__unused wait_result_t wr)
1676 {
1677 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1678 	uint my_pri = current_thread()->sched_pri;
1679 	int i;
1680 	u_int8_t rand;
1681 	unsigned int mod_rand;
1682 	uint max_pri;
1683 
1684 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1685 
1686 	for (i = 0; i < 10; i++) {
1687 		lck_mtx_lock(&info->mtx_lock);
1688 		if (info->thread_inheritor == NULL) {
1689 			info->thread_inheritor = current_thread();
1690 			lck_mtx_unlock(&info->mtx_lock);
1691 
1692 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1693 
1694 			wait_threads(&info->synch, info->synch_value - 1);
1695 			wait_for_waiters((struct synch_test_common *)info);
1696 			max_pri = get_max_pri((struct synch_test_common *) info);
1697 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1698 
1699 			os_atomic_store(&info->synch, 0, relaxed);
1700 
1701 			lck_mtx_lock(&info->mtx_lock);
1702 			info->thread_inheritor = NULL;
1703 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1704 			lck_mtx_unlock(&info->mtx_lock);
1705 			continue;
1706 		}
1707 
1708 		read_random(&rand, sizeof(rand));
1709 		mod_rand = rand % 2;
1710 
1711 		wake_threads(&info->synch);
1712 		switch (mod_rand) {
1713 		case 0:
1714 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1715 			lck_mtx_unlock(&info->mtx_lock);
1716 			break;
1717 		case 1:
1718 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1719 			break;
1720 		default:
1721 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1722 		}
1723 	}
1724 
1725 	/*
1726 	 * spin here to stop using the lock as mutex
1727 	 */
1728 	wake_threads(&info->synch);
1729 	wait_threads(&info->synch, info->synch_value);
1730 
1731 	for (i = 0; i < 10; i++) {
1732 		/* read_random might sleep so read it before acquiring the mtx as spin */
1733 		read_random(&rand, sizeof(rand));
1734 
1735 		lck_mtx_lock_spin(&info->mtx_lock);
1736 		if (info->thread_inheritor == NULL) {
1737 			info->thread_inheritor = current_thread();
1738 			lck_mtx_unlock(&info->mtx_lock);
1739 
1740 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1741 			wait_for_waiters((struct synch_test_common *)info);
1742 			max_pri = get_max_pri((struct synch_test_common *) info);
1743 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1744 
1745 			lck_mtx_lock_spin(&info->mtx_lock);
1746 			info->thread_inheritor = NULL;
1747 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1748 			lck_mtx_unlock(&info->mtx_lock);
1749 			continue;
1750 		}
1751 
1752 		mod_rand = rand % 2;
1753 		switch (mod_rand) {
1754 		case 0:
1755 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1756 			lck_mtx_unlock(&info->mtx_lock);
1757 			break;
1758 		case 1:
1759 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1760 			lck_mtx_unlock(&info->mtx_lock);
1761 			break;
1762 		default:
1763 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1764 		}
1765 	}
1766 	assert(current_thread()->kern_promotion_schedpri == 0);
1767 	notify_waiter((struct synch_test_common *)info);
1768 
1769 	thread_terminate_self();
1770 }
1771 
/*
 * Worker for test_rw_lock(): exercises lck_rw_sleep_with_inheritor()
 * under every LCK_SLEEP_* re-acquisition mode, checking that the one
 * thread acting as inheritor is pushed to the maximum priority of
 * the sleeping waiters.
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		/* Take the lock shared; only upgrade if we can become the inheritor. */
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				/* Upgrade succeeded: re-check the inheritor slot under exclusive. */
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					/* Every other thread sleeps pushing on us: verify the push. */
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				/*
				 * A failed shared-to-exclusive upgrade drops the lock
				 * entirely (hence no unlock here): start the round over.
				 */
				goto try_again;
			}
		}

		/* Not the inheritor: pick a random sleep mode and push on it. */
		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			/* LCK_SLEEP_DEFAULT: wakes up holding the lock as held on entry. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			/* LCK_SLEEP_UNLOCK: the lock stays dropped after wakeup. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			/* LCK_SLEEP_SHARED: wakes up holding the lock shared. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			/* LCK_SLEEP_EXCLUSIVE: wakes up holding the lock exclusive. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/* By now every push we received must have been dropped. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1848 
/* Life-cycle states of a cache entry. */
#define OBJ_STATE_UNUSED        0       /* allocated, holds no real object */
#define OBJ_STATE_REAL          1       /* fully initialized, safe to read */
#define OBJ_STATE_PLACEHOLDER   2       /* init in flight; reflock is held */

#define OBJ_BUFF_SIZE 11                /* fits "I am groot" plus NUL */
struct obj_cached {
	int obj_id;                           /* lookup key */
	int obj_state;                        /* one of OBJ_STATE_* */
	struct kern_apfs_reflock *obj_refcount; /* refcount + init/teardown lock */
	char obj_buff[OBJ_BUFF_SIZE];         /* payload; "I am <id>" when REAL */
};

/* Cache is deliberately tiny so threads contend for slots. */
#define CACHE_SIZE 2
#define USE_CACHE_ROUNDS 15

#define REFCOUNT_REFLOCK_ROUNDS 15
1865 
1866 /*
1867  * For the reflock cache test the cache is allocated
1868  * and its pointer is saved in obj_cache.
1869  * The lock for the cache is going to be one of the exclusive
1870  * locks already present in struct info_sleep_inheritor_test.
1871  */
1872 
1873 static struct obj_cached *
alloc_init_cache_entry(void)1874 alloc_init_cache_entry(void)
1875 {
1876 	struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1877 	cache_entry->obj_id = 0;
1878 	cache_entry->obj_state = OBJ_STATE_UNUSED;
1879 	cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
1880 	snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
1881 	return cache_entry;
1882 }
1883 
1884 static void
init_cache(struct info_sleep_inheritor_test * info)1885 init_cache(struct info_sleep_inheritor_test *info)
1886 {
1887 	struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1888 
1889 	int i;
1890 	for (i = 0; i < CACHE_SIZE; i++) {
1891 		obj_cache[i] = alloc_init_cache_entry();
1892 	}
1893 
1894 	info->obj_cache = obj_cache;
1895 }
1896 
1897 static void
check_cache_empty(struct info_sleep_inheritor_test * info)1898 check_cache_empty(struct info_sleep_inheritor_test *info)
1899 {
1900 	struct obj_cached **obj_cache = info->obj_cache;
1901 
1902 	int i, ret;
1903 	for (i = 0; i < CACHE_SIZE; i++) {
1904 		if (obj_cache[i] != NULL) {
1905 			T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1906 			T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1907 			ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1908 			T_ASSERT(ret == 0, "checked buff correctly emptied");
1909 		}
1910 	}
1911 }
1912 
1913 static void
free_cache(struct info_sleep_inheritor_test * info)1914 free_cache(struct info_sleep_inheritor_test *info)
1915 {
1916 	struct obj_cached **obj_cache = info->obj_cache;
1917 
1918 	int i;
1919 	for (i = 0; i < CACHE_SIZE; i++) {
1920 		if (obj_cache[i] != NULL) {
1921 			kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1922 			obj_cache[i]->obj_refcount = NULL;
1923 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1924 			obj_cache[i] = NULL;
1925 		}
1926 	}
1927 
1928 	kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1929 	info->obj_cache = NULL;
1930 }
1931 
1932 static struct obj_cached *
find_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info)1933 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1934 {
1935 	struct obj_cached **obj_cache = info->obj_cache;
1936 	int i;
1937 	for (i = 0; i < CACHE_SIZE; i++) {
1938 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1939 			return obj_cache[i];
1940 		}
1941 	}
1942 	return NULL;
1943 }
1944 
1945 static bool
free_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info,__assert_only struct obj_cached * expected)1946 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, __assert_only struct obj_cached *expected)
1947 {
1948 	struct obj_cached **obj_cache = info->obj_cache;
1949 	int i;
1950 	for (i = 0; i < CACHE_SIZE; i++) {
1951 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1952 			assert(obj_cache[i] == expected);
1953 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1954 			obj_cache[i] = NULL;
1955 			return true;
1956 		}
1957 	}
1958 	return false;
1959 }
1960 
1961 static struct obj_cached *
find_empty_spot_in_cache(struct info_sleep_inheritor_test * info)1962 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1963 {
1964 	struct obj_cached **obj_cache = info->obj_cache;
1965 	int i;
1966 	for (i = 0; i < CACHE_SIZE; i++) {
1967 		if (obj_cache[i] == NULL) {
1968 			obj_cache[i] = alloc_init_cache_entry();
1969 			return obj_cache[i];
1970 		}
1971 		if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1972 			return obj_cache[i];
1973 		}
1974 	}
1975 	return NULL;
1976 }
1977 
/*
 * Look up (or instantiate) the cache entry for obj_id, take a
 * reference on it, and return a pointer to its buffer through *buff.
 *
 * Returns 0 on success, -1 when the cache is full of in-use objects.
 * Initialization happens with the entry's reflock held, so concurrent
 * lookups wait for (and push priority onto) the initializing thread.
 */
static int
get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	kern_apfs_reflock_t refcount = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;

try_again:
	primitive_lock(info);
	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
		/* Found an allocated object on the cache with same id */

		/*
		 * copy the pointer to obj_refcount as obj might
		 * get deallocated after primitive_unlock()
		 */
		refcount = obj->obj_refcount;
		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
			/*
			 * Got a ref, let's check the state
			 */
			switch (obj->obj_state) {
			case OBJ_STATE_UNUSED:
				goto init;
			case OBJ_STATE_REAL:
				goto done;
			case OBJ_STATE_PLACEHOLDER:
				/* Holding a ref should make PLACEHOLDER unobservable here. */
				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
			default:
				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
			}
		} else {
			/*
			 * Didn't get a ref.
			 * This means either an obj_put() of the last ref is ongoing
			 * or an init of the object is happening.
			 * In both cases wait for that to finish and retry.
			 * While waiting the thread that is holding the reflock
			 * will get a priority at least as the one of this thread.
			 */
			primitive_unlock(info);
			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			goto try_again;
		}
	} else {
		/* Look for a spot on the cache where we can save the object */

		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
			/*
			 * Sadness: cache is full, and everything in the cache is
			 * used.
			 */
			primitive_unlock(info);
			return -1;
		} else {
			/*
			 * copy the pointer to obj_refcount as obj might
			 * get deallocated after primitive_unlock()
			 */
			refcount = obj->obj_refcount;
			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
				/*
				 * Got a ref on a OBJ_STATE_UNUSED obj.
				 * Recycle time.
				 */
				obj->obj_id = obj_id;
				goto init;
			} else {
				/*
				 * This could happen if the obj_put() has just changed the
				 * state to OBJ_STATE_UNUSED, but not unlocked the reflock yet.
				 */
				primitive_unlock(info);
				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
				goto try_again;
			}
		}
	}
init:
	assert(obj->obj_id == obj_id);
	assert(obj->obj_state == OBJ_STATE_UNUSED);
	/*
	 * We already got a ref on the object, but we need
	 * to initialize it. Mark it as
	 * OBJ_STATE_PLACEHOLDER and get the obj_reflock.
	 * In this way all threads waiting for this init
	 * to finish will push on this thread.
	 */
	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
	assert(ret == true);
	obj->obj_state = OBJ_STATE_PLACEHOLDER;
	primitive_unlock(info);

	//let's pretend we are populating the obj
	IOSleep(10);
	/*
	 * obj will not be deallocated while I hold a ref.
	 * So it is safe to access it.
	 */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);

	primitive_lock(info);
	/* Our ref pins the entry: it must still be ours, still a placeholder. */
	obj2 = find_id_in_cache(obj_id, info);
	assert(obj == obj2);
	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);

	obj->obj_state = OBJ_STATE_REAL;
	kern_apfs_reflock_unlock(refcount);

done:
	*buff = obj->obj_buff;
	primitive_unlock(info);
	return 0;
}
2093 
/*
 * Drop one reference on obj_id's cache entry. When the last reference
 * goes away, the entry is flushed and reset to OBJ_STATE_UNUSED under
 * the reflock; if 'free' is set it is also removed from the cache and
 * its reflock is freed.
 */
static void
put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_t refcount = NULL;

	primitive_lock(info);
	obj = find_id_in_cache(obj_id, info);
	primitive_unlock(info);

	/*
	 * Nobody should have been able to remove obj_id
	 * from the cache.
	 */
	assert(obj != NULL);
	assert(obj->obj_state == OBJ_STATE_REAL);

	refcount = obj->obj_refcount;

	/*
	 * This should never fail: the reflock is only acquired either
	 * when the state was OBJ_STATE_UNUSED (to init), or by a put
	 * that reached zero. In the latter case subsequent
	 * reflock_get_ref() would have had to wait for the transition
	 * back to OBJ_STATE_REAL.
	 */
	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
	assert(ret == true);
	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
		/* Not the last reference: nothing else to do. */
		return;
	}

	/*
	 * Note: nobody at this point will be able to get a ref or a lock on
	 * refcount.
	 * All people waiting on refcount will push on this thread.
	 */

	//let's pretend we are flushing the obj somewhere.
	IOSleep(10);
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");

	primitive_lock(info);
	obj->obj_state = OBJ_STATE_UNUSED;
	if (free) {
		obj2 = find_id_in_cache(obj_id, info);
		assert(obj == obj2);

		ret = free_id_in_cache(obj_id, info, obj);
		assert(ret == true);
	}
	primitive_unlock(info);

	/* Unlock only after the entry is fully reset (or gone from the cache). */
	kern_apfs_reflock_unlock(refcount);

	if (free) {
		kern_apfs_reflock_free(refcount);
	}
}
2155 
/*
 * Worker for test_cache_reflock(): each thread picks one object id in
 * [1, CACHE_SIZE + 1] (ids deliberately oversubscribe the CACHE_SIZE
 * slots), then repeatedly gets the object, validates its buffer twice
 * around a sleep, and puts it back — freeing the entry every other round.
 */
static void
thread_use_cache(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	int my_obj;

	/* Derive a per-thread object id from the shared countdown. */
	primitive_lock(info);
	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
	primitive_unlock(info);

	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
	/*
	 * This is the string I would expect to see
	 * on my_obj buff.
	 */
	char my_string[OBJ_BUFF_SIZE];
	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);

	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
		char *buff;
		while (get_obj_cache(my_obj, info, &buff) == -1) {
			/*
			 * Cache is full, wait.
			 */
			IOSleep(10);
		}
		/* Buffer must be fully initialized and stay valid while we hold a ref. */
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		IOSleep(10);
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		/* Free the entry on even rounds to exercise the teardown path. */
		put_obj_cache(my_obj, info, (i % 2 == 0));
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2199 
/*
 * Worker for test_refcount_reflock(): loops taking and dropping a
 * reference on the shared reflock, using the 0->1 and 1->0 transitions
 * (signaled via KERN_APFS_REFLOCK_OUT_LOCKED) to guard a protected
 * status flag.
 */
static void
thread_refcount_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_in_flags_t in_flags;

	T_LOG("Thread %p started", current_thread());
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
		/* On even rounds also declare we are willing to wait if locked. */
		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
		if ((i % 2) == 0) {
			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
		}
		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
		if (ret == true) {
			/* got reference, check if we did 0->1 */
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 0, "status init check");
				info->reflock_protected_status = 1;
				kern_apfs_reflock_unlock(&info->reflock);
			} else {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
			}
			/* release the reference and check if we did 1->0 */
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
				info->reflock_protected_status = 0;
				kern_apfs_reflock_unlock(&info->reflock);
			}
		} else {
			/* didn't get a reference */
			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
				/* We promised to wait: block until the holder unlocks. */
				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2251 
/*
 * Worker for test_force_reflock(): the first thread to bump info->value
 * locks the reflock and sits on it; the other threads use
 * KERN_APFS_REFLOCK_IN_FORCE to get/put references even while it is
 * locked, which must always succeed.
 */
static void
thread_force_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* Exactly one thread wins this race and becomes the locker. */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* Hold the lock long enough for the forced get/put loops to overlap. */
		IOSleep(100);
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2292 
/*
 * Worker for test_lock_reflock(): one thread locks the reflock and sets
 * a protected status; the others attempt plain (non-forced, non-waiting)
 * try_get_ref. A successful get must observe the status already cleared,
 * proving try_get_ref fails while the reflock is locked.
 */
static void
thread_lock_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* Exactly one thread wins this race and becomes the locker. */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
		info->reflock_protected_status = 1;
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		IOSleep(100);
		/* Clear the status before dropping the lock so getters see 0. */
		info->reflock_protected_status = 0;
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
			if (ret == true) {
				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
				break;
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2338 
2339 static void
test_cache_reflock(struct info_sleep_inheritor_test * info)2340 test_cache_reflock(struct info_sleep_inheritor_test *info)
2341 {
2342 	info->synch = 0;
2343 	info->synch_value = info->head.nthreads;
2344 
2345 	info->value = info->head.nthreads;
2346 	/*
2347 	 * Use the mtx as cache lock
2348 	 */
2349 	info->prim_type = MTX_LOCK;
2350 
2351 	init_cache(info);
2352 
2353 	start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2354 	wait_all_thread((struct synch_test_common *)info);
2355 
2356 	check_cache_empty(info);
2357 	free_cache(info);
2358 }
2359 
2360 static void
test_refcount_reflock(struct info_sleep_inheritor_test * info)2361 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2362 {
2363 	info->synch = 0;
2364 	info->synch_value = info->head.nthreads;
2365 	kern_apfs_reflock_init(&info->reflock);
2366 	info->reflock_protected_status = 0;
2367 
2368 	start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2369 	wait_all_thread((struct synch_test_common *)info);
2370 
2371 	kern_apfs_reflock_destroy(&info->reflock);
2372 
2373 	T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2374 }
2375 
2376 static void
test_force_reflock(struct info_sleep_inheritor_test * info)2377 test_force_reflock(struct info_sleep_inheritor_test *info)
2378 {
2379 	info->synch = 0;
2380 	info->synch_value = info->head.nthreads;
2381 	kern_apfs_reflock_init(&info->reflock);
2382 	info->value = 0;
2383 
2384 	start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2385 	wait_all_thread((struct synch_test_common *)info);
2386 
2387 	kern_apfs_reflock_destroy(&info->reflock);
2388 }
2389 
2390 static void
test_lock_reflock(struct info_sleep_inheritor_test * info)2391 test_lock_reflock(struct info_sleep_inheritor_test *info)
2392 {
2393 	info->synch = 0;
2394 	info->synch_value = info->head.nthreads;
2395 	kern_apfs_reflock_init(&info->reflock);
2396 	info->value = 0;
2397 
2398 	start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2399 	wait_all_thread((struct synch_test_common *)info);
2400 
2401 	kern_apfs_reflock_destroy(&info->reflock);
2402 }
2403 
2404 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)2405 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2406 {
2407 	info->prim_type = prim_type;
2408 	info->synch = 0;
2409 	info->synch_value = info->head.nthreads;
2410 
2411 	info->thread_inheritor = NULL;
2412 
2413 	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2414 	wait_all_thread((struct synch_test_common *)info);
2415 }
2416 
2417 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)2418 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2419 {
2420 	info->prim_type = prim_type;
2421 
2422 	info->synch = 0;
2423 	info->synch_value = info->head.nthreads;
2424 	info->value = 0;
2425 	info->handoff_failure = 0;
2426 	info->thread_inheritor = NULL;
2427 
2428 	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2429 	wait_all_thread((struct synch_test_common *)info);
2430 
2431 	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2432 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2433 }
2434 
2435 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2436 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2437 {
2438 	info->prim_type = prim_type;
2439 
2440 	info->thread_inheritor = NULL;
2441 	info->steal_pri = 0;
2442 	info->synch = 0;
2443 	info->synch_value = info->head.nthreads;
2444 
2445 	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2446 	wait_all_thread((struct synch_test_common *)info);
2447 }
2448 
2449 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2450 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2451 {
2452 	info->prim_type = prim_type;
2453 	info->synch = 0;
2454 	info->synch_value = info->head.nthreads;
2455 
2456 	info->thread_inheritor = NULL;
2457 	info->value = info->head.nthreads;
2458 
2459 	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2460 	wait_all_thread((struct synch_test_common *)info);
2461 }
2462 
2463 static void
test_rw_lock(struct info_sleep_inheritor_test * info)2464 test_rw_lock(struct info_sleep_inheritor_test *info)
2465 {
2466 	info->thread_inheritor = NULL;
2467 	info->value = info->head.nthreads;
2468 	info->synch = 0;
2469 	info->synch_value = info->head.nthreads;
2470 
2471 	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2472 	wait_all_thread((struct synch_test_common *)info);
2473 }
2474 
2475 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)2476 test_mtx_lock(struct info_sleep_inheritor_test *info)
2477 {
2478 	info->thread_inheritor = NULL;
2479 	info->value = info->head.nthreads;
2480 	info->synch = 0;
2481 	info->synch_value = info->head.nthreads;
2482 
2483 	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2484 	wait_all_thread((struct synch_test_common *)info);
2485 }
2486 
2487 kern_return_t
ts_kernel_sleep_inheritor_test(void)2488 ts_kernel_sleep_inheritor_test(void)
2489 {
2490 	struct info_sleep_inheritor_test info = {};
2491 
2492 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2493 
2494 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2495 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2496 	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
2497 
2498 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2499 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2500 
2501 	/*
2502 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2503 	 */
2504 	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
2505 	test_sleep_with_wake_all(&info, MTX_LOCK);
2506 
2507 	/*
2508 	 * Testing rw_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2509 	 */
2510 	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
2511 	test_sleep_with_wake_all(&info, RW_LOCK);
2512 
2513 	/*
2514 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
2515 	 */
2516 	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
2517 	test_sleep_with_wake_one(&info, MTX_LOCK);
2518 
2519 	/*
2520 	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
2521 	 */
2522 	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
2523 	test_sleep_with_wake_one(&info, RW_LOCK);
2524 
2525 	/*
2526 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2527 	 * and change_sleep_inheritor
2528 	 */
2529 	T_LOG("Testing change_sleep_inheritor with mxt sleep");
2530 	test_change_sleep_inheritor(&info, MTX_LOCK);
2531 
2532 	/*
2533 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2534 	 * and change_sleep_inheritor
2535 	 */
2536 	T_LOG("Testing change_sleep_inheritor with rw sleep");
2537 	test_change_sleep_inheritor(&info, RW_LOCK);
2538 
2539 	/*
2540 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2541 	 * with inheritor NULL
2542 	 */
2543 	T_LOG("Testing inheritor NULL");
2544 	test_no_inheritor(&info, MTX_LOCK);
2545 
2546 	/*
2547 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2548 	 * with inheritor NULL
2549 	 */
2550 	T_LOG("Testing inheritor NULL");
2551 	test_no_inheritor(&info, RW_LOCK);
2552 
2553 	/*
2554 	 * Testing mtx locking combinations
2555 	 */
2556 	T_LOG("Testing mtx locking combinations");
2557 	test_mtx_lock(&info);
2558 
2559 	/*
2560 	 * Testing rw locking combinations
2561 	 */
2562 	T_LOG("Testing rw locking combinations");
2563 	test_rw_lock(&info);
2564 
2565 	/*
2566 	 * Testing reflock / cond_sleep_with_inheritor
2567 	 */
2568 	T_LOG("Test cache reflock + cond_sleep_with_inheritor");
2569 	test_cache_reflock(&info);
2570 	T_LOG("Test force reflock + cond_sleep_with_inheritor");
2571 	test_force_reflock(&info);
2572 	T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
2573 	test_refcount_reflock(&info);
2574 	T_LOG("Test lock reflock + cond_sleep_with_inheritor");
2575 	test_lock_reflock(&info);
2576 
2577 	destroy_synch_test_common((struct synch_test_common *)&info);
2578 
2579 	lck_attr_free(lck_attr);
2580 	lck_grp_attr_free(lck_grp_attr);
2581 	lck_rw_destroy(&info.rw_lock, lck_grp);
2582 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2583 	lck_grp_free(lck_grp);
2584 
2585 	return KERN_SUCCESS;
2586 }
2587 
/*
 * Worker for the gate-steal test: the first thread closes the gate,
 * one later thread steals ownership, and the remaining threads wait
 * on the gate — pushing their priority onto the current holder, which
 * is verified against the recorded maximum (steal_pri).
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* First thread in: become the holder and close the gate. */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		/* Only open if the gate was not stolen from us in the meantime. */
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* Second thread in: steal the gate from the original holder. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* All waiters push on the thief: verify the inherited priority. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* Everyone else: record our priority and wait on the gate. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* Any priority push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2647 
/*
 * Worker for the gate-free test: the winner of try_close holds the
 * gate until all other threads are waiting, then opens and frees it.
 * Every waiter must observe GATE_OPENED.
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* We own the gate: wait for everyone else to be blocked on it. */
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		primitive_gate_open(info);
		/* Free immediately after opening, with waiters having just woken. */
		primitive_gate_free(info);
	} else {
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2683 
/*
 * Worker for the gate handoff test: uses the gate as a mutex.
 *
 * Each thread acquires the gate (by try_close or by waiting for a handoff),
 * increments the protected counter, and then hands the gate off to the next
 * waiter.  The last thread to release finds no waiters, records the single
 * expected handoff failure, and opens the gate instead.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	/* Acquire the gate: close it ourselves or wait for a handoff. */
	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* Critical section: the gate (not the lock) protects info->value. */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	/* Pass ownership to the next waiter, if any. */
	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* Only the last thread should find nobody waiting. */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2731 
/*
 * Worker for the gate push test.
 *
 * Only one thread at a time may do "the work": whoever closes the gate owns
 * it, waits for all other workers to block in gate_wait, and asserts that the
 * waiters' priority was pushed onto it (sched_pri == max priority of the
 * group).  Losers wait on the gate and retry until work_to_do is cleared.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			/* We own the gate and therefore the work. */
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			/* Wait for every other worker to block on the gate. */
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			/* The gate keeper must have inherited the highest waiter priority. */
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			/* Gate already owned: signal the owner and wait, then re-check. */
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* No promotion may linger once the gate interaction is over. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2773 
2774 static void
test_gate_push(struct info_sleep_inheritor_test * info,int prim_type)2775 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2776 {
2777 	info->prim_type = prim_type;
2778 	info->use_alloc_gate = false;
2779 
2780 	primitive_gate_init(info);
2781 	info->work_to_do = TRUE;
2782 	info->synch = 0;
2783 	info->synch_value = NUM_THREADS;
2784 
2785 	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2786 	wait_all_thread((struct synch_test_common *)info);
2787 
2788 	primitive_gate_destroy(info);
2789 }
2790 
2791 static void
test_gate_handoff(struct info_sleep_inheritor_test * info,int prim_type)2792 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2793 {
2794 	info->prim_type = prim_type;
2795 	info->use_alloc_gate = false;
2796 
2797 	primitive_gate_init(info);
2798 
2799 	info->synch = 0;
2800 	info->synch_value = NUM_THREADS;
2801 	info->value = 0;
2802 	info->handoff_failure = 0;
2803 
2804 	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2805 	wait_all_thread((struct synch_test_common *)info);
2806 
2807 	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2808 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2809 
2810 	primitive_gate_destroy(info);
2811 }
2812 
2813 static void
test_gate_steal(struct info_sleep_inheritor_test * info,int prim_type)2814 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2815 {
2816 	info->prim_type = prim_type;
2817 	info->use_alloc_gate = false;
2818 
2819 	primitive_gate_init(info);
2820 
2821 	info->synch = 0;
2822 	info->synch_value = NUM_THREADS;
2823 	info->thread_inheritor = NULL;
2824 	info->steal_pri = 0;
2825 
2826 	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2827 	wait_all_thread((struct synch_test_common *)info);
2828 
2829 	primitive_gate_destroy(info);
2830 }
2831 
2832 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2833 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2834 {
2835 	(void)info;
2836 	(void) prim_type;
2837 	info->prim_type = prim_type;
2838 	info->use_alloc_gate = true;
2839 
2840 	primitive_gate_alloc(info);
2841 
2842 	info->synch = 0;
2843 	info->synch_value = NUM_THREADS;
2844 
2845 	start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2846 	wait_all_thread((struct synch_test_common *)info);
2847 
2848 	T_ASSERT(info->alloc_gate == NULL, "gate free");
2849 	info->use_alloc_gate = false;
2850 }
2851 
2852 kern_return_t
ts_kernel_gate_test(void)2853 ts_kernel_gate_test(void)
2854 {
2855 	struct info_sleep_inheritor_test info = {};
2856 
2857 	T_LOG("Testing gate primitive");
2858 
2859 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2860 
2861 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2862 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2863 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2864 
2865 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2866 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2867 
2868 	/*
2869 	 * Testing the priority inherited by the keeper
2870 	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2871 	 */
2872 	T_LOG("Testing gate push, mtx");
2873 	test_gate_push(&info, MTX_LOCK);
2874 
2875 	T_LOG("Testing gate push, rw");
2876 	test_gate_push(&info, RW_LOCK);
2877 
2878 	/*
2879 	 * Testing the handoff
2880 	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2881 	 */
2882 	T_LOG("Testing gate handoff, mtx");
2883 	test_gate_handoff(&info, MTX_LOCK);
2884 
2885 	T_LOG("Testing gate handoff, rw");
2886 	test_gate_handoff(&info, RW_LOCK);
2887 
2888 	/*
2889 	 * Testing the steal
2890 	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2891 	 */
2892 	T_LOG("Testing gate steal, mtx");
2893 	test_gate_steal(&info, MTX_LOCK);
2894 
2895 	T_LOG("Testing gate steal, rw");
2896 	test_gate_steal(&info, RW_LOCK);
2897 
2898 	/*
2899 	 * Testing the alloc/free
2900 	 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2901 	 */
2902 	T_LOG("Testing gate alloc/free, mtx");
2903 	test_gate_alloc_free(&info, MTX_LOCK);
2904 
2905 	T_LOG("Testing gate alloc/free, rw");
2906 	test_gate_alloc_free(&info, RW_LOCK);
2907 
2908 	destroy_synch_test_common((struct synch_test_common *)&info);
2909 
2910 	lck_attr_free(lck_attr);
2911 	lck_grp_attr_free(lck_grp_attr);
2912 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2913 	lck_grp_free(lck_grp);
2914 
2915 	return KERN_SUCCESS;
2916 }
2917 
2918 #define NUM_THREAD_CHAIN 6
2919 
/*
 * Shared state for the turnstile chain tests (sleep-with-inheritor chains,
 * gate chains, and the mixed sleep/gate chain).
 */
struct turnstile_chain_test {
	struct synch_test_common head;  /* common per-thread bookkeeping (threads[], nthreads, ...) */
	lck_mtx_t mtx_lock;             /* mutex the gates and sleeps are bound to */
	int synch_value;                /* number of participating threads */
	int synch;                      /* rendezvous counter */
	int synch2;                     /* second rendezvous counter (gate tests) */
	gate_t gates[NUM_THREAD_CHAIN]; /* one gate per worker thread */
};
2928 
/*
 * Worker for the mixed sleep/gate chain test.
 *
 * Even-indexed threads close their own gate; odd-indexed threads will later
 * block in gate_wait on the previous thread's gate, while even-indexed
 * threads (other than thread 0) block in sleep_with_inheritor on the
 * previous thread's event.  This builds an alternating turnstile chain, and
 * each thread asserts it was pushed to the group's max priority before
 * waking its successor.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Rendezvous: make sure every even thread closed its gate first. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everybody to be blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		/* The chain must have pushed the whole group's max priority on us. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Find our position: we wait on thread i-1 and wake thread i. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd threads block on the previous (even) thread's gate. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Wake the next thread without transferring the push to it. */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Release the gate we closed so our odd successor can proceed. */
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* No promotion may linger after the chain unwinds. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3024 
/*
 * Worker for the pure gate chain test.
 *
 * Every thread closes its own gate, then (except for thread 0) blocks on the
 * previous thread's gate, forming a chain of gate turnstiles.  Thread 0
 * waits for the whole chain to form, asserts it received the group's max
 * priority push, and opens gate 0; each woken thread in turn asserts the
 * push and opens its own gate for its successor.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Close the gate that belongs to our slot; i records our position. */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	/* We must always find ourselves in the thread list. */
	assert(i != info->head.nthreads);

	/* Rendezvous: all gates must be closed before anyone waits. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until everyone is blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Block on the previous thread's gate (gate_wait drops the lock). */
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Pass the baton: open our own gate for the next thread. */
		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* No promotion may linger after the chain unwinds. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3087 
/*
 * Worker for the sleep-with-inheritor chain test.
 *
 * Thread i (i > 0) sleeps on thread i-1's event naming thread i-1 as
 * inheritor, forming a chain of priority donations.  Thread 0 waits for the
 * full chain, asserts it received the group's max priority, then wakes
 * thread 1; each woken thread asserts the push and wakes its successor.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for all others to be asleep behind us. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake thread 1 without transferring the push to it. */
		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* Find our position: wait on thread i-1's event, wake our own. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		/* Donate our priority to the previous thread while we sleep. */
		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake the next thread in the chain, if there is one. */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* No promotion may linger after the chain unwinds. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3165 
3166 static void
test_sleep_chain(struct turnstile_chain_test * info)3167 test_sleep_chain(struct turnstile_chain_test *info)
3168 {
3169 	info->synch = 0;
3170 	info->synch_value = info->head.nthreads;
3171 
3172 	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
3173 	wait_all_thread((struct synch_test_common *)info);
3174 }
3175 
3176 static void
test_gate_chain(struct turnstile_chain_test * info)3177 test_gate_chain(struct turnstile_chain_test *info)
3178 {
3179 	info->synch = 0;
3180 	info->synch2 = 0;
3181 	info->synch_value = info->head.nthreads;
3182 
3183 	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
3184 	wait_all_thread((struct synch_test_common *)info);
3185 }
3186 
3187 static void
test_sleep_gate_chain(struct turnstile_chain_test * info)3188 test_sleep_gate_chain(struct turnstile_chain_test *info)
3189 {
3190 	info->synch = 0;
3191 	info->synch2 = 0;
3192 	info->synch_value = info->head.nthreads;
3193 
3194 	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
3195 	wait_all_thread((struct synch_test_common *)info);
3196 }
3197 
3198 kern_return_t
ts_kernel_turnstile_chain_test(void)3199 ts_kernel_turnstile_chain_test(void)
3200 {
3201 	struct turnstile_chain_test info = {};
3202 	int i;
3203 
3204 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
3205 	lck_attr_t* lck_attr = lck_attr_alloc_init();
3206 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
3207 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
3208 
3209 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
3210 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3211 		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
3212 	}
3213 
3214 	T_LOG("Testing sleep chain, lck");
3215 	test_sleep_chain(&info);
3216 
3217 	T_LOG("Testing gate chain, lck");
3218 	test_gate_chain(&info);
3219 
3220 	T_LOG("Testing sleep and gate chain, lck");
3221 	test_sleep_gate_chain(&info);
3222 
3223 	destroy_synch_test_common((struct synch_test_common *)&info);
3224 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3225 		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
3226 	}
3227 	lck_attr_free(lck_attr);
3228 	lck_grp_attr_free(lck_grp_attr);
3229 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
3230 	lck_grp_free(lck_grp);
3231 
3232 	return KERN_SUCCESS;
3233 }
3234 
3235 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)3236 ts_kernel_timingsafe_bcmp_test(void)
3237 {
3238 	int i, buf_size;
3239 	char *buf = NULL;
3240 
3241 	// empty
3242 	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3243 	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3244 	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3245 
3246 	// equal
3247 	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3248 
3249 	// unequal
3250 	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3251 	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3252 	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3253 	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3254 
3255 	// all possible bitwise differences
3256 	for (i = 1; i < 256; i += 1) {
3257 		unsigned char a = 0;
3258 		unsigned char b = (unsigned char)i;
3259 
3260 		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3261 	}
3262 
3263 	// large
3264 	buf_size = 1024 * 16;
3265 	buf = kalloc_data(buf_size, Z_WAITOK);
3266 	T_EXPECT_NOTNULL(buf, "kalloc of buf");
3267 
3268 	read_random(buf, buf_size);
3269 	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3270 	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3271 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3272 
3273 	memcpy(buf + 128, buf, 128);
3274 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3275 
3276 	kfree_data(buf, buf_size);
3277 
3278 	return KERN_SUCCESS;
3279 }
3280 
/*
 * POST check that the printf engine accepts the %hx/%hhx/%llx length
 * modifiers without mangling subsequent arguments.  The output is emitted
 * via printf and is not machine-checked; the test only verifies that the
 * call completes, then reports success.
 */
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	T_PASS("kprintf_hhx_test passed");
	return KERN_SUCCESS;
}
3293 
3294 static STATIC_IF_KEY_DEFINE_TRUE(key_true);
3295 static STATIC_IF_KEY_DEFINE_TRUE(key_true_to_false);
3296 static STATIC_IF_KEY_DEFINE_FALSE(key_false);
3297 static STATIC_IF_KEY_DEFINE_FALSE(key_false_to_true);
3298 
/*
 * Boot-argument-time setup for the static_if tests: flip the two *_to_*
 * keys away from their compile-time defaults so static_if_tests() can
 * verify both transitions (enabled->disabled and disabled->enabled).
 */
__static_if_init_func
static void
static_if_tests_setup(const char *args __unused)
{
	static_if_key_disable(key_true_to_false);
	static_if_key_enable(key_false_to_true);
}
3306 STATIC_IF_INIT(static_if_tests_setup);
3307 
3308 static void
static_if_tests(void)3309 static_if_tests(void)
3310 {
3311 	int n = 0;
3312 
3313 	if (static_if(key_true)) {
3314 		n++;
3315 	}
3316 	if (probable_static_if(key_true)) {
3317 		n++;
3318 	}
3319 	if (improbable_static_if(key_true)) {
3320 		n++;
3321 	}
3322 	if (n != 3) {
3323 		panic("should still be enabled [n == %d, expected %d]", n, 3);
3324 	}
3325 
3326 	if (static_if(key_true_to_false)) {
3327 		n++;
3328 	}
3329 	if (probable_static_if(key_true_to_false)) {
3330 		n++;
3331 	}
3332 	if (improbable_static_if(key_true_to_false)) {
3333 		n++;
3334 	}
3335 	if (n != 3) {
3336 		panic("should now be disabled [n == %d, expected %d]", n, 3);
3337 	}
3338 
3339 	if (static_if(key_false)) {
3340 		n++;
3341 	}
3342 	if (probable_static_if(key_false)) {
3343 		n++;
3344 	}
3345 	if (improbable_static_if(key_false)) {
3346 		n++;
3347 	}
3348 	if (n != 3) {
3349 		panic("should still be disabled [n == %d, expected %d]", n, 3);
3350 	}
3351 
3352 	if (static_if(key_false_to_true)) {
3353 		n++;
3354 	}
3355 	if (probable_static_if(key_false_to_true)) {
3356 		n++;
3357 	}
3358 	if (improbable_static_if(key_false_to_true)) {
3359 		n++;
3360 	}
3361 	if (n != 6) {
3362 		panic("should now be disabled [n == %d, expected %d]", n, 3);
3363 	}
3364 }
3365 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, static_if_tests);
3366 
#if __BUILDING_XNU_LIB_UNITTEST__
/*
 * These functions are used for testing the unittest mocking framework and
 * interposing.  Each __mockable function returns (or records) a distinct
 * sentinel so a test can tell whether the real body or a mock ran.
 */

/* Returns sentinel 1000; both parameters are intentionally ignored. */
__mockable size_t
kernel_func1(__unused int a, __unused char b)
{
	return 1000;
}
/* Returns sentinel 2000. */
__mockable size_t
kernel_func2(__unused int a, __unused char b)
{
	return 2000;
}
/* Returns sentinel 3000. */
__mockable size_t
kernel_func3(__unused int a, __unused char b)
{
	return 3000;
}
/* Returns sentinel 4000. */
__mockable size_t
kernel_func4(__unused int a, __unused char b)
{
	return 4000;
}
/* Returns sentinel 5000. */
__mockable size_t
kernel_func5(__unused int a, __unused char b)
{
	return 5000;
}
/* Records the first argument so a test can observe that func6 ran. */
int kernel_func6_was_called = 0;
__mockable void
kernel_func6(__unused int a, __unused char b)
{
	printf("in void func6");
	kernel_func6_was_called = a;
}
/* Returns sentinel 7000. */
__mockable size_t
kernel_func7(__unused int a, __unused char b)
{
	return 7000;
}
/* Records the first argument so a test can observe that func8 ran. */
int kernel_func8_was_called = 0;
__mockable void
kernel_func8(__unused int a, __unused char b)
{
	printf("in void func8");
	kernel_func8_was_called = a;
}

#endif /* __BUILDING_XNU_LIB_UNITTEST__ */
3416