xref: /xnu-11417.140.69/osfmk/tests/kernel_tests.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <machine/static_if.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_object_internal.h>
48 #include <vm/vm_protos.h>
49 #include <vm/vm_iokit.h>
50 #include <string.h>
51 #include <kern/kern_apfs_reflock.h>
52 
53 #if !(DEVELOPMENT || DEBUG)
54 #error "Testing is not enabled on RELEASE configurations"
55 #endif
56 
57 #include <tests/xnupost.h>
58 
59 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
60 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
61 
62 uint32_t total_post_tests_count = 0;
63 void xnupost_reset_panic_widgets(void);
64 
65 /* test declarations */
66 kern_return_t zalloc_test(void);
67 kern_return_t RandomULong_test(void);
68 kern_return_t kcdata_api_test(void);
69 kern_return_t ts_kernel_primitive_test(void);
70 kern_return_t ts_kernel_sleep_inheritor_test(void);
71 kern_return_t ts_kernel_gate_test(void);
72 kern_return_t ts_kernel_turnstile_chain_test(void);
73 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
74 
75 #if __ARM_VFP__
76 extern kern_return_t vfp_state_test(void);
77 #endif
78 
79 extern kern_return_t kprintf_hhx_test(void);
80 
81 #if defined(__arm64__)
82 kern_return_t pmap_coredump_test(void);
83 #endif
84 
85 extern kern_return_t console_serial_test(void);
86 extern kern_return_t console_serial_parallel_log_tests(void);
87 extern kern_return_t test_printf(void);
88 extern kern_return_t test_os_log(void);
89 extern kern_return_t test_os_log_handles(void);
90 extern kern_return_t test_os_log_parallel(void);
91 extern kern_return_t bitmap_post_test(void);
92 extern kern_return_t counter_tests(void);
93 #if ML_IO_TIMEOUTS_ENABLED
94 extern kern_return_t ml_io_timeout_test(void);
95 #endif
96 
97 #ifdef __arm64__
98 extern kern_return_t arm64_backtrace_test(void);
99 extern kern_return_t arm64_munger_test(void);
100 #if __ARM_PAN_AVAILABLE__
101 extern kern_return_t arm64_pan_test(void);
102 #endif
103 #if defined(HAS_APPLE_PAC)
104 extern kern_return_t arm64_ropjop_test(void);
105 #endif /* defined(HAS_APPLE_PAC) */
106 #if CONFIG_SPTM
107 extern kern_return_t arm64_panic_lockdown_test(void);
108 #endif /* CONFIG_SPTM */
109 #if HAS_SPECRES
110 extern kern_return_t specres_test(void);
111 #endif /* HAS_SPECRES */
112 #if BTI_ENFORCED
113 kern_return_t arm64_bti_test(void);
114 #endif /* BTI_ENFORCED */
115 extern kern_return_t arm64_speculation_guard_test(void);
116 #endif /* __arm64__ */
117 
118 extern kern_return_t test_thread_call(void);
119 
/*
 * State for the single registered panic widget (see
 * xnupost_register_panic_widget / xnupost_reset_panic_widgets).
 * All-NULL means no widget is currently armed.
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
124 
/*
 * Master table of kernel POST tests.  Entries are conditionally compiled
 * per architecture/feature; each gets a table-of-contents number assigned
 * in xnupost_list_tests(), so ordering here only affects output order.
 */
struct xnupost_test kernel_post_tests[] = {
	XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	XNUPOST_TEST_CONFIG_BASIC(test_printf),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_handles),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	XNUPOST_TEST_CONFIG_BASIC(arm64_backtrace_test),
	XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
#if __ARM_PAN_AVAILABLE__
	XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#if CONFIG_SPTM
	XNUPOST_TEST_CONFIG_BASIC(arm64_panic_lockdown_test),
#endif /* CONFIG_SPTM */
	XNUPOST_TEST_CONFIG_BASIC(arm64_speculation_guard_test),
#endif /* __arm64__ */
	XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	//XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	XNUPOST_TEST_CONFIG_BASIC(counter_tests),
#if ML_IO_TIMEOUTS_ENABLED
	XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
#endif
#if HAS_SPECRES
	XNUPOST_TEST_CONFIG_BASIC(specres_test),
#endif
};
173 
174 uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
175 
/* Flag bits parsed out of the kernPOST boot-arg */
#define POSTARGS_RUN_TESTS 0x1            /* run the POST suite at boot */
#define POSTARGS_CONTROLLER_AVAILABLE 0x2 /* an external test controller is attached */
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4  /* kernPOST_config run list was supplied */
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY; /* KERN_INVALID_CAPABILITY == not parsed yet */
static char kernel_post_test_configs[256]; /* raw kernPOST_config run-list string */
boolean_t xnupost_should_run_test(uint32_t test_num);
185 
186 kern_return_t
xnupost_parse_config()187 xnupost_parse_config()
188 {
189 	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
190 		return parse_config_retval;
191 	}
192 	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
193 
194 	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
195 		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
196 	}
197 
198 	if (kernel_post_args != 0) {
199 		parse_config_retval = KERN_SUCCESS;
200 		goto out;
201 	}
202 	parse_config_retval = KERN_NOT_SUPPORTED;
203 out:
204 	return parse_config_retval;
205 }
206 
207 boolean_t
xnupost_should_run_test(uint32_t test_num)208 xnupost_should_run_test(uint32_t test_num)
209 {
210 	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
211 		int64_t begin = 0, end = 999999;
212 		char * b = kernel_post_test_configs;
213 		while (*b) {
214 			get_range_bounds(b, &begin, &end);
215 			if (test_num >= begin && test_num <= end) {
216 				return TRUE;
217 			}
218 
219 			/* skip to the next "," */
220 			while (*b != ',') {
221 				if (*b == '\0') {
222 					return FALSE;
223 				}
224 				b++;
225 			}
226 			/* skip past the ',' */
227 			b++;
228 		}
229 		return FALSE;
230 	}
231 	return TRUE;
232 }
233 
234 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)235 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
236 {
237 	if (KERN_SUCCESS != xnupost_parse_config()) {
238 		return KERN_FAILURE;
239 	}
240 
241 	xnupost_test_t testp;
242 	for (uint32_t i = 0; i < test_count; i++) {
243 		testp = &test_list[i];
244 		if (testp->xt_test_num == 0) {
245 			assert(total_post_tests_count < UINT16_MAX);
246 			testp->xt_test_num = (uint16_t)++total_post_tests_count;
247 		}
248 		/* make sure the boot-arg based test run list is honored */
249 		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
250 			testp->xt_config |= XT_CONFIG_IGNORE;
251 			if (xnupost_should_run_test(testp->xt_test_num)) {
252 				testp->xt_config &= ~(XT_CONFIG_IGNORE);
253 				testp->xt_config |= XT_CONFIG_RUN;
254 				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
255 			}
256 		}
257 		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
258 		    testp->xt_config);
259 	}
260 
261 	return KERN_SUCCESS;
262 }
263 
/*
 * Run every eligible test in test_list under the ktest harness.
 *
 * Honors the kernPOST boot-arg: if POSTARGS_RUN_TESTS is not set the whole
 * run is skipped.  Per test: panic-expecting tests are skipped unless a
 * controller is attached, XT_CONFIG_IGNORE tests are skipped outright, and
 * begin/end timestamps, the ktest result, and a pass/fail action code are
 * recorded on each entry.
 *
 * Returns KERN_SUCCESS even when individual tests fail; per-test status is
 * left in each entry's xt_retval / xt_test_actions fields.
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* disarm any panic widget a previous test left registered */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		/* pass only when the ktest result matches the expectation recorded in the table */
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
328 
329 kern_return_t
kernel_list_tests()330 kernel_list_tests()
331 {
332 	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
333 }
334 
335 kern_return_t
kernel_do_post()336 kernel_do_post()
337 {
338 	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
339 }
340 
341 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)342 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
343 {
344 	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
345 		return KERN_RESOURCE_SHORTAGE;
346 	}
347 
348 	xt_panic_widgets.xtp_context_p = context;
349 	xt_panic_widgets.xtp_func      = funcp;
350 	xt_panic_widgets.xtp_func_name = funcname;
351 	xt_panic_widgets.xtp_outval_p  = outval;
352 
353 	return KERN_SUCCESS;
354 }
355 
356 void
xnupost_reset_panic_widgets()357 xnupost_reset_panic_widgets()
358 {
359 	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
360 }
361 
362 kern_return_t
xnupost_process_kdb_stop(const char * panic_s)363 xnupost_process_kdb_stop(const char * panic_s)
364 {
365 	xt_panic_return_t retval         = 0;
366 	struct xnupost_panic_widget * pw = &xt_panic_widgets;
367 	const char * name = "unknown";
368 	if (xt_panic_widgets.xtp_func_name) {
369 		name = xt_panic_widgets.xtp_func_name;
370 	}
371 
372 	/* bail early on if kernPOST is not set */
373 	if (kernel_post_args == 0) {
374 		return KERN_INVALID_CAPABILITY;
375 	}
376 
377 	if (xt_panic_widgets.xtp_func) {
378 		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
379 		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
380 	} else {
381 		return KERN_INVALID_CAPABILITY;
382 	}
383 
384 	switch (retval) {
385 	case XT_RET_W_SUCCESS:
386 		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
387 		/* KERN_SUCCESS means return from panic/assertion */
388 		return KERN_SUCCESS;
389 
390 	case XT_RET_W_FAIL:
391 		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
392 		return KERN_SUCCESS;
393 
394 	case XT_PANIC_W_FAIL:
395 		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
396 		return KERN_FAILURE;
397 
398 	case XT_PANIC_W_SUCCESS:
399 		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
400 		return KERN_FAILURE;
401 
402 	case XT_PANIC_UNRELATED:
403 	default:
404 		T_LOG("UNRELATED: Continuing to kdb_stop.");
405 		return KERN_FAILURE;
406 	}
407 }
408 
409 xt_panic_return_t
_xt_generic_assert_check(const char * s,void * str_to_match,void ** outval)410 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
411 {
412 	xt_panic_return_t ret = XT_PANIC_UNRELATED;
413 
414 	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
415 		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
416 		ret = XT_RET_W_SUCCESS;
417 	}
418 
419 	if (outval) {
420 		*outval = (void *)(uintptr_t)ret;
421 	}
422 	return ret;
423 }
424 
425 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)426 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
427 {
428 	uint32_t i = 0;
429 	xnupost_test_t testp;
430 	for (; i < test_count; i++) {
431 		testp                  = &test_list[i];
432 		testp->xt_begin_time   = 0;
433 		testp->xt_end_time     = 0;
434 		testp->xt_test_actions = XT_ACTION_NONE;
435 		testp->xt_retval       = -1;
436 	}
437 	return KERN_SUCCESS;
438 }
439 
440 
/*
 * POST: basic zone-allocator sanity check.
 *
 * Creates a destructible zone of uint64_t elements, verifies it starts
 * with no free elements, does one zalloc/zfree round trip, and emits a
 * sample T_PERF datapoint (system thread count) to exercise perfdata.
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* a freshly created zone has nothing on its free list yet */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
464 
/*
 * qsort() comparator for uint64_t values, ascending order.
 * Returns <0, 0, or >0 as *a is less than, equal to, or greater than *b.
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t lhs = *(const uint64_t *)a;
	const uint64_t rhs = *(const uint64_t *)b;

	if (lhs == rhs) {
		return 0;
	}
	return (lhs < rhs) ? -1 : 1;
}
481 
/*
 * Count the number of set bits (population count) in a 64-bit value,
 * using Kernighan's clear-lowest-set-bit loop.
 */
static int
count_bits(uint64_t number)
{
	int set = 0;

	while (number != 0) {
		number &= number - 1; /* clears the lowest set bit */
		set++;
	}
	return set;
}
491 
kern_return_t
RandomULong_test(void)
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 *  a. RandomULong works
 *  b. The generated numbers match the following entropy criteria:
 *     For a thousand iterations, verify:
 *          1. mean entropy > 12 bits
 *          2. min entropy > 4 bits
 *          3. No Duplicate
 *          4. No incremental/decremental pattern in a window of 3
 *          5. No Zero
 *          6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 * "Entropy" here is the Hamming distance between consecutive samples.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 * (sorting first makes duplicates adjacent)
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
628 
629 
/* KCDATA kernel api tests */
/* kcdata descriptor reused across kcdata_api_test() calls */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/*
 * Sample payload layout used to exercise kcdata custom type definitions;
 * field offsets must stay in sync with test_disk_io_stats_def[].
 */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));
638 
/*
 * kcdata subtype descriptors describing struct sample_disk_io_stats
 * field by field; offsets are expressed in uint64_t-sized slots.
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		/* 4-element array: size packs the count via KCS_SUBTYPE_PACK_SIZE */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		/*
		 * NOTE(review): flagged ARRAY but kcs_elem_size is a plain
		 * element size with no packed count, unlike the entry above —
		 * confirm this is intentional.
		 */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
669 
/*
 * POST: exercise the kcdata (kernel chunked data) API end to end.
 *
 * Covers: negative argument checks for kcdata_memory_static_init and
 * kcdata_get_memory_addr, a successful static init over a kalloc'd page,
 * BEGIN/END header layout, used-byte accounting, zero-size and oversized
 * entries, the uint32/uint64-with-description helpers, array entries, and
 * registering a custom type definition.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* poison the length so we can verify init rewrites it */
	test_kc_data.kcd_length   = 0xdeadbeef;

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* array item flags encode (type << 32 | element count) */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
780 
781 /*
782  *  kern_return_t
783  *  kcdata_api_assert_tests()
784  *  {
785  *       kern_return_t retval       = 0;
786  *       void * assert_check_retval = NULL;
787  *       test_kc_data2.kcd_length   = 0xdeadbeef;
788  *       mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
789  *       T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
790  *
791  *       retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
792  *                                          KCFLAG_USE_MEMCOPY);
793  *
794  *       T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
795  *
796  *       retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
797  *       T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
798  *
799  *       // this will assert
800  *       retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
801  *       T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
802  *       T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
803  *
804  *       return KERN_SUCCESS;
805  *  }
806  */
807 
808 #if defined(__arm64__)
809 
810 #include <arm/pmap.h>
811 
812 #define MAX_PMAP_OBJECT_ELEMENT 100000
813 
814 extern struct vm_object pmap_object_store; /* store pt pages */
815 extern unsigned long gPhysBase, gPhysSize, first_avail;
816 
817 /*
818  * Define macros to transverse the pmap object structures and extract
819  * physical page number with information from low global only
820  * This emulate how Astris extracts information from coredump
821  */
822 #if defined(__arm64__)
823 
824 static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)825 astris_vm_page_unpack_ptr(uintptr_t p)
826 {
827 	if (!p) {
828 		return (uintptr_t)0;
829 	}
830 
831 	return (p & lowGlo.lgPmapMemFromArrayMask)
832 	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
833 	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
834 }
835 
// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

/* first element == "next" of the queue head */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* the queue is circular: iteration ends when we come back to the head */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* page number -> physical address, using the page shift stashed in lowGlo */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
850 
851 static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)852 astris_vm_page_get_phys_page(uintptr_t m)
853 {
854 	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
855 	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
856 	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
857 }
858 
/*
 * Verify that the lowGlo fields Astris uses to walk pmap_object_store in a
 * coredump are consistent with the live kernel: layout version/magic, the
 * vm_page structure offsets and packed-pointer constants, and that every
 * page reachable from lgPmapMemQ maps to a physical address inside
 * [gPhysBase, gPhysBase + gPhysSize).
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	/* walk the pmap object exactly the way Astris would from a coredump */
	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* guard against a corrupt or cyclic queue walking forever */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* the pmap object must hold at least one page */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
900 #endif /* defined(__arm64__) */
901 
/*
 * Per-thread parameters for the turnstile kernel-primitive test
 * (see thread_lock_unlock_kernel_primitive()).
 */
struct ts_kern_prim_test_args {
	int *end_barrier;       /* counter bumped when the thread is done */
	int *notify_b;          /* counter bumped before taking the lock */
	int *wait_event_b;      /* counter waited on before taking the lock */
	int before_num;         /* value *wait_event_b must reach */
	int *notify_a;          /* counter bumped right after taking the lock */
	int *wait_event_a;      /* counter waited on while holding the lock */
	int after_num;          /* value *wait_event_a must reach */
	int priority_to_check;  /* expected boosted sched_pri, 0 to skip check */
};
912 
/*
 * Block the calling thread until *var reaches num.
 *
 * Counterpart of wake_threads(): waiters sleep on the counter's address
 * and re-check the value around assert_wait() so a wakeup posted between
 * the load and the assert_wait cannot be missed.  A NULL var means
 * "nothing to wait for" and returns immediately.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			/* re-check after assert_wait to close the missed-wakeup window */
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				/* condition already satisfied: cancel the wait instead of blocking */
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
929 
930 static void
wake_threads(int * var)931 wake_threads(
932 	int* var)
933 {
934 	if (var) {
935 		os_atomic_inc(var, relaxed);
936 		thread_wakeup((event_t) var);
937 	}
938 }
939 
940 extern void IOSleep(int);
941 
/*
 * Body of every thread spawned by ts_kernel_primitive_test().
 *
 * Rendezvous with its peers through the counters in args, take the sysctl
 * turnstile test lock, and (if priority_to_check is set) verify that the
 * turnstile push from higher-priority waiters boosted this thread's
 * scheduler priority while it holds the lock.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	/* wait for before_num peers, then signal we are about to lock */
	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	/* lock held: announce it, then wait for after_num waiters to block */
	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	IOSleep(100);

	if (info->priority_to_check) {
		/* sample sched_pri under the thread lock at splsched */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
975 
/*
 * Turnstile kernel-primitive test.
 *
 * An owner thread (pri 80) takes the sysctl turnstile test lock; two
 * waiter threads (pri 85 and 90) then block on the same lock.  The owner
 * checks it was pushed to priority 90 (the highest waiter) before
 * releasing.  All three threads bump end_barrier when done.
 */
kern_return_t
ts_kernel_primitive_test(void)
{
	thread_t owner, thread1, thread2;
	struct ts_kern_prim_test_args targs[2] = {};
	kern_return_t result;
	int end_barrier = 0;
	int owner_locked = 0;
	int waiters_ready = 0;

	T_LOG("Testing turnstile kernel primitive");

	/* owner: locks immediately, waits for 2 waiters, expects pri 90 push */
	targs[0].notify_b = NULL;
	targs[0].wait_event_b = NULL;
	targs[0].before_num = 0;
	targs[0].notify_a = &owner_locked;
	targs[0].wait_event_a = &waiters_ready;
	targs[0].after_num = 2;
	targs[0].priority_to_check = 90;
	targs[0].end_barrier = &end_barrier;

	// Start owner with priority 80
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
	T_ASSERT(result == KERN_SUCCESS, "Starting owner");

	/* waiters: wait for the owner to hold the lock, then block on it */
	targs[1].notify_b = &waiters_ready;
	targs[1].wait_event_b = &owner_locked;
	targs[1].before_num = 1;
	targs[1].notify_a = NULL;
	targs[1].wait_event_a = NULL;
	targs[1].after_num = 0;
	targs[1].priority_to_check = 0;
	targs[1].end_barrier = &end_barrier;

	// Start waiters with priority 85 and 90
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");

	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");

	/* wait for owner + both waiters to finish */
	wait_threads(&end_barrier, 3);

	return KERN_SUCCESS;
}
1021 
1022 #define MTX_LOCK 0
1023 #define RW_LOCK 1
1024 
1025 #define NUM_THREADS 4
1026 
/*
 * Bookkeeping shared by all synchronization tests below: the spawned
 * threads, the highest priority handed out, and a done counter.
 */
struct synch_test_common {
	unsigned int nthreads;  /* number of threads the test spawns */
	thread_t *threads;      /* published thread pointers (see start_threads) */
	int max_pri;            /* highest priority assigned by start_threads */
	int test_done;          /* threads-finished counter (see notify_waiter) */
};
1033 
1034 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1035 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1036 {
1037 	info->nthreads = nthreads;
1038 	info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1039 	if (!info->threads) {
1040 		return ENOMEM;
1041 	}
1042 
1043 	return KERN_SUCCESS;
1044 }
1045 
/* Release the thread array allocated by init_synch_test_common(). */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree_type(thread_t, info->nthreads, info->threads);
}
1051 
1052 static void
start_threads(thread_continue_t func,struct synch_test_common * info,bool sleep_after_first)1053 start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
1054 {
1055 	thread_t thread;
1056 	kern_return_t result;
1057 	uint i;
1058 	int priority = 75;
1059 
1060 	info->test_done = 0;
1061 
1062 	for (i = 0; i < info->nthreads; i++) {
1063 		info->threads[i] = NULL;
1064 	}
1065 
1066 	info->max_pri = priority + (info->nthreads - 1) * 5;
1067 	if (info->max_pri > 95) {
1068 		info->max_pri = 95;
1069 	}
1070 
1071 	for (i = 0; i < info->nthreads; i++) {
1072 		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
1073 		os_atomic_store(&info->threads[i], thread, release);
1074 		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);
1075 
1076 		priority += 5;
1077 
1078 		if (i == 0 && sleep_after_first) {
1079 			IOSleep(100);
1080 		}
1081 	}
1082 }
1083 
1084 static unsigned int
get_max_pri(struct synch_test_common * info)1085 get_max_pri(struct synch_test_common * info)
1086 {
1087 	return info->max_pri;
1088 }
1089 
/* Block until every test thread has called notify_waiter(). */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1095 
/* Signal the test driver that the calling test thread is finished. */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1101 
/*
 * Poll until every test thread has been published in info->threads and is
 * no longer running on a CPU (its TH_RUN bit cleared), i.e. it is blocked.
 * Slots holding the (thread_t)1 sentinel (see exclude_current_waiter())
 * and the calling thread itself are skipped.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* wait for start_threads() to publish this slot */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					/* sentinel: thread excluded itself from the wait */
					break;
				}

				if (!(thread->state & TH_RUN)) {
					/* no longer on a CPU: this thread is blocked */
					break;
				}

				/* throttle the polling */
				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				/*
				 * NOTE(review): this continue only jumps to the loop
				 * condition, so a not-yet-started thread simply keeps
				 * being polled — same effect as falling through.
				 */
				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1141 
/*
 * Replace the calling thread's slot in info->threads with the sentinel
 * (thread_t)1, so wait_for_waiters() will not wait for it to block.
 */
static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* wait for start_threads() to publish this slot */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
1162 
/*
 * Shared state for the sleep-with-inheritor and gate tests.  prim_type
 * selects whether mtx_lock or rw_lock backs the primitive_* wrappers.
 */
struct info_sleep_inheritor_test {
	struct synch_test_common head;     /* common thread bookkeeping */
	lck_mtx_t mtx_lock;
	lck_rw_t rw_lock;
	decl_lck_mtx_gate_data(, gate);    /* statically embedded gate */
	boolean_t gate_closed;
	int prim_type;                     /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;
	int synch_value;                   /* target value for the synch counter */
	int synch;                         /* rendezvous counter */
	int value;
	int handoff_failure;               /* wakeups that found no waiter */
	thread_t thread_inheritor;         /* current push target; also the event */
	bool use_alloc_gate;               /* use alloc_gate instead of gate */
	gate_t *alloc_gate;                /* heap-allocated gate, when used */
	struct obj_cached **obj_cache;
	kern_apfs_reflock_data(, reflock);
	int reflock_protected_status;
};
1184 
1185 static void
primitive_lock(struct info_sleep_inheritor_test * info)1186 primitive_lock(struct info_sleep_inheritor_test *info)
1187 {
1188 	switch (info->prim_type) {
1189 	case MTX_LOCK:
1190 		lck_mtx_lock(&info->mtx_lock);
1191 		break;
1192 	case RW_LOCK:
1193 		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1194 		break;
1195 	default:
1196 		panic("invalid type %d", info->prim_type);
1197 	}
1198 }
1199 
1200 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1201 primitive_unlock(struct info_sleep_inheritor_test *info)
1202 {
1203 	switch (info->prim_type) {
1204 	case MTX_LOCK:
1205 		lck_mtx_unlock(&info->mtx_lock);
1206 		break;
1207 	case RW_LOCK:
1208 		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1209 		break;
1210 	default:
1211 		panic("invalid type %d", info->prim_type);
1212 	}
1213 }
1214 
1215 static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test * info)1216 primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
1217 {
1218 	wait_result_t ret = KERN_SUCCESS;
1219 	switch (info->prim_type) {
1220 	case MTX_LOCK:
1221 		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1222 		break;
1223 	case RW_LOCK:
1224 		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1225 		break;
1226 	default:
1227 		panic("invalid type %d", info->prim_type);
1228 	}
1229 
1230 	return ret;
1231 }
1232 
1233 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1234 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1235 {
1236 	switch (info->prim_type) {
1237 	case MTX_LOCK:
1238 	case RW_LOCK:
1239 		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1240 		break;
1241 	default:
1242 		panic("invalid type %d", info->prim_type);
1243 	}
1244 }
1245 
1246 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1247 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1248 {
1249 	switch (info->prim_type) {
1250 	case MTX_LOCK:
1251 	case RW_LOCK:
1252 		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1253 		break;
1254 	default:
1255 		panic("invalid type %d", info->prim_type);
1256 	}
1257 	return;
1258 }
1259 
1260 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1261 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1262 {
1263 	switch (info->prim_type) {
1264 	case MTX_LOCK:
1265 	case RW_LOCK:
1266 		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1267 		break;
1268 	default:
1269 		panic("invalid type %d", info->prim_type);
1270 	}
1271 	return;
1272 }
1273 
1274 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1275 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1276 {
1277 	gate_t *gate = &info->gate;
1278 	if (info->use_alloc_gate == true) {
1279 		gate = info->alloc_gate;
1280 	}
1281 	kern_return_t ret = KERN_SUCCESS;
1282 	switch (info->prim_type) {
1283 	case MTX_LOCK:
1284 		ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1285 		break;
1286 	case RW_LOCK:
1287 		ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1288 		break;
1289 	default:
1290 		panic("invalid type %d", info->prim_type);
1291 	}
1292 	return ret;
1293 }
1294 
1295 static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test * info)1296 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1297 {
1298 	gate_t *gate = &info->gate;
1299 	if (info->use_alloc_gate == true) {
1300 		gate = info->alloc_gate;
1301 	}
1302 	gate_wait_result_t ret = GATE_OPENED;
1303 	switch (info->prim_type) {
1304 	case MTX_LOCK:
1305 		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1306 		break;
1307 	case RW_LOCK:
1308 		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1309 		break;
1310 	default:
1311 		panic("invalid type %d", info->prim_type);
1312 	}
1313 	return ret;
1314 }
1315 
1316 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1317 primitive_gate_open(struct info_sleep_inheritor_test *info)
1318 {
1319 	gate_t *gate = &info->gate;
1320 	if (info->use_alloc_gate == true) {
1321 		gate = info->alloc_gate;
1322 	}
1323 	switch (info->prim_type) {
1324 	case MTX_LOCK:
1325 		lck_mtx_gate_open(&info->mtx_lock, gate);
1326 		break;
1327 	case RW_LOCK:
1328 		lck_rw_gate_open(&info->rw_lock, gate);
1329 		break;
1330 	default:
1331 		panic("invalid type %d", info->prim_type);
1332 	}
1333 }
1334 
1335 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1336 primitive_gate_close(struct info_sleep_inheritor_test *info)
1337 {
1338 	gate_t *gate = &info->gate;
1339 	if (info->use_alloc_gate == true) {
1340 		gate = info->alloc_gate;
1341 	}
1342 
1343 	switch (info->prim_type) {
1344 	case MTX_LOCK:
1345 		lck_mtx_gate_close(&info->mtx_lock, gate);
1346 		break;
1347 	case RW_LOCK:
1348 		lck_rw_gate_close(&info->rw_lock, gate);
1349 		break;
1350 	default:
1351 		panic("invalid type %d", info->prim_type);
1352 	}
1353 }
1354 
1355 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1356 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1357 {
1358 	gate_t *gate = &info->gate;
1359 	if (info->use_alloc_gate == true) {
1360 		gate = info->alloc_gate;
1361 	}
1362 
1363 	switch (info->prim_type) {
1364 	case MTX_LOCK:
1365 		lck_mtx_gate_steal(&info->mtx_lock, gate);
1366 		break;
1367 	case RW_LOCK:
1368 		lck_rw_gate_steal(&info->rw_lock, gate);
1369 		break;
1370 	default:
1371 		panic("invalid type %d", info->prim_type);
1372 	}
1373 }
1374 
1375 static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test * info,int flags)1376 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1377 {
1378 	gate_t *gate = &info->gate;
1379 	if (info->use_alloc_gate == true) {
1380 		gate = info->alloc_gate;
1381 	}
1382 
1383 	kern_return_t ret = KERN_SUCCESS;
1384 	switch (info->prim_type) {
1385 	case MTX_LOCK:
1386 		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1387 		break;
1388 	case RW_LOCK:
1389 		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1390 		break;
1391 	default:
1392 		panic("invalid type %d", info->prim_type);
1393 	}
1394 	return ret;
1395 }
1396 
1397 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1398 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1399 {
1400 	gate_t *gate = &info->gate;
1401 	if (info->use_alloc_gate == true) {
1402 		gate = info->alloc_gate;
1403 	}
1404 
1405 	switch (info->prim_type) {
1406 	case MTX_LOCK:
1407 		lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1408 		break;
1409 	case RW_LOCK:
1410 		lck_rw_gate_assert(&info->rw_lock, gate, type);
1411 		break;
1412 	default:
1413 		panic("invalid type %d", info->prim_type);
1414 	}
1415 }
1416 
1417 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1418 primitive_gate_init(struct info_sleep_inheritor_test *info)
1419 {
1420 	switch (info->prim_type) {
1421 	case MTX_LOCK:
1422 		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1423 		break;
1424 	case RW_LOCK:
1425 		lck_rw_gate_init(&info->rw_lock, &info->gate);
1426 		break;
1427 	default:
1428 		panic("invalid type %d", info->prim_type);
1429 	}
1430 }
1431 
1432 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1433 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1434 {
1435 	switch (info->prim_type) {
1436 	case MTX_LOCK:
1437 		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1438 		break;
1439 	case RW_LOCK:
1440 		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1441 		break;
1442 	default:
1443 		panic("invalid type %d", info->prim_type);
1444 	}
1445 }
1446 
1447 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1448 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1449 {
1450 	gate_t *gate;
1451 	switch (info->prim_type) {
1452 	case MTX_LOCK:
1453 		gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1454 		break;
1455 	case RW_LOCK:
1456 		gate = lck_rw_gate_alloc_init(&info->rw_lock);
1457 		break;
1458 	default:
1459 		panic("invalid type %d", info->prim_type);
1460 	}
1461 	info->alloc_gate = gate;
1462 }
1463 
1464 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1465 primitive_gate_free(struct info_sleep_inheritor_test *info)
1466 {
1467 	T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1468 
1469 	switch (info->prim_type) {
1470 	case MTX_LOCK:
1471 		lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1472 		break;
1473 	case RW_LOCK:
1474 		lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1475 		break;
1476 	default:
1477 		panic("invalid type %d", info->prim_type);
1478 	}
1479 	info->alloc_gate = NULL;
1480 }
1481 
/*
 * Use sleep_with_inheritor as a mutex: the first thread in becomes the
 * inheritor ("owner"); everyone else sleeps pushing on it.  Each thread
 * later wakes exactly one successor, which becomes the new inheritor.
 * Exactly one wakeup is expected to find no waiter (handoff_failure).
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first thread in: take ownership without sleeping */
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	/* "critical section" protected by the inheritor mutex */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* nobody left to wake: allowed exactly once (the last thread) */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* drop the reference wakeup_one returned on the new inheritor */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* all priority pushes must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1534 
/*
 * One thread becomes the inheritor and "does work" while every other
 * thread sleeps pushing on it; the inheritor then verifies its priority
 * was boosted to the maximum waiter priority before waking everyone.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first in: become the inheritor */
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* wait until every other thread has committed to sleeping */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* with all waiters pushing, we must be at the max waiter priority */
		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* signal the inheritor, then sleep pushing on it */
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1574 
/*
 * Exercise change_sleep_inheritor(): the first thread becomes the
 * inheritor, the second "steals" the inheritorship by redirecting the
 * sleepers' push to itself, and the remaining threads just sleep while
 * recording the highest stealer priority to verify the push.
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first in: become the inheritor, and drop out of the wait set */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* only wake if the stealer has not taken over in the meantime */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* second in: steal the inheritorship and the push */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* the stolen push must boost us to the highest sleeper priority */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* plain sleeper: record the max priority the stealer must reach */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1635 
/*
 * Threads sleep with a NULL inheritor (no push target); the last thread
 * to arrive (info->value reaches 0) wakes everyone up.  Exercises
 * wakeup_all with no inheritor set.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* last thread in: release all sleepers */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* sleep without promoting anyone */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1662 
/*
 * Stress lck_mtx_sleep_with_inheritor in two phases: first with the mutex
 * held normally (randomly choosing LCK_SLEEP_DEFAULT vs LCK_SLEEP_UNLOCK),
 * then with the mutex acquired as a spin lock (LCK_SLEEP_SPIN vs
 * LCK_SLEEP_SPIN_ALWAYS).  In each round, whichever thread finds
 * thread_inheritor NULL becomes the inheritor, verifies it was pushed to
 * the maximum waiter priority, and wakes everyone.
 */
static void
thread_mtx_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/* phase 1: mutex held as a normal (blocking) mutex */
	for (i = 0; i < 10; i++) {
		lck_mtx_lock(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			/* this round's inheritor */
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			os_atomic_store(&info->synch, 0, relaxed);

			lck_mtx_lock(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		/* sleeper: pick a random sleep flavor */
		read_random(&rand, sizeof(rand));
		mod_rand = rand % 2;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			/* LCK_SLEEP_UNLOCK: the mutex is dropped for us on wakeup */
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		default:
			/* NOTE(review): message says mod4 but the divisor above is 2 */
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/*
	 * spin here to stop using the lock as mutex
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	/* phase 2: mutex acquired as a spin lock */
	for (i = 0; i < 10; i++) {
		/* read_random might sleep so read it before acquiring the mtx as spin */
		read_random(&rand, sizeof(rand));

		lck_mtx_lock_spin(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			lck_mtx_lock_spin(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		mod_rand = rand % 2;
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		default:
			/* NOTE(review): message says mod4 but the divisor above is 2 */
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1762 
/*
 * Worker for test_rw_lock(): exercises lck_rw_sleep_with_inheritor() with
 * every LCK_SLEEP_* relock mode. The first thread to observe a NULL
 * inheritor upgrades to exclusive, becomes the inheritor, waits for all
 * the others to sleep on it, asserts it inherited the max waiter
 * priority, and then wakes everybody with wakeup_all_with_inheritor().
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		/* Take the lock shared first; only upgrade if we may become inheritor. */
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				/* Upgrade succeeded; re-check the inheritor under exclusive hold. */
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					/* All waiters should be pushing on us: check priority inheritance. */
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				/* Upgrade failed: the lock was dropped, start over. */
				goto try_again;
			}
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		/* Sleep as a waiter, randomly picking one of the sleep/relock modes. */
		switch (mod_rand) {
		case 0:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			/* LCK_SLEEP_UNLOCK leaves the lock dropped on wakeup: no unlock here. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			/* Wakes up holding the lock shared, whatever mode it was taken in. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			/* Wakes up holding the lock exclusive. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/* Any inherited push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1839 
/* Lifecycle states for a cache entry. */
#define OBJ_STATE_UNUSED        0       /* slot exists but holds no live object */
#define OBJ_STATE_REAL          1       /* object fully initialized and usable */
#define OBJ_STATE_PLACEHOLDER   2       /* init in progress; waiters push on the initializer */

#define OBJ_BUFF_SIZE 11                /* fits "I am groot" plus the NUL terminator */
/*
 * One entry of the fake object cache used by the reflock tests.
 */
struct obj_cached {
	int obj_id;                              /* key matched by find_id_in_cache() */
	int obj_state;                           /* one of the OBJ_STATE_* values above */
	struct kern_apfs_reflock *obj_refcount;  /* refcount+lock guarding init/teardown */
	char obj_buff[OBJ_BUFF_SIZE];            /* payload; "I am groot" while unused */
};

#define CACHE_SIZE 2                    /* number of slots in the cache */
#define USE_CACHE_ROUNDS 15             /* get/put iterations per thread_use_cache thread */

#define REFCOUNT_REFLOCK_ROUNDS 15      /* ref/unref iterations per reflock worker thread */
1856 
1857 /*
1858  * For the reflock cache test the cache is allocated
1859  * and its pointer is saved in obj_cache.
1860  * The lock for the cache is going to be one of the exclusive
1861  * locks already present in struct info_sleep_inheritor_test.
1862  */
1863 
1864 static struct obj_cached *
alloc_init_cache_entry(void)1865 alloc_init_cache_entry(void)
1866 {
1867 	struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1868 	cache_entry->obj_id = 0;
1869 	cache_entry->obj_state = OBJ_STATE_UNUSED;
1870 	cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
1871 	snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
1872 	return cache_entry;
1873 }
1874 
1875 static void
init_cache(struct info_sleep_inheritor_test * info)1876 init_cache(struct info_sleep_inheritor_test *info)
1877 {
1878 	struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1879 
1880 	int i;
1881 	for (i = 0; i < CACHE_SIZE; i++) {
1882 		obj_cache[i] = alloc_init_cache_entry();
1883 	}
1884 
1885 	info->obj_cache = obj_cache;
1886 }
1887 
1888 static void
check_cache_empty(struct info_sleep_inheritor_test * info)1889 check_cache_empty(struct info_sleep_inheritor_test *info)
1890 {
1891 	struct obj_cached **obj_cache = info->obj_cache;
1892 
1893 	int i, ret;
1894 	for (i = 0; i < CACHE_SIZE; i++) {
1895 		if (obj_cache[i] != NULL) {
1896 			T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1897 			T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1898 			ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1899 			T_ASSERT(ret == 0, "checked buff correctly emptied");
1900 		}
1901 	}
1902 }
1903 
1904 static void
free_cache(struct info_sleep_inheritor_test * info)1905 free_cache(struct info_sleep_inheritor_test *info)
1906 {
1907 	struct obj_cached **obj_cache = info->obj_cache;
1908 
1909 	int i;
1910 	for (i = 0; i < CACHE_SIZE; i++) {
1911 		if (obj_cache[i] != NULL) {
1912 			kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1913 			obj_cache[i]->obj_refcount = NULL;
1914 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1915 			obj_cache[i] = NULL;
1916 		}
1917 	}
1918 
1919 	kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1920 	info->obj_cache = NULL;
1921 }
1922 
1923 static struct obj_cached *
find_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info)1924 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1925 {
1926 	struct obj_cached **obj_cache = info->obj_cache;
1927 	int i;
1928 	for (i = 0; i < CACHE_SIZE; i++) {
1929 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1930 			return obj_cache[i];
1931 		}
1932 	}
1933 	return NULL;
1934 }
1935 
1936 static bool
free_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info,__assert_only struct obj_cached * expected)1937 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, __assert_only struct obj_cached *expected)
1938 {
1939 	struct obj_cached **obj_cache = info->obj_cache;
1940 	int i;
1941 	for (i = 0; i < CACHE_SIZE; i++) {
1942 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1943 			assert(obj_cache[i] == expected);
1944 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1945 			obj_cache[i] = NULL;
1946 			return true;
1947 		}
1948 	}
1949 	return false;
1950 }
1951 
1952 static struct obj_cached *
find_empty_spot_in_cache(struct info_sleep_inheritor_test * info)1953 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1954 {
1955 	struct obj_cached **obj_cache = info->obj_cache;
1956 	int i;
1957 	for (i = 0; i < CACHE_SIZE; i++) {
1958 		if (obj_cache[i] == NULL) {
1959 			obj_cache[i] = alloc_init_cache_entry();
1960 			return obj_cache[i];
1961 		}
1962 		if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1963 			return obj_cache[i];
1964 		}
1965 	}
1966 	return NULL;
1967 }
1968 
/*
 * Look up (or create) the cache entry for obj_id and take a reference on
 * its reflock. On success returns 0 with *buff pointing at the entry's
 * payload; returns -1 if the cache is full of in-use objects. If the
 * entry has to be initialized, this thread marks it OBJ_STATE_PLACEHOLDER
 * while holding the reflock so that concurrent getters wait (and push
 * their priority) on it.
 */
static int
get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	kern_apfs_reflock_t refcount = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;

try_again:
	primitive_lock(info);
	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
		/* Found an allocated object on the cache with same id */

		/*
		 * copy the pointer to obj_refcount as obj might
		 * get deallocated after primitive_unlock()
		 */
		refcount = obj->obj_refcount;
		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
			/*
			 * Got a ref, let's check the state
			 */
			switch (obj->obj_state) {
			case OBJ_STATE_UNUSED:
				goto init;
			case OBJ_STATE_REAL:
				goto done;
			case OBJ_STATE_PLACEHOLDER:
				/* Impossible: a placeholder holds the reflock, so try_get_ref would have failed. */
				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
			default:
				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
			}
		} else {
			/*
			 * Didn't get a ref.
			 * This means either an obj_put() of the last ref is ongoing
			 * or an init of the object is happening.
			 * In both cases wait for that to finish and retry.
			 * While waiting, the thread that is holding the reflock
			 * will get a priority at least as high as this thread's.
			 */
			primitive_unlock(info);
			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			goto try_again;
		}
	} else {
		/* Look for a spot on the cache where we can save the object */

		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
			/*
			 * Sadness: cache is full, and everything in the cache is
			 * used.
			 */
			primitive_unlock(info);
			return -1;
		} else {
			/*
			 * copy the pointer to obj_refcount as obj might
			 * get deallocated after primitive_unlock()
			 */
			refcount = obj->obj_refcount;
			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
				/*
				 * Got a ref on a OBJ_STATE_UNUSED obj.
				 * Recycle time.
				 */
				obj->obj_id = obj_id;
				goto init;
			} else {
				/*
				 * This could happen if the obj_put() has just changed the
				 * state to OBJ_STATE_UNUSED, but not unlocked the reflock yet.
				 */
				primitive_unlock(info);
				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
				goto try_again;
			}
		}
	}
init:
	assert(obj->obj_id == obj_id);
	assert(obj->obj_state == OBJ_STATE_UNUSED);
	/*
	 * We already got a ref on the object, but we need
	 * to initialize it. Mark it as
	 * OBJ_STATE_PLACEHOLDER and get the obj_reflock.
	 * In this way all threads waiting for this init
	 * to finish will push on this thread.
	 */
	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
	/* Cannot fail: we hold a ref and nobody else can lock an UNUSED entry. */
	assert(ret == true);
	obj->obj_state = OBJ_STATE_PLACEHOLDER;
	primitive_unlock(info);

	//let's pretend we are populating the obj
	IOSleep(10);
	/*
	 * obj will not be deallocated while I hold a ref.
	 * So it is safe to access it.
	 */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);

	primitive_lock(info);
	/* The entry must still be in place: nobody can free it while we hold a ref. */
	obj2 = find_id_in_cache(obj_id, info);
	assert(obj == obj2);
	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);

	obj->obj_state = OBJ_STATE_REAL;
	kern_apfs_reflock_unlock(refcount);

done:
	*buff = obj->obj_buff;
	primitive_unlock(info);
	return 0;
}
2084 
/*
 * Drop the reference taken by get_obj_cache() on obj_id. The thread that
 * releases the last reference gets the reflock locked
 * (KERN_APFS_REFLOCK_IN_LOCK_IF_LAST), resets the payload and state to
 * "unused" and, when free is true, also removes the entry from the cache
 * and frees its reflock.
 */
static void
put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_t refcount = NULL;

	primitive_lock(info);
	obj = find_id_in_cache(obj_id, info);
	primitive_unlock(info);

	/*
	 * Nobody should have been able to remove obj_id
	 * from the cache.
	 */
	assert(obj != NULL);
	assert(obj->obj_state == OBJ_STATE_REAL);

	refcount = obj->obj_refcount;

	/*
	 * This should never fail, as either the reflock
	 * was acquired when the state was OBJ_STATE_UNUSED to init,
	 * or from a put that reached zero. And if the latter
	 * happened, subsequent reflock_get_ref() will have had to wait for
	 * the transition to OBJ_STATE_REAL.
	 */
	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
	assert(ret == true);
	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
		/* Not the last reference: nothing to clean up. */
		return;
	}

	/*
	 * Note: nobody at this point will be able to get a ref or a lock on
	 * refcount.
	 * All people waiting on refcount will push on this thread.
	 */

	//let's pretend we are flushing the obj somewhere.
	IOSleep(10);
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");

	primitive_lock(info);
	obj->obj_state = OBJ_STATE_UNUSED;
	if (free) {
		obj2 = find_id_in_cache(obj_id, info);
		assert(obj == obj2);

		/* Frees the entry but not its reflock, which we still hold. */
		ret = free_id_in_cache(obj_id, info, obj);
		assert(ret == true);
	}
	primitive_unlock(info);

	kern_apfs_reflock_unlock(refcount);

	if (free) {
		kern_apfs_reflock_free(refcount);
	}
}
2146 
/*
 * Worker for test_cache_reflock(): each thread picks an object id in
 * [1, CACHE_SIZE + 1] (so the cache can run out of slots), then for
 * USE_CACHE_ROUNDS rounds takes a reference via get_obj_cache(), checks
 * the payload is stable across a sleep, and puts the reference back,
 * freeing the entry on even rounds.
 */
static void
thread_use_cache(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	int my_obj;

	primitive_lock(info);
	/* info->value starts at nthreads; each thread claims a distinct slot. */
	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
	primitive_unlock(info);

	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
	/*
	 * This is the string I would expect to see
	 * on my_obj buff.
	 */
	char my_string[OBJ_BUFF_SIZE];
	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);

	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
		char *buff;
		while (get_obj_cache(my_obj, info, &buff) == -1) {
			/*
			 * Cache is full, wait.
			 */
			IOSleep(10);
		}
		/* Payload must stay valid and unchanged while we hold a ref. */
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		IOSleep(10);
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		put_obj_cache(my_obj, info, (i % 2 == 0));
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2190 
/*
 * Worker for test_refcount_reflock(): threads concurrently take and drop
 * references on the shared reflock. Whoever performs the 0->1 transition
 * (gets KERN_APFS_REFLOCK_OUT_LOCKED back) sets reflock_protected_status
 * under the reflock; whoever performs 1->0 clears it again.
 */
static void
thread_refcount_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_in_flags_t in_flags;

	T_LOG("Thread %p started", current_thread());
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
		if ((i % 2) == 0) {
			/* On even rounds declare willingness to wait if the lock is held. */
			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
		}
		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
		if (ret == true) {
			/* got reference, check if we did 0->1 */
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 0, "status init check");
				info->reflock_protected_status = 1;
				kern_apfs_reflock_unlock(&info->reflock);
			} else {
				/* Someone else already did 0->1 and set the status. */
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
			}
			/* release the reference and check if we did 1->0 */
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
				info->reflock_protected_status = 0;
				kern_apfs_reflock_unlock(&info->reflock);
			}
		} else {
			/* didn't get a reference */
			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
				/* We promised to wait: block until the holder unlocks. */
				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2242 
/*
 * Worker for test_force_reflock(): the first thread to bump info->value
 * locks the reflock with KERN_APFS_REFLOCK_IN_ALLOW_FORCE and holds it;
 * every other thread gets/puts references with KERN_APFS_REFLOCK_IN_FORCE,
 * which must succeed even while the lock is held.
 */
static void
thread_force_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* First thread in becomes the lock holder. */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* Keep the lock held while the others force refs through it. */
		IOSleep(100);
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2283 
/*
 * Worker for test_lock_reflock(): one thread takes the reflock with the
 * default flags and sets reflock_protected_status; the others retry a
 * non-forced try_get_ref, which can only succeed after the holder has
 * cleared the status and unlocked.
 */
static void
thread_lock_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* First thread in becomes the lock holder. */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
		info->reflock_protected_status = 1;
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		IOSleep(100);
		/* Clear the status before releasing so getters observe it as 0. */
		info->reflock_protected_status = 0;
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
			if (ret == true) {
				/* Ref acquired => lock must have been released already. */
				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
				break;
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2329 
2330 static void
test_cache_reflock(struct info_sleep_inheritor_test * info)2331 test_cache_reflock(struct info_sleep_inheritor_test *info)
2332 {
2333 	info->synch = 0;
2334 	info->synch_value = info->head.nthreads;
2335 
2336 	info->value = info->head.nthreads;
2337 	/*
2338 	 * Use the mtx as cache lock
2339 	 */
2340 	info->prim_type = MTX_LOCK;
2341 
2342 	init_cache(info);
2343 
2344 	start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2345 	wait_all_thread((struct synch_test_common *)info);
2346 
2347 	check_cache_empty(info);
2348 	free_cache(info);
2349 }
2350 
2351 static void
test_refcount_reflock(struct info_sleep_inheritor_test * info)2352 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2353 {
2354 	info->synch = 0;
2355 	info->synch_value = info->head.nthreads;
2356 	kern_apfs_reflock_init(&info->reflock);
2357 	info->reflock_protected_status = 0;
2358 
2359 	start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2360 	wait_all_thread((struct synch_test_common *)info);
2361 
2362 	kern_apfs_reflock_destroy(&info->reflock);
2363 
2364 	T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2365 }
2366 
2367 static void
test_force_reflock(struct info_sleep_inheritor_test * info)2368 test_force_reflock(struct info_sleep_inheritor_test *info)
2369 {
2370 	info->synch = 0;
2371 	info->synch_value = info->head.nthreads;
2372 	kern_apfs_reflock_init(&info->reflock);
2373 	info->value = 0;
2374 
2375 	start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2376 	wait_all_thread((struct synch_test_common *)info);
2377 
2378 	kern_apfs_reflock_destroy(&info->reflock);
2379 }
2380 
2381 static void
test_lock_reflock(struct info_sleep_inheritor_test * info)2382 test_lock_reflock(struct info_sleep_inheritor_test *info)
2383 {
2384 	info->synch = 0;
2385 	info->synch_value = info->head.nthreads;
2386 	kern_apfs_reflock_init(&info->reflock);
2387 	info->value = 0;
2388 
2389 	start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2390 	wait_all_thread((struct synch_test_common *)info);
2391 
2392 	kern_apfs_reflock_destroy(&info->reflock);
2393 }
2394 
2395 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)2396 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2397 {
2398 	info->prim_type = prim_type;
2399 	info->synch = 0;
2400 	info->synch_value = info->head.nthreads;
2401 
2402 	info->thread_inheritor = NULL;
2403 
2404 	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2405 	wait_all_thread((struct synch_test_common *)info);
2406 }
2407 
2408 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)2409 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2410 {
2411 	info->prim_type = prim_type;
2412 
2413 	info->synch = 0;
2414 	info->synch_value = info->head.nthreads;
2415 	info->value = 0;
2416 	info->handoff_failure = 0;
2417 	info->thread_inheritor = NULL;
2418 
2419 	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2420 	wait_all_thread((struct synch_test_common *)info);
2421 
2422 	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2423 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2424 }
2425 
2426 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2427 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2428 {
2429 	info->prim_type = prim_type;
2430 
2431 	info->thread_inheritor = NULL;
2432 	info->steal_pri = 0;
2433 	info->synch = 0;
2434 	info->synch_value = info->head.nthreads;
2435 
2436 	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2437 	wait_all_thread((struct synch_test_common *)info);
2438 }
2439 
2440 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2441 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2442 {
2443 	info->prim_type = prim_type;
2444 	info->synch = 0;
2445 	info->synch_value = info->head.nthreads;
2446 
2447 	info->thread_inheritor = NULL;
2448 	info->value = info->head.nthreads;
2449 
2450 	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2451 	wait_all_thread((struct synch_test_common *)info);
2452 }
2453 
2454 static void
test_rw_lock(struct info_sleep_inheritor_test * info)2455 test_rw_lock(struct info_sleep_inheritor_test *info)
2456 {
2457 	info->thread_inheritor = NULL;
2458 	info->value = info->head.nthreads;
2459 	info->synch = 0;
2460 	info->synch_value = info->head.nthreads;
2461 
2462 	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2463 	wait_all_thread((struct synch_test_common *)info);
2464 }
2465 
2466 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)2467 test_mtx_lock(struct info_sleep_inheritor_test *info)
2468 {
2469 	info->thread_inheritor = NULL;
2470 	info->value = info->head.nthreads;
2471 	info->synch = 0;
2472 	info->synch_value = info->head.nthreads;
2473 
2474 	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2475 	wait_all_thread((struct synch_test_common *)info);
2476 }
2477 
/*
 * Entry point for the sleep-with-inheritor test suite: sets up the shared
 * locks, runs every sleep/wakeup/inheritor/reflock scenario in sequence,
 * and tears everything down. Returns KERN_SUCCESS (failures assert inside
 * the individual scenarios).
 */
kern_return_t
ts_kernel_sleep_inheritor_test(void)
{
	struct info_sleep_inheritor_test info = {};

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with mxt sleep");
	test_change_sleep_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with rw sleep");
	test_change_sleep_inheritor(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, RW_LOCK);

	/*
	 * Testing mtx locking combinations
	 */
	T_LOG("Testing mtx locking combinations");
	test_mtx_lock(&info);

	/*
	 * Testing rw locking combinations
	 */
	T_LOG("Testing rw locking combinations");
	test_rw_lock(&info);

	/*
	 * Testing reflock / cond_sleep_with_inheritor
	 */
	T_LOG("Test cache reflock + cond_sleep_with_inheritor");
	test_cache_reflock(&info);
	T_LOG("Test force reflock + cond_sleep_with_inheritor");
	test_force_reflock(&info);
	T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
	test_refcount_reflock(&info);
	T_LOG("Test lock reflock + cond_sleep_with_inheritor");
	test_lock_reflock(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
2578 
/*
 * Worker for the gate "steal" scenario: the first thread closes the gate,
 * the next one steals it with primitive_gate_steal(), and the rest wait
 * on the gate (recording the max waiter priority in steal_pri). The
 * stealer asserts it inherited that max priority while holding the gate.
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* First thread: close the open gate and hold it. */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		/* Only open if the gate was not stolen from us in the meantime. */
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* Second thread: take the gate away from the current holder. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* Holding the gate, we must have inherited the max waiter priority. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* Remaining threads: record our priority and wait on the gate. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* Any inherited push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2638 
/*
 * Worker for the gate free scenario: the single thread that manages to
 * close the gate waits for everyone else to block on it, then opens and
 * frees the gate; all other threads assert it was closed, wait on it, and
 * expect to be woken with GATE_OPENED.
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* We are the holder: wait for all the others to block on the gate. */
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		primitive_gate_open(info);
		/* Free the gate while waiters are being woken up. */
		primitive_gate_free(info);
	} else {
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2674 
/*
 * Worker that uses a gate as a mutual-exclusion primitive: ownership is
 * acquired either by closing the gate or by receiving a handoff
 * (GATE_HANDOFF) while waiting. The owner bumps info->value outside the
 * interlock, then hands the gate to the next waiter; exactly one handoff
 * is expected to find no waiter (KERN_NOT_WAITING) and is counted in
 * handoff_failure.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		/* Gate already held: wait for it to be handed to us. */
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* Critical section protected by gate ownership alone. */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* Only the last owner should find nobody to hand off to. */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2722 
/*
 * Worker thread for test_gate_push().
 *
 * Exactly one thread at a time closes the gate and performs the "work";
 * every other thread blocks in primitive_gate_wait(), pushing its priority
 * on the gate holder through the turnstile.  The holder asserts that its
 * scheduling priority has been boosted to the max priority of the waiters.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			/* We own the gate: do the work while everybody else waits. */
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			/* Wait for all the other threads to block on the gate. */
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			/* Our priority should have been pushed to the waiters' max. */
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			/* Gate busy: block on it, then re-check whether work remains. */
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* All pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2764 
2765 static void
test_gate_push(struct info_sleep_inheritor_test * info,int prim_type)2766 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2767 {
2768 	info->prim_type = prim_type;
2769 	info->use_alloc_gate = false;
2770 
2771 	primitive_gate_init(info);
2772 	info->work_to_do = TRUE;
2773 	info->synch = 0;
2774 	info->synch_value = NUM_THREADS;
2775 
2776 	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2777 	wait_all_thread((struct synch_test_common *)info);
2778 
2779 	primitive_gate_destroy(info);
2780 }
2781 
2782 static void
test_gate_handoff(struct info_sleep_inheritor_test * info,int prim_type)2783 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2784 {
2785 	info->prim_type = prim_type;
2786 	info->use_alloc_gate = false;
2787 
2788 	primitive_gate_init(info);
2789 
2790 	info->synch = 0;
2791 	info->synch_value = NUM_THREADS;
2792 	info->value = 0;
2793 	info->handoff_failure = 0;
2794 
2795 	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2796 	wait_all_thread((struct synch_test_common *)info);
2797 
2798 	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2799 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2800 
2801 	primitive_gate_destroy(info);
2802 }
2803 
2804 static void
test_gate_steal(struct info_sleep_inheritor_test * info,int prim_type)2805 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2806 {
2807 	info->prim_type = prim_type;
2808 	info->use_alloc_gate = false;
2809 
2810 	primitive_gate_init(info);
2811 
2812 	info->synch = 0;
2813 	info->synch_value = NUM_THREADS;
2814 	info->thread_inheritor = NULL;
2815 	info->steal_pri = 0;
2816 
2817 	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2818 	wait_all_thread((struct synch_test_common *)info);
2819 
2820 	primitive_gate_destroy(info);
2821 }
2822 
2823 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2824 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2825 {
2826 	(void)info;
2827 	(void) prim_type;
2828 	info->prim_type = prim_type;
2829 	info->use_alloc_gate = true;
2830 
2831 	primitive_gate_alloc(info);
2832 
2833 	info->synch = 0;
2834 	info->synch_value = NUM_THREADS;
2835 
2836 	start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2837 	wait_all_thread((struct synch_test_common *)info);
2838 
2839 	T_ASSERT(info->alloc_gate == NULL, "gate free");
2840 	info->use_alloc_gate = false;
2841 }
2842 
2843 kern_return_t
ts_kernel_gate_test(void)2844 ts_kernel_gate_test(void)
2845 {
2846 	struct info_sleep_inheritor_test info = {};
2847 
2848 	T_LOG("Testing gate primitive");
2849 
2850 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2851 
2852 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2853 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2854 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2855 
2856 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2857 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2858 
2859 	/*
2860 	 * Testing the priority inherited by the keeper
2861 	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2862 	 */
2863 	T_LOG("Testing gate push, mtx");
2864 	test_gate_push(&info, MTX_LOCK);
2865 
2866 	T_LOG("Testing gate push, rw");
2867 	test_gate_push(&info, RW_LOCK);
2868 
2869 	/*
2870 	 * Testing the handoff
2871 	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2872 	 */
2873 	T_LOG("Testing gate handoff, mtx");
2874 	test_gate_handoff(&info, MTX_LOCK);
2875 
2876 	T_LOG("Testing gate handoff, rw");
2877 	test_gate_handoff(&info, RW_LOCK);
2878 
2879 	/*
2880 	 * Testing the steal
2881 	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2882 	 */
2883 	T_LOG("Testing gate steal, mtx");
2884 	test_gate_steal(&info, MTX_LOCK);
2885 
2886 	T_LOG("Testing gate steal, rw");
2887 	test_gate_steal(&info, RW_LOCK);
2888 
2889 	/*
2890 	 * Testing the alloc/free
2891 	 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2892 	 */
2893 	T_LOG("Testing gate alloc/free, mtx");
2894 	test_gate_alloc_free(&info, MTX_LOCK);
2895 
2896 	T_LOG("Testing gate alloc/free, rw");
2897 	test_gate_alloc_free(&info, RW_LOCK);
2898 
2899 	destroy_synch_test_common((struct synch_test_common *)&info);
2900 
2901 	lck_attr_free(lck_attr);
2902 	lck_grp_attr_free(lck_grp_attr);
2903 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2904 	lck_grp_free(lck_grp);
2905 
2906 	return KERN_SUCCESS;
2907 }
2908 
/* Number of threads chained together in the turnstile chain tests. */
#define NUM_THREAD_CHAIN 6

/* Shared state for the turnstile chain tests (sleep/gate/mixed chains). */
struct turnstile_chain_test {
	struct synch_test_common head;  /* common thread bookkeeping; must be first (structs are cast to it) */
	lck_mtx_t mtx_lock;             /* mutex backing the gates and sleep events */
	int synch_value;                /* number of threads expected at each rendezvous */
	int synch;                      /* first rendezvous counter */
	int synch2;                     /* second rendezvous counter */
	gate_t gates[NUM_THREAD_CHAIN]; /* one gate per thread in the chain */
};
2919 
/*
 * Worker thread for test_sleep_gate_chain().
 *
 * Builds a mixed push chain: even-indexed threads close a gate and later
 * sleep with an inheritor; odd-indexed threads wait on the previous
 * thread's gate.  Thread 0 sits at the head of the chain and should see
 * the whole chain's priority pushed onto it.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Rendezvous: make sure all even threads have closed their gates. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everybody to be blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		/* The whole chain's push should have landed on us. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Find our slot; we chain onto the previous thread. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd thread: block on the previous (even) thread's gate. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Wake the next thread in the chain, keeping the push on us. */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Release the gate we closed before sleeping. */
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* All pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3015 
/*
 * Worker thread for test_gate_chain().
 *
 * Every thread closes its own gate, then each thread (except the head)
 * blocks on the previous thread's gate, forming a gate-to-gate push chain
 * terminating at thread 0, which should inherit the chain's max priority.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Every thread closes the gate matching its own slot. */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	/* We must have found ourselves in the thread array. */
	assert(i != info->head.nthreads);

	/* Rendezvous: all gates are closed before anybody starts waiting. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until everybody is blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* The whole chain's push should have landed on us. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Block on the previous thread's gate, extending the chain. */
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Woken up: open our own gate to release the next thread. */
		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* All pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3078 
/*
 * Worker thread for test_sleep_chain().
 *
 * Each thread (except the head) sleeps with the previous thread as
 * inheritor, forming a sleep-with-inheritor push chain terminating at
 * thread 0, which should inherit the chain's max priority before waking
 * the chain back up one thread at a time.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until everybody is asleep behind us. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* The whole chain's push should have landed on us. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake thread 1 but keep the push on us for now. */
		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* Find our slot; we sleep on the previous thread as inheritor. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		/* Woken: the chain behind us should now push on us. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake the next thread, keeping the push; the tail sees no waiter. */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* All pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3156 
3157 static void
test_sleep_chain(struct turnstile_chain_test * info)3158 test_sleep_chain(struct turnstile_chain_test *info)
3159 {
3160 	info->synch = 0;
3161 	info->synch_value = info->head.nthreads;
3162 
3163 	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
3164 	wait_all_thread((struct synch_test_common *)info);
3165 }
3166 
3167 static void
test_gate_chain(struct turnstile_chain_test * info)3168 test_gate_chain(struct turnstile_chain_test *info)
3169 {
3170 	info->synch = 0;
3171 	info->synch2 = 0;
3172 	info->synch_value = info->head.nthreads;
3173 
3174 	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
3175 	wait_all_thread((struct synch_test_common *)info);
3176 }
3177 
3178 static void
test_sleep_gate_chain(struct turnstile_chain_test * info)3179 test_sleep_gate_chain(struct turnstile_chain_test *info)
3180 {
3181 	info->synch = 0;
3182 	info->synch2 = 0;
3183 	info->synch_value = info->head.nthreads;
3184 
3185 	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
3186 	wait_all_thread((struct synch_test_common *)info);
3187 }
3188 
3189 kern_return_t
ts_kernel_turnstile_chain_test(void)3190 ts_kernel_turnstile_chain_test(void)
3191 {
3192 	struct turnstile_chain_test info = {};
3193 	int i;
3194 
3195 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
3196 	lck_attr_t* lck_attr = lck_attr_alloc_init();
3197 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
3198 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
3199 
3200 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
3201 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3202 		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
3203 	}
3204 
3205 	T_LOG("Testing sleep chain, lck");
3206 	test_sleep_chain(&info);
3207 
3208 	T_LOG("Testing gate chain, lck");
3209 	test_gate_chain(&info);
3210 
3211 	T_LOG("Testing sleep and gate chain, lck");
3212 	test_sleep_gate_chain(&info);
3213 
3214 	destroy_synch_test_common((struct synch_test_common *)&info);
3215 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3216 		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
3217 	}
3218 	lck_attr_free(lck_attr);
3219 	lck_grp_attr_free(lck_grp_attr);
3220 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
3221 	lck_grp_free(lck_grp);
3222 
3223 	return KERN_SUCCESS;
3224 }
3225 
3226 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)3227 ts_kernel_timingsafe_bcmp_test(void)
3228 {
3229 	int i, buf_size;
3230 	char *buf = NULL;
3231 
3232 	// empty
3233 	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3234 	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3235 	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3236 
3237 	// equal
3238 	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3239 
3240 	// unequal
3241 	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3242 	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3243 	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3244 	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3245 
3246 	// all possible bitwise differences
3247 	for (i = 1; i < 256; i += 1) {
3248 		unsigned char a = 0;
3249 		unsigned char b = (unsigned char)i;
3250 
3251 		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3252 	}
3253 
3254 	// large
3255 	buf_size = 1024 * 16;
3256 	buf = kalloc_data(buf_size, Z_WAITOK);
3257 	T_EXPECT_NOTNULL(buf, "kalloc of buf");
3258 
3259 	read_random(buf, buf_size);
3260 	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3261 	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3262 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3263 
3264 	memcpy(buf + 128, buf, 128);
3265 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3266 
3267 	kfree_data(buf, buf_size);
3268 
3269 	return KERN_SUCCESS;
3270 }
3271 
/*
 * POST test exercising printf's %hx and %hhx (short / char) length
 * modifiers alongside a %llx argument, to check that the formatter masks
 * varargs-promoted values back down to their declared width.
 *
 * Output is only logged; the test passes if formatting does not crash.
 */
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	T_PASS("kprintf_hhx_test passed");
	return KERN_SUCCESS;
}
3284 
/*
 * Static-if test keys: two start enabled and two start disabled.  The
 * *_to_* keys are flipped by static_if_tests_setup() before
 * static_if_tests() runs, so both transitions get exercised.
 */
static STATIC_IF_KEY_DEFINE_TRUE(key_true);
static STATIC_IF_KEY_DEFINE_TRUE(key_true_to_false);
static STATIC_IF_KEY_DEFINE_FALSE(key_false);
static STATIC_IF_KEY_DEFINE_FALSE(key_false_to_true);
3289 
/*
 * Early init hook: flip the two transition keys so static_if_tests() can
 * verify that both the enable and disable transitions are honored.
 */
__static_if_init_func
static void
static_if_tests_setup(const char *args __unused)
{
	static_if_key_disable(key_true_to_false);
	static_if_key_enable(key_false_to_true);
}
STATIC_IF_INIT(static_if_tests_setup);
3298 
3299 static void
static_if_tests(void)3300 static_if_tests(void)
3301 {
3302 	int n = 0;
3303 
3304 	if (static_if(key_true)) {
3305 		n++;
3306 	}
3307 	if (probable_static_if(key_true)) {
3308 		n++;
3309 	}
3310 	if (improbable_static_if(key_true)) {
3311 		n++;
3312 	}
3313 	if (n != 3) {
3314 		panic("should still be enabled [n == %d, expected %d]", n, 3);
3315 	}
3316 
3317 	if (static_if(key_true_to_false)) {
3318 		n++;
3319 	}
3320 	if (probable_static_if(key_true_to_false)) {
3321 		n++;
3322 	}
3323 	if (improbable_static_if(key_true_to_false)) {
3324 		n++;
3325 	}
3326 	if (n != 3) {
3327 		panic("should now be disabled [n == %d, expected %d]", n, 3);
3328 	}
3329 
3330 	if (static_if(key_false)) {
3331 		n++;
3332 	}
3333 	if (probable_static_if(key_false)) {
3334 		n++;
3335 	}
3336 	if (improbable_static_if(key_false)) {
3337 		n++;
3338 	}
3339 	if (n != 3) {
3340 		panic("should still be disabled [n == %d, expected %d]", n, 3);
3341 	}
3342 
3343 	if (static_if(key_false_to_true)) {
3344 		n++;
3345 	}
3346 	if (probable_static_if(key_false_to_true)) {
3347 		n++;
3348 	}
3349 	if (improbable_static_if(key_false_to_true)) {
3350 		n++;
3351 	}
3352 	if (n != 6) {
3353 		panic("should now be disabled [n == %d, expected %d]", n, 3);
3354 	}
3355 }
3356 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, static_if_tests);
3357