xref: /xnu-8020.140.41/osfmk/tests/kernel_tests.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_protos.h>
48 #include <string.h>
49 
50 #if !(DEVELOPMENT || DEBUG)
51 #error "Testing is not enabled on RELEASE configurations"
52 #endif
53 
54 #include <tests/xnupost.h>
55 
56 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
57 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
58 
59 uint32_t total_post_tests_count = 0;
60 void xnupost_reset_panic_widgets(void);
61 
62 /* test declarations */
63 kern_return_t zalloc_test(void);
64 kern_return_t RandomULong_test(void);
65 kern_return_t kcdata_api_test(void);
66 kern_return_t ts_kernel_primitive_test(void);
67 kern_return_t ts_kernel_sleep_inheritor_test(void);
68 kern_return_t ts_kernel_gate_test(void);
69 kern_return_t ts_kernel_turnstile_chain_test(void);
70 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
71 
72 #if __ARM_VFP__
73 extern kern_return_t vfp_state_test(void);
74 #endif
75 
76 extern kern_return_t kprintf_hhx_test(void);
77 
78 #if defined(__arm__) || defined(__arm64__)
79 kern_return_t pmap_coredump_test(void);
80 #endif
81 
82 extern kern_return_t console_serial_test(void);
83 extern kern_return_t console_serial_parallel_log_tests(void);
84 extern kern_return_t test_os_log(void);
85 extern kern_return_t test_os_log_parallel(void);
86 extern kern_return_t bitmap_post_test(void);
87 extern kern_return_t counter_tests(void);
88 
89 #ifdef __arm64__
90 extern kern_return_t arm64_munger_test(void);
91 extern kern_return_t ex_cb_test(void);
92 #if __ARM_PAN_AVAILABLE__
93 extern kern_return_t arm64_pan_test(void);
94 #endif
95 #if defined(HAS_APPLE_PAC)
96 extern kern_return_t arm64_ropjop_test(void);
97 #endif /* defined(HAS_APPLE_PAC) */
98 #endif /* __arm64__ */
99 
100 extern kern_return_t test_thread_call(void);
101 
102 
/*
 * The single registered panic widget.  xnupost_register_panic_widget() arms
 * it and xnupost_process_kdb_stop() invokes it when a panic/assert fires;
 * xnupost_reset_panic_widgets() clears it between tests.
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
107 
108 struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
109 	                                   XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
110 	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log),
111 	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
112 #ifdef __arm64__
113 	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
114 	                                   XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
115 #if __ARM_PAN_AVAILABLE__
116 	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
117 #endif
118 #if defined(HAS_APPLE_PAC)
119 	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
120 #endif /* defined(HAS_APPLE_PAC) */
121 #endif /* __arm64__ */
122 	                                   XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
123 	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
124 	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
125 #if defined(__arm__) || defined(__arm64__)
126 	                                   XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
127 #endif
128 	                                   XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
129 	                                   //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
130 	                                   XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
131 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
132 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
133 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
134 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
135 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
136 	                                   XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
137 #if __ARM_VFP__
138 	                                   XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
139 #endif
140 	                                   XNUPOST_TEST_CONFIG_BASIC(vm_tests),
141 	                                   XNUPOST_TEST_CONFIG_BASIC(counter_tests)};
142 
143 uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
144 
/* Bits of the "kernPOST" boot-arg value. */
#define POSTARGS_RUN_TESTS 0x1            /* actually execute the tests at boot */
#define POSTARGS_CONTROLLER_AVAILABLE 0x2 /* a test controller can catch expected panics */
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4  /* honor the "kernPOST_config" runlist string */
/* Parsed "kernPOST" boot-arg; 0 means POST is disabled. */
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
/* Cached result of xnupost_parse_config(); KERN_INVALID_CAPABILITY == not parsed yet. */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
/* Raw "kernPOST_config" boot-arg string: comma-separated test-number ranges. */
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
154 
155 kern_return_t
xnupost_parse_config()156 xnupost_parse_config()
157 {
158 	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
159 		return parse_config_retval;
160 	}
161 	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
162 
163 	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
164 		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
165 	}
166 
167 	if (kernel_post_args != 0) {
168 		parse_config_retval = KERN_SUCCESS;
169 		goto out;
170 	}
171 	parse_config_retval = KERN_NOT_SUPPORTED;
172 out:
173 	return parse_config_retval;
174 }
175 
176 boolean_t
xnupost_should_run_test(uint32_t test_num)177 xnupost_should_run_test(uint32_t test_num)
178 {
179 	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
180 		int64_t begin = 0, end = 999999;
181 		char * b = kernel_post_test_configs;
182 		while (*b) {
183 			get_range_bounds(b, &begin, &end);
184 			if (test_num >= begin && test_num <= end) {
185 				return TRUE;
186 			}
187 
188 			/* skip to the next "," */
189 			while (*b != ',') {
190 				if (*b == '\0') {
191 					return FALSE;
192 				}
193 				b++;
194 			}
195 			/* skip past the ',' */
196 			b++;
197 		}
198 		return FALSE;
199 	}
200 	return TRUE;
201 }
202 
203 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)204 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
205 {
206 	if (KERN_SUCCESS != xnupost_parse_config()) {
207 		return KERN_FAILURE;
208 	}
209 
210 	xnupost_test_t testp;
211 	for (uint32_t i = 0; i < test_count; i++) {
212 		testp = &test_list[i];
213 		if (testp->xt_test_num == 0) {
214 			assert(total_post_tests_count < UINT16_MAX);
215 			testp->xt_test_num = (uint16_t)++total_post_tests_count;
216 		}
217 		/* make sure the boot-arg based test run list is honored */
218 		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
219 			testp->xt_config |= XT_CONFIG_IGNORE;
220 			if (xnupost_should_run_test(testp->xt_test_num)) {
221 				testp->xt_config &= ~(XT_CONFIG_IGNORE);
222 				testp->xt_config |= XT_CONFIG_RUN;
223 				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
224 			}
225 		}
226 		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
227 		    testp->xt_config);
228 	}
229 
230 	return KERN_SUCCESS;
231 }
232 
/*
 * Execute every test in test_list under the T_* harness.
 *
 * Skips everything unless the POSTARGS_RUN_TESTS boot-arg bit is set.
 * Each test is timed with mach_absolute_time(); tests expecting a panic are
 * skipped when no controller is attached, and XT_CONFIG_IGNORE tests are
 * skipped outright.  Per-test pass/fail is recorded in xt_test_actions;
 * the function itself returns KERN_SUCCESS regardless of test outcomes.
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* clear any widget left armed by the previous test */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		/* end time defaults to begin time in case the test is skipped */
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		/* pass/fail is judged against the per-test expected result */
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
297 
298 kern_return_t
kernel_list_tests()299 kernel_list_tests()
300 {
301 	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
302 }
303 
304 kern_return_t
kernel_do_post()305 kernel_do_post()
306 {
307 	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
308 }
309 
310 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)311 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
312 {
313 	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
314 		return KERN_RESOURCE_SHORTAGE;
315 	}
316 
317 	xt_panic_widgets.xtp_context_p = context;
318 	xt_panic_widgets.xtp_func      = funcp;
319 	xt_panic_widgets.xtp_func_name = funcname;
320 	xt_panic_widgets.xtp_outval_p  = outval;
321 
322 	return KERN_SUCCESS;
323 }
324 
325 void
xnupost_reset_panic_widgets()326 xnupost_reset_panic_widgets()
327 {
328 	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
329 }
330 
331 kern_return_t
xnupost_process_kdb_stop(const char * panic_s)332 xnupost_process_kdb_stop(const char * panic_s)
333 {
334 	xt_panic_return_t retval         = 0;
335 	struct xnupost_panic_widget * pw = &xt_panic_widgets;
336 	const char * name = "unknown";
337 	if (xt_panic_widgets.xtp_func_name) {
338 		name = xt_panic_widgets.xtp_func_name;
339 	}
340 
341 	/* bail early on if kernPOST is not set */
342 	if (kernel_post_args == 0) {
343 		return KERN_INVALID_CAPABILITY;
344 	}
345 
346 	if (xt_panic_widgets.xtp_func) {
347 		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
348 		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
349 	} else {
350 		return KERN_INVALID_CAPABILITY;
351 	}
352 
353 	switch (retval) {
354 	case XT_RET_W_SUCCESS:
355 		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
356 		/* KERN_SUCCESS means return from panic/assertion */
357 		return KERN_SUCCESS;
358 
359 	case XT_RET_W_FAIL:
360 		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
361 		return KERN_SUCCESS;
362 
363 	case XT_PANIC_W_FAIL:
364 		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
365 		return KERN_FAILURE;
366 
367 	case XT_PANIC_W_SUCCESS:
368 		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
369 		return KERN_FAILURE;
370 
371 	case XT_PANIC_UNRELATED:
372 	default:
373 		T_LOG("UNRELATED: Continuing to kdb_stop.");
374 		return KERN_FAILURE;
375 	}
376 }
377 
378 xt_panic_return_t
_xt_generic_assert_check(const char * s,void * str_to_match,void ** outval)379 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
380 {
381 	xt_panic_return_t ret = XT_PANIC_UNRELATED;
382 
383 	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
384 		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
385 		ret = XT_RET_W_SUCCESS;
386 	}
387 
388 	if (outval) {
389 		*outval = (void *)(uintptr_t)ret;
390 	}
391 	return ret;
392 }
393 
394 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)395 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
396 {
397 	uint32_t i = 0;
398 	xnupost_test_t testp;
399 	for (; i < test_count; i++) {
400 		testp                  = &test_list[i];
401 		testp->xt_begin_time   = 0;
402 		testp->xt_end_time     = 0;
403 		testp->xt_test_actions = XT_ACTION_NONE;
404 		testp->xt_retval       = -1;
405 	}
406 	return KERN_SUCCESS;
407 }
408 
409 
410 kern_return_t
zalloc_test(void)411 zalloc_test(void)
412 {
413 	zone_t test_zone;
414 	void * test_ptr;
415 
416 	T_SETUPBEGIN;
417 	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
418 	    ZC_DESTRUCTIBLE);
419 	T_ASSERT_NOTNULL(test_zone, NULL);
420 
421 	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
422 	T_SETUPEND;
423 
424 	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);
425 
426 	zfree(test_zone, test_ptr);
427 
428 	/* A sample report for perfdata */
429 	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");
430 
431 	return KERN_SUCCESS;
432 }
433 
/*
 * qsort() comparator: orders uint64_t values ascending.
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t lhs = *(const uint64_t *)a;
	const uint64_t rhs = *(const uint64_t *)b;

	/* (lhs > rhs) - (lhs < rhs) yields 1, -1 or 0 without overflow risk */
	return (lhs > rhs) - (lhs < rhs);
}
450 
/*
 * Count the number of set bits (population count) in a 64-bit value,
 * using Kernighan's clear-the-lowest-set-bit loop.
 */
static int
count_bits(uint64_t number)
{
	int set = 0;
	while (number != 0) {
		number &= number - 1; /* clears the lowest set bit */
		set++;
	}
	return set;
}
460 
kern_return_t
RandomULong_test()
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 *  a. RandomULong works
 *  b. The generated numbers match the following entropy criteria:
 *     For a thousand iterations, verify:
 *          1. mean entropy > 12 bits
 *          2. min entropy > 4 bits
 *          3. No Duplicate
 *          4. No incremental/decremental pattern in a window of 3
 *          5. No Zero
 *          6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
/* ceil(CONF_WINDOW_SIZE / 2); the trailing ">> 0" is a no-op */
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	/* "bit entropy" here == Hamming distance between consecutive samples */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		/* trend counts how monotone the window is: +1 per rise, -1 per fall */
		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 */
	/* sorting makes duplicates adjacent; note this destroys the sample order */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
597 
598 
/* KCDATA kernel api tests */
/* Shared descriptor (re)initialized by kcdata_api_test(). */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/*
 * Sample packed payload whose layout is described field-by-field by
 * test_disk_io_stats_def; used to exercise kcdata_add_type_definition().
 */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;    /* offset 0 */
	uint64_t disk_reads_size;     /* offset 1 * sizeof(uint64_t) */
	uint64_t io_priority_count[4];/* offset 2 * sizeof(uint64_t), 4 elements */
	uint64_t io_priority_size;    /* offset 6 * sizeof(uint64_t) */
} __attribute__((packed));
607 
/*
 * kcdata subtype layout description for struct sample_disk_io_stats above;
 * registered via kcdata_add_type_definition() in kcdata_api_test().  The
 * offsets and sizes must stay in sync with the struct definition.
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		/* 4-element array: count and element size packed together */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		/*
		 * NOTE(review): flagged ARRAY but kcs_elem_size is a plain
		 * sizeof(uint64_t) rather than KCS_SUBTYPE_PACK_SIZE — confirm
		 * against kern_cdata.h whether this is intentional.
		 */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
638 
/*
 * POST: exercise the kcdata (kernel chunked data) buffer API end to end —
 * negative-argument checks, static buffer init, zero/normal/oversized item
 * insertion, the uint32/uint64 convenience adders, array items, and custom
 * type registration.  Always returns KERN_SUCCESS; failures are reported
 * through the T_* harness.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* poison the length so the init is proven to overwrite it */
	test_kc_data.kcd_length   = 0xdeadbeef;

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	/* NOTE(review): mem[4]/mem[5] assume a 16-byte item header — confirm against kern_cdata.h */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	/* an "empty" buffer still contains the BEGIN and END item headers */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* array item header encodes the element type in the high 32 bits and count in the low */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
749 
750 /*
751  *  kern_return_t
752  *  kcdata_api_assert_tests()
753  *  {
754  *       kern_return_t retval       = 0;
755  *       void * assert_check_retval = NULL;
756  *       test_kc_data2.kcd_length   = 0xdeadbeef;
757  *       mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
758  *       T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
759  *
760  *       retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
761  *                                          KCFLAG_USE_MEMCOPY);
762  *
763  *       T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
764  *
765  *       retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
766  *       T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
767  *
768  *       // this will assert
769  *       retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
770  *       T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
771  *       T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
772  *
773  *       return KERN_SUCCESS;
774  *  }
775  */
776 
777 #if defined(__arm__) || defined(__arm64__)
778 
779 #include <arm/pmap.h>
780 
781 #define MAX_PMAP_OBJECT_ELEMENT 100000
782 
783 extern struct vm_object pmap_object_store; /* store pt pages */
784 extern unsigned long gPhysBase, gPhysSize, first_avail;
785 
786 /*
787  * Define macros to transverse the pmap object structures and extract
788  * physical page number with information from low global only
789  * This emulate how Astris extracts information from coredump
790  */
791 #if defined(__arm64__)
792 
793 static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)794 astris_vm_page_unpack_ptr(uintptr_t p)
795 {
796 	if (!p) {
797 		return (uintptr_t)0;
798 	}
799 
800 	return (p & lowGlo.lgPmapMemFromArrayMask)
801 	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
802 	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
803 }
804 
// assume next pointer is the first element
// (arm64: queue links are stored packed; unpack them through lowGlo fields)
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

#if defined(__arm__)

// assume next pointer is the first element
// (arm32: queue links are plain pointers, no unpacking needed)
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif

/* The first element of the queue is reached the same way as "next". */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* The queue is circular: iteration ends when the cursor returns to the head. */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

/*
 * Walk every element of the page queue, following the chain link located
 * lgPmapMemChainOffset bytes into each element — exactly as a coredump
 * consumer with access only to lowGlo would.
 */
#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* Page number -> physical address, using the page shift published in lowGlo. */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
826 
827 static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)828 astris_vm_page_get_phys_page(uintptr_t m)
829 {
830 	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
831 	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
832 	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
833 }
834 
/*
 * Verify that the low-memory globals (lowGlo) advertise a layout a coredump
 * consumer (Astris) can use: check the layout version/magic, check that the
 * constants mirrored from the live VM structures match, then walk the whole
 * pmap_object_store page queue and range-check every physical address.
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	/* The static region advertised by lowGlo must lie within [gPhysBase, first_avail). */
	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	/* The packed-pointer parameters must match the live VM packing params. */
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	/* Walk the pmap object's page queue the way Astris would. */
	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		/* Every page must fall inside managed physical memory. */
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* Guard against a corrupted (cyclic) queue. */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* The queue must not be empty. */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
876 #endif /* defined(__arm__) || defined(__arm64__) */
877 
/*
 * Per-thread arguments for the turnstile kernel-primitive test.
 * The "_b" counter pair is used before taking the test lock, the
 * "_a" pair after taking it.
 */
struct ts_kern_prim_test_args {
	int *end_barrier;        /* counter bumped when the thread is done */
	int *notify_b;           /* counter to bump before locking (may be NULL) */
	int *wait_event_b;       /* counter to wait on before locking (may be NULL) */
	int before_num;          /* value *wait_event_b must reach */
	int *notify_a;           /* counter to bump after locking (may be NULL) */
	int *wait_event_a;       /* counter to wait on after locking (may be NULL) */
	int after_num;           /* value *wait_event_a must reach */
	int priority_to_check;   /* expected sched_pri while holding the lock; 0 = skip check */
};
888 
/*
 * Block the calling thread until the atomic counter *var reaches num.
 * A NULL var means "nothing to wait for" and returns immediately.
 * Paired with wake_threads(), which increments the counter and issues
 * a thread_wakeup on its address.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			/*
			 * Re-check after assert_wait() to close the race with a
			 * concurrent wake_threads(): only block if the count is
			 * still short, otherwise cancel the pending wait.
			 */
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
905 
906 static void
wake_threads(int * var)907 wake_threads(
908 	int* var)
909 {
910 	if (var) {
911 		os_atomic_inc(var, relaxed);
912 		thread_wakeup((event_t) var);
913 	}
914 }
915 
916 extern void IOSleep(int);
917 
/*
 * Body for the turnstile kernel-primitive test threads: synchronize with
 * peers, take the turnstile test lock, optionally verify that the holder
 * was priority-boosted to the highest waiter's priority, then release and
 * signal completion. Runs as a kernel thread and never returns.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	/* Pre-lock rendezvous (waiters wait for the owner to hold the lock). */
	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	/* Post-lock rendezvous (owner waits for both waiters to be ready). */
	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	/* Give the waiters time to block on the turnstile and push priority. */
	IOSleep(100);

	if (info->priority_to_check) {
		/* Sample our own sched_pri under the thread lock. */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
951 
952 kern_return_t
ts_kernel_primitive_test(void)953 ts_kernel_primitive_test(void)
954 {
955 	thread_t owner, thread1, thread2;
956 	struct ts_kern_prim_test_args targs[2] = {};
957 	kern_return_t result;
958 	int end_barrier = 0;
959 	int owner_locked = 0;
960 	int waiters_ready = 0;
961 
962 	T_LOG("Testing turnstile kernel primitive");
963 
964 	targs[0].notify_b = NULL;
965 	targs[0].wait_event_b = NULL;
966 	targs[0].before_num = 0;
967 	targs[0].notify_a = &owner_locked;
968 	targs[0].wait_event_a = &waiters_ready;
969 	targs[0].after_num = 2;
970 	targs[0].priority_to_check = 90;
971 	targs[0].end_barrier = &end_barrier;
972 
973 	// Start owner with priority 80
974 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
975 	T_ASSERT(result == KERN_SUCCESS, "Starting owner");
976 
977 	targs[1].notify_b = &waiters_ready;
978 	targs[1].wait_event_b = &owner_locked;
979 	targs[1].before_num = 1;
980 	targs[1].notify_a = NULL;
981 	targs[1].wait_event_a = NULL;
982 	targs[1].after_num = 0;
983 	targs[1].priority_to_check = 0;
984 	targs[1].end_barrier = &end_barrier;
985 
986 	// Start waiters with priority 85 and 90
987 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
988 	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
989 
990 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
991 	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
992 
993 	wait_threads(&end_barrier, 3);
994 
995 	return KERN_SUCCESS;
996 }
997 
/* Selector values for info_sleep_inheritor_test.prim_type. */
#define MTX_LOCK 0
#define RW_LOCK 1

/* Thread count for the synch tests — presumably used by the test setup later in the file. */
#define NUM_THREADS 4
1002 
/* Shared bookkeeping for the multi-threaded synchronization tests. */
struct synch_test_common {
	unsigned int nthreads;   /* number of test threads spawned */
	thread_t *threads;       /* per-thread slots; NULL until started, (thread_t)1 once excluded */
	int max_pri;             /* highest priority assigned to any test thread */
	int test_done;           /* completion counter, bumped by notify_waiter() */
};
1009 
1010 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1011 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1012 {
1013 	info->nthreads = nthreads;
1014 	info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1015 	if (!info->threads) {
1016 		return ENOMEM;
1017 	}
1018 
1019 	return KERN_SUCCESS;
1020 }
1021 
/* Release the thread array allocated by init_synch_test_common(). */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree_type(thread_t, info->nthreads, info->threads);
}
1027 
/*
 * Spawn info->nthreads kernel threads running func, at increasing
 * priorities starting from 75 in steps of 5 (capped at 95, recorded in
 * info->max_pri). If sleep_after_first is set, pause 100ms after the
 * first thread so it reliably runs first.
 */
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	/* NULL slots signal "not yet started" to wait_for_waiters(). */
	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		/*
		 * Publish the thread with release ordering so pollers reading it
		 * with acquire see a fully started slot.
		 * NOTE(review): the slot is stored before result is checked, so on
		 * a start failure `thread` would be stale — confirm T_ASSERT halts
		 * the test on failure.
		 */
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}
1059 
/* Highest priority assigned by start_threads(), as an unsigned value. */
static unsigned int
get_max_pri(struct synch_test_common * info)
{
	return info->max_pri;
}
1065 
/* Block until every test thread has called notify_waiter(). */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1071 
/* Mark the calling test thread as finished; wakes wait_all_thread(). */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1077 
/*
 * Poll until every other test thread has started and gone to sleep
 * (left the TH_RUN state). Slots holding the sentinel (thread_t)1 were
 * excluded via exclude_current_waiter() and are skipped; the caller's
 * own slot is skipped too.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		/* First wait for the slot to be populated by start_threads(). */
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				/* (thread_t)1 marks a thread excluded from the wait set. */
				if (thread == (thread_t) 1) {
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				/* A thread that has not started yet cannot be "not running". */
				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1117 
/*
 * Replace the calling thread's slot in info->threads with the sentinel
 * (thread_t)1 so wait_for_waiters() will no longer wait on it.
 */
static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		/* Wait for the slot to be populated before inspecting it. */
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
1138 
/* State shared by the sleep-with-inheritor and gate tests. */
struct info_sleep_inheritor_test {
	struct synch_test_common head;  /* common thread bookkeeping (must be first) */
	lck_mtx_t mtx_lock;             /* used when prim_type == MTX_LOCK */
	lck_rw_t rw_lock;               /* used when prim_type == RW_LOCK */
	decl_lck_mtx_gate_data(, gate); /* embedded gate */
	boolean_t gate_closed;          /* test-tracked gate state */
	int prim_type;                  /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;         /* highest priority seen by thread_steal_work() */
	int synch_value;                /* rendezvous target for the synch counter */
	int synch;                      /* rendezvous counter */
	int value;
	int handoff_failure;            /* count of gate handoffs with no waiter */
	thread_t thread_inheritor;      /* current push target; NULL when unowned */
	bool use_alloc_gate;            /* operate on alloc_gate instead of the embedded gate */
	gate_t *alloc_gate;             /* heap-allocated gate (see primitive_gate_alloc) */
};
1157 
1158 static void
primitive_lock(struct info_sleep_inheritor_test * info)1159 primitive_lock(struct info_sleep_inheritor_test *info)
1160 {
1161 	switch (info->prim_type) {
1162 	case MTX_LOCK:
1163 		lck_mtx_lock(&info->mtx_lock);
1164 		break;
1165 	case RW_LOCK:
1166 		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1167 		break;
1168 	default:
1169 		panic("invalid type %d", info->prim_type);
1170 	}
1171 }
1172 
1173 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1174 primitive_unlock(struct info_sleep_inheritor_test *info)
1175 {
1176 	switch (info->prim_type) {
1177 	case MTX_LOCK:
1178 		lck_mtx_unlock(&info->mtx_lock);
1179 		break;
1180 	case RW_LOCK:
1181 		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1182 		break;
1183 	default:
1184 		panic("invalid type %d", info->prim_type);
1185 	}
1186 }
1187 
1188 static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test * info)1189 primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
1190 {
1191 	wait_result_t ret = KERN_SUCCESS;
1192 	switch (info->prim_type) {
1193 	case MTX_LOCK:
1194 		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1195 		break;
1196 	case RW_LOCK:
1197 		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1198 		break;
1199 	default:
1200 		panic("invalid type %d", info->prim_type);
1201 	}
1202 
1203 	return ret;
1204 }
1205 
1206 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1207 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1208 {
1209 	switch (info->prim_type) {
1210 	case MTX_LOCK:
1211 	case RW_LOCK:
1212 		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1213 		break;
1214 	default:
1215 		panic("invalid type %d", info->prim_type);
1216 	}
1217 }
1218 
1219 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1220 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1221 {
1222 	switch (info->prim_type) {
1223 	case MTX_LOCK:
1224 	case RW_LOCK:
1225 		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1226 		break;
1227 	default:
1228 		panic("invalid type %d", info->prim_type);
1229 	}
1230 	return;
1231 }
1232 
1233 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1234 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1235 {
1236 	switch (info->prim_type) {
1237 	case MTX_LOCK:
1238 	case RW_LOCK:
1239 		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1240 		break;
1241 	default:
1242 		panic("invalid type %d", info->prim_type);
1243 	}
1244 	return;
1245 }
1246 
1247 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1248 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1249 {
1250 	gate_t *gate = &info->gate;
1251 	if (info->use_alloc_gate == true) {
1252 		gate = info->alloc_gate;
1253 	}
1254 	kern_return_t ret = KERN_SUCCESS;
1255 	switch (info->prim_type) {
1256 	case MTX_LOCK:
1257 		ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1258 		break;
1259 	case RW_LOCK:
1260 		ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1261 		break;
1262 	default:
1263 		panic("invalid type %d", info->prim_type);
1264 	}
1265 	return ret;
1266 }
1267 
1268 static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test * info)1269 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1270 {
1271 	gate_t *gate = &info->gate;
1272 	if (info->use_alloc_gate == true) {
1273 		gate = info->alloc_gate;
1274 	}
1275 	gate_wait_result_t ret = GATE_OPENED;
1276 	switch (info->prim_type) {
1277 	case MTX_LOCK:
1278 		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1279 		break;
1280 	case RW_LOCK:
1281 		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1282 		break;
1283 	default:
1284 		panic("invalid type %d", info->prim_type);
1285 	}
1286 	return ret;
1287 }
1288 
1289 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1290 primitive_gate_open(struct info_sleep_inheritor_test *info)
1291 {
1292 	gate_t *gate = &info->gate;
1293 	if (info->use_alloc_gate == true) {
1294 		gate = info->alloc_gate;
1295 	}
1296 	switch (info->prim_type) {
1297 	case MTX_LOCK:
1298 		lck_mtx_gate_open(&info->mtx_lock, gate);
1299 		break;
1300 	case RW_LOCK:
1301 		lck_rw_gate_open(&info->rw_lock, gate);
1302 		break;
1303 	default:
1304 		panic("invalid type %d", info->prim_type);
1305 	}
1306 }
1307 
1308 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1309 primitive_gate_close(struct info_sleep_inheritor_test *info)
1310 {
1311 	gate_t *gate = &info->gate;
1312 	if (info->use_alloc_gate == true) {
1313 		gate = info->alloc_gate;
1314 	}
1315 
1316 	switch (info->prim_type) {
1317 	case MTX_LOCK:
1318 		lck_mtx_gate_close(&info->mtx_lock, gate);
1319 		break;
1320 	case RW_LOCK:
1321 		lck_rw_gate_close(&info->rw_lock, gate);
1322 		break;
1323 	default:
1324 		panic("invalid type %d", info->prim_type);
1325 	}
1326 }
1327 
1328 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1329 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1330 {
1331 	gate_t *gate = &info->gate;
1332 	if (info->use_alloc_gate == true) {
1333 		gate = info->alloc_gate;
1334 	}
1335 
1336 	switch (info->prim_type) {
1337 	case MTX_LOCK:
1338 		lck_mtx_gate_steal(&info->mtx_lock, gate);
1339 		break;
1340 	case RW_LOCK:
1341 		lck_rw_gate_steal(&info->rw_lock, gate);
1342 		break;
1343 	default:
1344 		panic("invalid type %d", info->prim_type);
1345 	}
1346 }
1347 
1348 static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test * info,int flags)1349 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1350 {
1351 	gate_t *gate = &info->gate;
1352 	if (info->use_alloc_gate == true) {
1353 		gate = info->alloc_gate;
1354 	}
1355 
1356 	kern_return_t ret = KERN_SUCCESS;
1357 	switch (info->prim_type) {
1358 	case MTX_LOCK:
1359 		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1360 		break;
1361 	case RW_LOCK:
1362 		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1363 		break;
1364 	default:
1365 		panic("invalid type %d", info->prim_type);
1366 	}
1367 	return ret;
1368 }
1369 
1370 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1371 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1372 {
1373 	gate_t *gate = &info->gate;
1374 	if (info->use_alloc_gate == true) {
1375 		gate = info->alloc_gate;
1376 	}
1377 
1378 	switch (info->prim_type) {
1379 	case MTX_LOCK:
1380 		lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1381 		break;
1382 	case RW_LOCK:
1383 		lck_rw_gate_assert(&info->rw_lock, gate, type);
1384 		break;
1385 	default:
1386 		panic("invalid type %d", info->prim_type);
1387 	}
1388 }
1389 
1390 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1391 primitive_gate_init(struct info_sleep_inheritor_test *info)
1392 {
1393 	switch (info->prim_type) {
1394 	case MTX_LOCK:
1395 		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1396 		break;
1397 	case RW_LOCK:
1398 		lck_rw_gate_init(&info->rw_lock, &info->gate);
1399 		break;
1400 	default:
1401 		panic("invalid type %d", info->prim_type);
1402 	}
1403 }
1404 
1405 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1406 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1407 {
1408 	switch (info->prim_type) {
1409 	case MTX_LOCK:
1410 		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1411 		break;
1412 	case RW_LOCK:
1413 		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1414 		break;
1415 	default:
1416 		panic("invalid type %d", info->prim_type);
1417 	}
1418 }
1419 
1420 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1421 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1422 {
1423 	gate_t *gate;
1424 	switch (info->prim_type) {
1425 	case MTX_LOCK:
1426 		gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1427 		break;
1428 	case RW_LOCK:
1429 		gate = lck_rw_gate_alloc_init(&info->rw_lock);
1430 		break;
1431 	default:
1432 		panic("invalid type %d", info->prim_type);
1433 	}
1434 	info->alloc_gate = gate;
1435 }
1436 
1437 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1438 primitive_gate_free(struct info_sleep_inheritor_test *info)
1439 {
1440 	T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1441 
1442 	switch (info->prim_type) {
1443 	case MTX_LOCK:
1444 		lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1445 		break;
1446 	case RW_LOCK:
1447 		lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1448 		break;
1449 	default:
1450 		panic("invalid type %d", info->prim_type);
1451 	}
1452 	info->alloc_gate = NULL;
1453 }
1454 
/*
 * Test body: use sleep_with_inheritor/wakeup_one_with_inheritor like a
 * mutex. The first thread to claim thread_inheritor becomes the owner;
 * the rest sleep pushing on it. Each owner does "work" (IOSleep + value++),
 * then wakes exactly one successor. The last handoff finds no waiter,
 * which is counted once in handoff_failure.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* First in: become the owner. */
		info->thread_inheritor = current_thread();
	} else {
		/* Sleep, pushing our priority onto the current owner. */
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	/* "Critical section": we are the owner here. */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	/* Hand ownership to exactly one waiter (stores it in thread_inheritor). */
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* No waiter left: only the last owner may see this, and only once. */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* wakeup_one_with_inheritor returned a +1 ref on the woken thread. */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* All pushes must be gone by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1507 
/*
 * Test body: the first thread becomes the sole inheritor and all others
 * sleep pushing on it. Once every waiter is blocked, the inheritor must
 * observe a sched_pri equal to the highest waiter priority, then wakes
 * everyone.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* First in: become the inheritor everyone pushes on. */
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* Wait until all other threads have announced themselves... */
		wait_threads(&info->synch, info->synch_value - 1);

		/* ...and are actually blocked, so their push is in effect. */
		wait_for_waiters((struct synch_test_common *)info);

		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* All pushes must be gone by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1547 
/*
 * Test body for change_sleep_inheritor: the first thread becomes the
 * inheritor, the second to arrive "steals" the inheritorship (redirecting
 * every waiter's push to itself) and must then observe a sched_pri equal
 * to the highest priority among the remaining waiters. Threads that stop
 * participating exclude themselves from wait_for_waiters().
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* First in: initial inheritor; drop out of the waiter set. */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		/* Two threads (inheritor + stealer) do not bump synch. */
		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* Only wake if the stealer has not taken over. */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* Second in: steal the inheritorship and the pushes. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* The stolen push must equal the highest waiter priority seen. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* Plain waiter: record our priority for the stealer's check. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* All pushes must be gone by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1608 
/*
 * Test body for sleeping with a NULL inheritor: every thread but the last
 * clears thread_inheritor and sleeps (no push is transferred); the thread
 * that drops info->value to zero wakes everyone.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* Last one in: release all sleepers. */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* Sleep with no inheritor — nobody receives a push. */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* No pushes should ever have been applied to us. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1635 
1636 static void
thread_mtx_work(void * args,__unused wait_result_t wr)1637 thread_mtx_work(
1638 	void *args,
1639 	__unused wait_result_t wr)
1640 {
1641 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1642 	uint my_pri = current_thread()->sched_pri;
1643 	int i;
1644 	u_int8_t rand;
1645 	unsigned int mod_rand;
1646 	uint max_pri;
1647 
1648 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1649 
1650 	for (i = 0; i < 10; i++) {
1651 		lck_mtx_lock(&info->mtx_lock);
1652 		if (info->thread_inheritor == NULL) {
1653 			info->thread_inheritor = current_thread();
1654 			lck_mtx_unlock(&info->mtx_lock);
1655 
1656 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1657 
1658 			wait_threads(&info->synch, info->synch_value - 1);
1659 			wait_for_waiters((struct synch_test_common *)info);
1660 			max_pri = get_max_pri((struct synch_test_common *) info);
1661 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1662 
1663 			os_atomic_store(&info->synch, 0, relaxed);
1664 
1665 			lck_mtx_lock(&info->mtx_lock);
1666 			info->thread_inheritor = NULL;
1667 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1668 			lck_mtx_unlock(&info->mtx_lock);
1669 			continue;
1670 		}
1671 
1672 		read_random(&rand, sizeof(rand));
1673 		mod_rand = rand % 2;
1674 
1675 		wake_threads(&info->synch);
1676 		switch (mod_rand) {
1677 		case 0:
1678 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1679 			lck_mtx_unlock(&info->mtx_lock);
1680 			break;
1681 		case 1:
1682 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1683 			break;
1684 		default:
1685 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1686 		}
1687 	}
1688 
1689 	/*
1690 	 * spin here to stop using the lock as mutex
1691 	 */
1692 	wake_threads(&info->synch);
1693 	wait_threads(&info->synch, info->synch_value);
1694 
1695 	for (i = 0; i < 10; i++) {
1696 		/* read_random might sleep so read it before acquiring the mtx as spin */
1697 		read_random(&rand, sizeof(rand));
1698 
1699 		lck_mtx_lock_spin(&info->mtx_lock);
1700 		if (info->thread_inheritor == NULL) {
1701 			info->thread_inheritor = current_thread();
1702 			lck_mtx_unlock(&info->mtx_lock);
1703 
1704 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1705 			wait_for_waiters((struct synch_test_common *)info);
1706 			max_pri = get_max_pri((struct synch_test_common *) info);
1707 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1708 
1709 			lck_mtx_lock_spin(&info->mtx_lock);
1710 			info->thread_inheritor = NULL;
1711 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1712 			lck_mtx_unlock(&info->mtx_lock);
1713 			continue;
1714 		}
1715 
1716 		mod_rand = rand % 2;
1717 		switch (mod_rand) {
1718 		case 0:
1719 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1720 			lck_mtx_unlock(&info->mtx_lock);
1721 			break;
1722 		case 1:
1723 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1724 			lck_mtx_unlock(&info->mtx_lock);
1725 			break;
1726 		default:
1727 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1728 		}
1729 	}
1730 	assert(current_thread()->kern_promotion_schedpri == 0);
1731 	notify_waiter((struct synch_test_common *)info);
1732 
1733 	thread_terminate_self();
1734 }
1735 
/*
 * Test body exercising lck_rw_sleep_with_inheritor with every rw sleep
 * mode (DEFAULT, UNLOCK, SHARED, EXCLUSIVE), chosen at random each round.
 * The first thread to upgrade shared->exclusive and claim
 * thread_inheritor collects the pushes, verifies its boosted priority,
 * then wakes all sleepers.
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				/* Upgrade succeeded; re-check under exclusive hold. */
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					/* Wait for everyone else to announce and block. */
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				/* Upgrade failed and dropped the lock: start the round over. */
				goto try_again;
			}
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			/* Re-acquire with the original hold type on wakeup. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			/* Leave the lock dropped on wakeup. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			/* Re-acquire shared on wakeup regardless of original type. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			/* Re-acquire exclusive on wakeup regardless of original type. */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/* All pushes must be gone by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1812 
1813 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)1814 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
1815 {
1816 	info->prim_type = prim_type;
1817 	info->synch = 0;
1818 	info->synch_value = info->head.nthreads;
1819 
1820 	info->thread_inheritor = NULL;
1821 
1822 	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
1823 	wait_all_thread((struct synch_test_common *)info);
1824 }
1825 
1826 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)1827 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
1828 {
1829 	info->prim_type = prim_type;
1830 
1831 	info->synch = 0;
1832 	info->synch_value = info->head.nthreads;
1833 	info->value = 0;
1834 	info->handoff_failure = 0;
1835 	info->thread_inheritor = NULL;
1836 
1837 	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
1838 	wait_all_thread((struct synch_test_common *)info);
1839 
1840 	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
1841 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
1842 }
1843 
1844 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)1845 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
1846 {
1847 	info->prim_type = prim_type;
1848 
1849 	info->thread_inheritor = NULL;
1850 	info->steal_pri = 0;
1851 	info->synch = 0;
1852 	info->synch_value = info->head.nthreads;
1853 
1854 	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
1855 	wait_all_thread((struct synch_test_common *)info);
1856 }
1857 
1858 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)1859 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
1860 {
1861 	info->prim_type = prim_type;
1862 	info->synch = 0;
1863 	info->synch_value = info->head.nthreads;
1864 
1865 	info->thread_inheritor = NULL;
1866 	info->value = info->head.nthreads;
1867 
1868 	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
1869 	wait_all_thread((struct synch_test_common *)info);
1870 }
1871 
1872 static void
test_rw_lock(struct info_sleep_inheritor_test * info)1873 test_rw_lock(struct info_sleep_inheritor_test *info)
1874 {
1875 	info->thread_inheritor = NULL;
1876 	info->value = info->head.nthreads;
1877 	info->synch = 0;
1878 	info->synch_value = info->head.nthreads;
1879 
1880 	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
1881 	wait_all_thread((struct synch_test_common *)info);
1882 }
1883 
1884 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)1885 test_mtx_lock(struct info_sleep_inheritor_test *info)
1886 {
1887 	info->thread_inheritor = NULL;
1888 	info->value = info->head.nthreads;
1889 	info->synch = 0;
1890 	info->synch_value = info->head.nthreads;
1891 
1892 	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
1893 	wait_all_thread((struct synch_test_common *)info);
1894 }
1895 
/*
 * POST test for the sleep-with-inheritor primitives.
 *
 * Sets up one mutex and one rw lock sharing a lock group, then runs
 * each worker scenario above over both lock types, checking that
 * blocked threads' priority is correctly pushed onto the inheritor.
 */
kern_return_t
ts_kernel_sleep_inheritor_test(void)
{
	struct info_sleep_inheritor_test info = {};

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with mxt sleep");
	test_change_sleep_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with rw sleep");
	test_change_sleep_inheritor(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, RW_LOCK);

	/*
	 * Testing mtx locking combinations
	 */
	T_LOG("Testing mtx locking combinations");
	test_mtx_lock(&info);

	/*
	 * Testing rw locking combinations
	 */
	T_LOG("Testing rw locking combinations");
	test_rw_lock(&info);

	/* Tear down threads, locks and the lock group. */
	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
1984 
/*
 * Worker for the gate steal test.
 *
 * The first thread to run closes the gate and records itself as
 * thread_inheritor.  A second thread then steals the gate with
 * primitive_gate_steal() and verifies the remaining waiters' push lands
 * on it.  Every other thread records its priority in steal_pri and
 * waits on the gate.  Both the original keeper and the thief exclude
 * themselves from the waiter accounting, hence the synch_value - 2
 * rendezvous.
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* First thread: become the keeper and close the gate. */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		/* Only open if the gate was not stolen from us in the meantime. */
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* Second thread: steal the gate from the keeper. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* The waiters' push must now land on the thief. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* Everybody else: publish our priority and block on the gate. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* Any inherited push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2044 
/*
 * Worker for the gate alloc/free test.
 *
 * Exactly one thread succeeds in closing the heap-allocated gate; it
 * waits until all the others are blocked in gate_wait, then opens and
 * frees the gate.  Every waiter checks it was woken with GATE_OPENED.
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* Winner: hold the gate while the others queue up. */
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		primitive_gate_open(info);
		primitive_gate_free(info);
	} else {
		/* Loser: the gate is closed; block until the winner opens it. */
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2080 
/*
 * Worker for the gate handoff test.
 *
 * The gate is used like a mutex protecting info->value: each thread
 * either closes the gate or waits for a direct handoff (GATE_HANDOFF),
 * bumps the counter while "holding" the gate, and hands it off to the
 * next waiter.  Exactly one handoff — the last one — is expected to
 * find no waiter (KERN_NOT_WAITING) and opens the gate instead.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	/* Acquire the gate: close it, or wait for a handoff from the holder. */
	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* Critical section: the gate (not the lock) protects value. */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* Last thread: nobody left to hand off to; open the gate. */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2128 
/*
 * Worker for the gate push test.
 *
 * The first thread to close the gate does "the work": it waits until
 * all the other threads are blocked in gate_wait, asserts their push
 * raised it to the maximum waiter priority, then clears work_to_do and
 * opens the gate.  Threads that lose the race block on the gate and
 * re-check work_to_do when woken.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			/* The waiters' push must land on the gate holder. */
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			/* Gate already held by the worker: wait, then re-check. */
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* Any inherited push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2170 
2171 static void
test_gate_push(struct info_sleep_inheritor_test * info,int prim_type)2172 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2173 {
2174 	info->prim_type = prim_type;
2175 	info->use_alloc_gate = false;
2176 
2177 	primitive_gate_init(info);
2178 	info->work_to_do = TRUE;
2179 	info->synch = 0;
2180 	info->synch_value = NUM_THREADS;
2181 
2182 	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2183 	wait_all_thread((struct synch_test_common *)info);
2184 
2185 	primitive_gate_destroy(info);
2186 }
2187 
2188 static void
test_gate_handoff(struct info_sleep_inheritor_test * info,int prim_type)2189 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2190 {
2191 	info->prim_type = prim_type;
2192 	info->use_alloc_gate = false;
2193 
2194 	primitive_gate_init(info);
2195 
2196 	info->synch = 0;
2197 	info->synch_value = NUM_THREADS;
2198 	info->value = 0;
2199 	info->handoff_failure = 0;
2200 
2201 	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2202 	wait_all_thread((struct synch_test_common *)info);
2203 
2204 	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2205 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2206 
2207 	primitive_gate_destroy(info);
2208 }
2209 
2210 static void
test_gate_steal(struct info_sleep_inheritor_test * info,int prim_type)2211 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2212 {
2213 	info->prim_type = prim_type;
2214 	info->use_alloc_gate = false;
2215 
2216 	primitive_gate_init(info);
2217 
2218 	info->synch = 0;
2219 	info->synch_value = NUM_THREADS;
2220 	info->thread_inheritor = NULL;
2221 	info->steal_pri = 0;
2222 
2223 	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2224 	wait_all_thread((struct synch_test_common *)info);
2225 
2226 	primitive_gate_destroy(info);
2227 }
2228 
2229 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2230 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2231 {
2232 	(void)info;
2233 	(void) prim_type;
2234 	info->prim_type = prim_type;
2235 	info->use_alloc_gate = true;
2236 
2237 	primitive_gate_alloc(info);
2238 
2239 	info->synch = 0;
2240 	info->synch_value = NUM_THREADS;
2241 
2242 	start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2243 	wait_all_thread((struct synch_test_common *)info);
2244 
2245 	T_ASSERT(info->alloc_gate == NULL, "gate free");
2246 	info->use_alloc_gate = false;
2247 }
2248 
/*
 * POST test for the gate primitive.
 *
 * Sets up a mutex and an rw lock sharing a lock group, then runs the
 * push, handoff, steal and alloc/free gate scenarios over both lock
 * types.
 */
kern_return_t
ts_kernel_gate_test(void)
{
	struct info_sleep_inheritor_test info = {};

	T_LOG("Testing gate primitive");

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing the priority inherited by the keeper
	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
	 */
	T_LOG("Testing gate push, mtx");
	test_gate_push(&info, MTX_LOCK);

	T_LOG("Testing gate push, rw");
	test_gate_push(&info, RW_LOCK);

	/*
	 * Testing the handoff
	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
	 */
	T_LOG("Testing gate handoff, mtx");
	test_gate_handoff(&info, MTX_LOCK);

	T_LOG("Testing gate handoff, rw");
	test_gate_handoff(&info, RW_LOCK);

	/*
	 * Testing the steal
	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
	 */
	T_LOG("Testing gate steal, mtx");
	test_gate_steal(&info, MTX_LOCK);

	T_LOG("Testing gate steal, rw");
	test_gate_steal(&info, RW_LOCK);

	/*
	 * Testing the alloc/free
	 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
	 */
	T_LOG("Testing gate alloc/free, mtx");
	test_gate_alloc_free(&info, MTX_LOCK);

	T_LOG("Testing gate alloc/free, rw");
	test_gate_alloc_free(&info, RW_LOCK);

	/* Tear down threads, locks and the lock group. */
	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
2314 
#define NUM_THREAD_CHAIN 6

/*
 * Shared state for the turnstile chain tests: one mutex, a gate per
 * test thread, and the rendezvous counters used to line the threads up
 * before a chain is started.
 */
struct turnstile_chain_test {
	struct synch_test_common head;   /* common thread bookkeeping (must be first) */
	lck_mtx_t mtx_lock;              /* protects the gates and sleep events */
	int synch_value;                 /* number of threads expected at a rendezvous */
	int synch;                       /* first rendezvous counter */
	int synch2;                      /* second rendezvous counter */
	gate_t gates[NUM_THREAD_CHAIN];  /* one gate per test thread */
};
2325 
/*
 * Worker for the mixed sleep/gate chain test.
 *
 * Even-indexed threads each close their own gate.  The chain then
 * alternates primitives: odd thread i waits on gate i-1 and, once
 * through, wakes thread i+1 (which slept with thread i as inheritor)
 * via wakeup_one_with_inheritor; even thread i (i > 0) sleeps with
 * thread i-1 as inheritor and opens gate i when woken.  Thread 0 kicks
 * the chain off by opening gate 0 once everyone is blocked.  The push
 * is expected to propagate along the whole chain, so every thread
 * asserts it runs at the max waiter priority when its turn comes.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Rendezvous: make sure every gate is closed before chaining. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to block, then open gate 0. */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Find our position i in the chain and the adjacent events. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd thread: wait on the previous (even) thread's gate. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Wake the next thread without taking over its waiters' push. */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			/* Even thread: sleep with the previous thread as inheritor. */
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* Any inherited push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2421 
/*
 * Worker for the gate chain test.
 *
 * Every thread closes its own gate, then thread i blocks on thread
 * (i-1)'s gate.  Thread 0, after checking the chained push raised it to
 * the max waiter priority, opens its gate; each woken thread asserts
 * the same and opens the next gate in turn.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Close our own gate (i is our index after this loop). */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	/* Rendezvous: make sure every gate is closed before chaining. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to block, then open gate 0. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Wait on the previous thread's gate, then open our own. */
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* Any inherited push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2484 
/*
 * Worker for the sleep-with-inheritor chain test.
 *
 * Thread i sleeps on thread (i-1)'s event with thread i-1 as inheritor,
 * forming a chain whose push lands on thread 0.  Thread 0 waits for
 * everyone to block, asserts it runs at the max waiter priority, then
 * wakes thread 1 without transferring the push; each woken thread does
 * the same for the next one, and finally issues a wakeup_all to drop
 * its own inheritership.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait for everyone to block, then start the wakeups. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* Find our position i in the chain and the adjacent events. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake the next thread without taking over its waiters' push. */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* Any inherited push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2562 
2563 static void
test_sleep_chain(struct turnstile_chain_test * info)2564 test_sleep_chain(struct turnstile_chain_test *info)
2565 {
2566 	info->synch = 0;
2567 	info->synch_value = info->head.nthreads;
2568 
2569 	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
2570 	wait_all_thread((struct synch_test_common *)info);
2571 }
2572 
2573 static void
test_gate_chain(struct turnstile_chain_test * info)2574 test_gate_chain(struct turnstile_chain_test *info)
2575 {
2576 	info->synch = 0;
2577 	info->synch2 = 0;
2578 	info->synch_value = info->head.nthreads;
2579 
2580 	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
2581 	wait_all_thread((struct synch_test_common *)info);
2582 }
2583 
2584 static void
test_sleep_gate_chain(struct turnstile_chain_test * info)2585 test_sleep_gate_chain(struct turnstile_chain_test *info)
2586 {
2587 	info->synch = 0;
2588 	info->synch2 = 0;
2589 	info->synch_value = info->head.nthreads;
2590 
2591 	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
2592 	wait_all_thread((struct synch_test_common *)info);
2593 }
2594 
/*
 * POST test for turnstile push propagation along chains of sleep events
 * and gates.
 *
 * Sets up one mutex and NUM_THREAD_CHAIN gates (one per thread), then
 * runs the sleep-chain, gate-chain and mixed-chain scenarios.
 */
kern_return_t
ts_kernel_turnstile_chain_test(void)
{
	struct turnstile_chain_test info = {};
	int i;

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
	}

	T_LOG("Testing sleep chain, lck");
	test_sleep_chain(&info);

	T_LOG("Testing gate chain, lck");
	test_gate_chain(&info);

	T_LOG("Testing sleep and gate chain, lck");
	test_sleep_gate_chain(&info);

	/* Tear down threads, gates, the mutex and the lock group. */
	destroy_synch_test_common((struct synch_test_common *)&info);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
	}
	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
2631 
2632 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)2633 ts_kernel_timingsafe_bcmp_test(void)
2634 {
2635 	int i, buf_size;
2636 	char *buf = NULL;
2637 
2638 	// empty
2639 	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
2640 	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
2641 	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
2642 
2643 	// equal
2644 	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
2645 
2646 	// unequal
2647 	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
2648 	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
2649 	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
2650 	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
2651 
2652 	// all possible bitwise differences
2653 	for (i = 1; i < 256; i += 1) {
2654 		unsigned char a = 0;
2655 		unsigned char b = (unsigned char)i;
2656 
2657 		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
2658 	}
2659 
2660 	// large
2661 	buf_size = 1024 * 16;
2662 	buf = kalloc_data(buf_size, Z_WAITOK);
2663 	T_EXPECT_NOTNULL(buf, "kalloc of buf");
2664 
2665 	read_random(buf, buf_size);
2666 	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
2667 	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
2668 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
2669 
2670 	memcpy(buf + 128, buf, 128);
2671 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
2672 
2673 	kfree_data(buf, buf_size);
2674 
2675 	return KERN_SUCCESS;
2676 }
2677 
/*
 * POST test exercising the %hx (short), %hhx (char) and %llx
 * (long long) length modifiers of the kernel printf path.
 */
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	T_PASS("kprintf_hhx_test passed");
	return KERN_SUCCESS;
}
2690