xref: /xnu-8019.80.24/osfmk/tests/kernel_tests.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_protos.h>
48 #include <string.h>
49 
50 #if !(DEVELOPMENT || DEBUG)
51 #error "Testing is not enabled on RELEASE configurations"
52 #endif
53 
54 #include <tests/xnupost.h>
55 
56 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
57 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
58 
59 uint32_t total_post_tests_count = 0;
60 void xnupost_reset_panic_widgets(void);
61 
62 /* test declarations */
63 kern_return_t zalloc_test(void);
64 kern_return_t RandomULong_test(void);
65 kern_return_t kcdata_api_test(void);
66 kern_return_t ts_kernel_primitive_test(void);
67 kern_return_t ts_kernel_sleep_inheritor_test(void);
68 kern_return_t ts_kernel_gate_test(void);
69 kern_return_t ts_kernel_turnstile_chain_test(void);
70 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
71 
72 #if __ARM_VFP__
73 extern kern_return_t vfp_state_test(void);
74 #endif
75 
76 extern kern_return_t kprintf_hhx_test(void);
77 
78 #if defined(__arm__) || defined(__arm64__)
79 kern_return_t pmap_coredump_test(void);
80 #endif
81 
82 extern kern_return_t console_serial_test(void);
83 extern kern_return_t console_serial_parallel_log_tests(void);
84 extern kern_return_t test_os_log(void);
85 extern kern_return_t test_os_log_parallel(void);
86 extern kern_return_t bitmap_post_test(void);
87 extern kern_return_t counter_tests(void);
88 
89 #ifdef __arm64__
90 extern kern_return_t arm64_munger_test(void);
91 extern kern_return_t ex_cb_test(void);
92 #if __ARM_PAN_AVAILABLE__
93 extern kern_return_t arm64_pan_test(void);
94 #endif
95 #if defined(HAS_APPLE_PAC)
96 extern kern_return_t arm64_ropjop_test(void);
97 #endif /* defined(HAS_APPLE_PAC) */
98 #endif /* __arm64__ */
99 
100 extern kern_return_t test_thread_call(void);
101 
102 
/*
 * Global slot for the single registered panic "widget" — a callback that
 * xnupost_process_kdb_stop() invokes when a test deliberately panics.
 * All fields start NULL (no widget armed); see
 * xnupost_register_panic_widget() / xnupost_reset_panic_widgets().
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
107 
/*
 * Table of kernel power-on self tests (POST). Each entry is created with
 * XNUPOST_TEST_CONFIG_BASIC (run once, expect T_STATE_PASS). Entries are
 * conditionally compiled per architecture/feature; the order here fixes the
 * test numbers assigned lazily by xnupost_list_tests().
 */
struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */
	                                   XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
	                                   XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	                                   //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	                                   XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	                                   XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	                                   XNUPOST_TEST_CONFIG_BASIC(counter_tests)};
142 
/* Number of entries in kernel_post_tests[] above. */
uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);

/* Bit flags parsed out of the "kernPOST" boot-arg. */
#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
/* KERN_INVALID_CAPABILITY doubles as the "not yet parsed" sentinel. */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
/* Raw "kernPOST_config" boot-arg string (comma-separated test ranges). */
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
154 
155 kern_return_t
xnupost_parse_config()156 xnupost_parse_config()
157 {
158 	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
159 		return parse_config_retval;
160 	}
161 	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
162 
163 	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
164 		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
165 	}
166 
167 	if (kernel_post_args != 0) {
168 		parse_config_retval = KERN_SUCCESS;
169 		goto out;
170 	}
171 	parse_config_retval = KERN_NOT_SUPPORTED;
172 out:
173 	return parse_config_retval;
174 }
175 
176 boolean_t
xnupost_should_run_test(uint32_t test_num)177 xnupost_should_run_test(uint32_t test_num)
178 {
179 	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
180 		int64_t begin = 0, end = 999999;
181 		char * b = kernel_post_test_configs;
182 		while (*b) {
183 			get_range_bounds(b, &begin, &end);
184 			if (test_num >= begin && test_num <= end) {
185 				return TRUE;
186 			}
187 
188 			/* skip to the next "," */
189 			while (*b != ',') {
190 				if (*b == '\0') {
191 					return FALSE;
192 				}
193 				b++;
194 			}
195 			/* skip past the ',' */
196 			b++;
197 		}
198 		return FALSE;
199 	}
200 	return TRUE;
201 }
202 
203 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)204 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
205 {
206 	if (KERN_SUCCESS != xnupost_parse_config()) {
207 		return KERN_FAILURE;
208 	}
209 
210 	xnupost_test_t testp;
211 	for (uint32_t i = 0; i < test_count; i++) {
212 		testp = &test_list[i];
213 		if (testp->xt_test_num == 0) {
214 			assert(total_post_tests_count < UINT16_MAX);
215 			testp->xt_test_num = (uint16_t)++total_post_tests_count;
216 		}
217 		/* make sure the boot-arg based test run list is honored */
218 		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
219 			testp->xt_config |= XT_CONFIG_IGNORE;
220 			if (xnupost_should_run_test(testp->xt_test_num)) {
221 				testp->xt_config &= ~(XT_CONFIG_IGNORE);
222 				testp->xt_config |= XT_CONFIG_RUN;
223 				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
224 			}
225 		}
226 		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
227 		    testp->xt_config);
228 	}
229 
230 	return KERN_SUCCESS;
231 }
232 
/*
 * Execute every test in test_list under the T_* ktest harness.
 *
 * Skips everything (returning KERN_SUCCESS) unless POSTARGS_RUN_TESTS was
 * set via the kernPOST boot-arg. For each test it records begin/end
 * timestamps, maps the harness verdict onto xt_test_actions
 * (PASSED/FAILED/SKIPPED), and resolves tests that called no T_* macros
 * from their plain return value.
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* disarm any panic widget a previous test left registered */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		/* initialize end == begin so a skipped test shows zero duration */
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
297 
/* Convenience wrapper: list the built-in kernel POST test table. */
kern_return_t
kernel_list_tests()
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}
303 
/* Convenience wrapper: run the built-in kernel POST test table. */
kern_return_t
kernel_do_post()
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}
309 
310 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)311 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
312 {
313 	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
314 		return KERN_RESOURCE_SHORTAGE;
315 	}
316 
317 	xt_panic_widgets.xtp_context_p = context;
318 	xt_panic_widgets.xtp_func      = funcp;
319 	xt_panic_widgets.xtp_func_name = funcname;
320 	xt_panic_widgets.xtp_outval_p  = outval;
321 
322 	return KERN_SUCCESS;
323 }
324 
325 void
xnupost_reset_panic_widgets()326 xnupost_reset_panic_widgets()
327 {
328 	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
329 }
330 
/*
 * Hook called from the panic/assert path (kdb_stop) while POST is active.
 * Dispatches the panic string to the registered widget and translates its
 * verdict into a kern_return_t for the caller:
 *   KERN_SUCCESS  -> return from the panic/assert and keep running,
 *   KERN_FAILURE  -> continue into kdb_stop (really panic).
 * Returns KERN_INVALID_CAPABILITY when POST is not active or no widget is
 * armed.
 */
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval         = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	/* fall back to "unknown" for logging if no widget name was registered */
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	/* map the widget verdict to resume-vs-panic behavior */
	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
377 
378 xt_panic_return_t
_xt_generic_assert_check(const char * s,void * str_to_match,void ** outval)379 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
380 {
381 	xt_panic_return_t ret = XT_PANIC_UNRELATED;
382 
383 	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
384 		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
385 		ret = XT_RET_W_SUCCESS;
386 	}
387 
388 	if (outval) {
389 		*outval = (void *)(uintptr_t)ret;
390 	}
391 	return ret;
392 }
393 
394 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)395 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
396 {
397 	uint32_t i = 0;
398 	xnupost_test_t testp;
399 	for (; i < test_count; i++) {
400 		testp                  = &test_list[i];
401 		testp->xt_begin_time   = 0;
402 		testp->xt_end_time     = 0;
403 		testp->xt_test_actions = XT_ACTION_NONE;
404 		testp->xt_retval       = -1;
405 	}
406 	return KERN_SUCCESS;
407 }
408 
409 
/*
 * POST: smoke-test the zone allocator. Creates a destructible zone of
 * uint64_t elements, verifies it starts with no free elements, then
 * allocates and frees one element. Also emits a sample T_PERF datapoint.
 * NOTE(review): the test zone is never destroyed — presumably acceptable
 * for a boot-time test; confirm if re-run support is ever needed.
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* a freshly created zone should have no free elements yet */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
433 
/*
 * qsort() comparator: orders two uint64_t values ascending.
 * Returns -1, 0, or 1 per the standard comparator contract.
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t lhs = *(const uint64_t *)a;
	const uint64_t rhs = *(const uint64_t *)b;

	if (lhs == rhs) {
		return 0;
	}
	return (lhs < rhs) ? -1 : 1;
}
450 
/*
 * Count the number of set bits (population count) in a 64-bit value.
 */
static int
count_bits(uint64_t number)
{
	int set = 0;

	/* Kernighan's trick: each iteration clears the lowest set bit. */
	while (number != 0) {
		number &= number - 1;
		set++;
	}
	return set;
}
460 
kern_return_t
RandomULong_test()
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 *  a. RandomULong works
 *  b. The generated numbers match the following entropy criteria:
 *     For a thousand iterations, verify:
 *          1. mean entropy > 12 bits
 *          2. min entropy > 4 bits
 *          3. No Duplicate
 *          4. No incremental/decremental pattern in a window of 3
 *          5. No Zero
 *          6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
/* ceil(CONF_WINDOW_SIZE / 2): maximum tolerated monotone trend in a window */
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 * "Bit entropy" here = popcount of the XOR of consecutive samples.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window (clamped to the end of the sample array)
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		/* trend: +1 per increasing step, -1 per decreasing step */
		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window (windows overlap: slide by one)
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated (sort, then scan neighbors)
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
597 
/* KCDATA kernel api tests */
/* Shared kcdata descriptor used by kcdata_api_test(). */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;

/*
 * Sample packed payload used to exercise custom-type registration in
 * kcdata_api_test(); its layout is described by test_disk_io_stats_def
 * below and the two must stay in sync.
 */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));

/*
 * Subtype descriptors mirroring struct sample_disk_io_stats field-by-field:
 * offsets are expressed in multiples of sizeof(uint64_t) to match the
 * packed layout above.
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		/* 4-element array: size packs count + element size together */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
638 
/*
 * POST: exercise the kcdata API surface — static buffer init (including
 * NULL, undersized, and copyout-to-NULL negative cases), item allocation
 * via kcdata_get_memory_addr(), array items, convenience add-with-
 * description helpers, and custom type registration.
 * NOTE(review): the PAGE_SIZE buffer kalloc'd below is never freed —
 * presumably fine for a run-once boot test; confirm before re-running.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* poison the length so we can verify init overwrites it */
	test_kc_data.kcd_length   = 0xdeadbeef;
	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* an empty buffer holds exactly the BEGIN and END item headers */
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* array item flags pack the element type (high 32) and count (low 32) */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	return KERN_SUCCESS;
}
746 
747 /*
748  *  kern_return_t
749  *  kcdata_api_assert_tests()
750  *  {
751  *       kern_return_t retval       = 0;
752  *       void * assert_check_retval = NULL;
753  *       test_kc_data2.kcd_length   = 0xdeadbeef;
754  *       mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
755  *       T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
756  *
757  *       retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
758  *                                          KCFLAG_USE_MEMCOPY);
759  *
760  *       T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
761  *
762  *       retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
763  *       T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
764  *
765  *       // this will assert
766  *       retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
767  *       T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
768  *       T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
769  *
770  *       return KERN_SUCCESS;
771  *  }
772  */
773 
774 #if defined(__arm__) || defined(__arm64__)
775 
776 #include <arm/pmap.h>
777 
778 #define MAX_PMAP_OBJECT_ELEMENT 100000
779 
780 extern struct vm_object pmap_object_store; /* store pt pages */
781 extern unsigned long gPhysBase, gPhysSize, first_avail;
782 
783 /*
784  * Define macros to transverse the pmap object structures and extract
785  * physical page number with information from low global only
786  * This emulate how Astris extracts information from coredump
787  */
788 #if defined(__arm64__)
789 
790 static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)791 astris_vm_page_unpack_ptr(uintptr_t p)
792 {
793 	if (!p) {
794 		return (uintptr_t)0;
795 	}
796 
797 	return (p & lowGlo.lgPmapMemFromArrayMask)
798 	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
799 	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
800 }
801 
802 // assume next pointer is the first element
803 #define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))
804 
805 #endif
806 
#if defined(__arm__)

// assume next pointer is the first element
// On 32-bit arm, vm_page queue links are plain (unpacked) pointers.
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif
813 
/* First element of a queue: just follow the head's next link. */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* The queue is circular: iteration ends when we are back at the head. */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

/*
 * Walk every vm_page on the queue headed at `head`, leaving the current
 * page's address in `elt`.  The chain offset from lowGlo locates the
 * queue link inside each vm_page.
 */
#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* Physical page number -> physical address, using the lowGlo page shift. */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
823 
/*
 * Recover the physical page number for the vm_page at address m.
 * Pages inside the vm_pages array [lgPmapMemStartAddr, lgPmapMemEndAddr)
 * compute it from their array index; other pages read it from the
 * vmp_phys_page field at lgPmapMemPageOffset.
 */
static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}
831 
/*
 * Verify the low-globals description of the pmap page-table pages:
 * check the lowGlo layout/constants against the live kernel values,
 * then walk pmap_object_store's page queue exactly as Astris would from
 * a coredump and assert every physical address lies inside
 * [gPhysBase, gPhysBase + gPhysSize).
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	/* static region advertised in lowGlo must lie between gPhysBase and first_avail */
	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	/* layout version/magic the coredump consumer expects */
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	/* packed-pointer encoding parameters must match the kernel's */
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* bail out rather than spin forever on a corrupt/cyclic queue */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* the pmap object must own at least one page */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
873 #endif /* defined(__arm__) || defined(__arm64__) */
874 
/*
 * Arguments for thread_lock_unlock_kernel_primitive().  Two
 * counter/target pairs sequence the threads before ("_b") and after
 * ("_a") taking the turnstile test lock; end_barrier signals overall
 * completion to the main test thread.
 */
struct ts_kern_prim_test_args {
	int *end_barrier;      /* bumped once when this thread finishes */
	int *notify_b;         /* counter to bump before taking the lock (may be NULL) */
	int *wait_event_b;     /* counter to wait on before taking the lock (may be NULL) */
	int before_num;        /* value wait_event_b must reach */
	int *notify_a;         /* counter to bump right after taking the lock */
	int *wait_event_a;     /* counter to wait on after taking the lock */
	int after_num;         /* value wait_event_a must reach */
	int priority_to_check; /* expected sched_pri while holding the lock; 0 = skip check */
};
885 
/*
 * Block the calling thread until *var reaches num (NULL var = nothing
 * to wait for).
 *
 * Standard assert_wait()/thread_block() pattern: the second load after
 * assert_wait() closes the window where the final wake_threads() call
 * lands between the first check and the wait registration.  In that
 * case the pending wait is cancelled with clear_wait() instead of
 * blocking on a wakeup that will never come.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
902 
903 static void
wake_threads(int * var)904 wake_threads(
905 	int* var)
906 {
907 	if (var) {
908 		os_atomic_inc(var, relaxed);
909 		thread_wakeup((event_t) var);
910 	}
911 }
912 
913 extern void IOSleep(int);
914 
/*
 * Body of each thread in ts_kernel_primitive_test().
 *
 * Sequence:
 *   1. wait until before_num threads have signalled wait_event_b, then
 *      bump notify_b;
 *   2. take the sysctl turnstile test lock;
 *   3. bump notify_a and wait for after_num threads on wait_event_a;
 *   4. if priority_to_check != 0, verify this thread's sched_pri equals
 *      it (i.e. the blocked waiters pushed their priority onto the lock
 *      holder through the turnstile);
 *   5. drop the lock, bump end_barrier and terminate.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	/* give the waiters time to actually block on the turnstile */
	IOSleep(100);

	if (info->priority_to_check) {
		/* sched_pri must be sampled under the thread lock at splsched */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
948 
949 kern_return_t
ts_kernel_primitive_test(void)950 ts_kernel_primitive_test(void)
951 {
952 	thread_t owner, thread1, thread2;
953 	struct ts_kern_prim_test_args targs[2] = {};
954 	kern_return_t result;
955 	int end_barrier = 0;
956 	int owner_locked = 0;
957 	int waiters_ready = 0;
958 
959 	T_LOG("Testing turnstile kernel primitive");
960 
961 	targs[0].notify_b = NULL;
962 	targs[0].wait_event_b = NULL;
963 	targs[0].before_num = 0;
964 	targs[0].notify_a = &owner_locked;
965 	targs[0].wait_event_a = &waiters_ready;
966 	targs[0].after_num = 2;
967 	targs[0].priority_to_check = 90;
968 	targs[0].end_barrier = &end_barrier;
969 
970 	// Start owner with priority 80
971 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
972 	T_ASSERT(result == KERN_SUCCESS, "Starting owner");
973 
974 	targs[1].notify_b = &waiters_ready;
975 	targs[1].wait_event_b = &owner_locked;
976 	targs[1].before_num = 1;
977 	targs[1].notify_a = NULL;
978 	targs[1].wait_event_a = NULL;
979 	targs[1].after_num = 0;
980 	targs[1].priority_to_check = 0;
981 	targs[1].end_barrier = &end_barrier;
982 
983 	// Start waiters with priority 85 and 90
984 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
985 	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
986 
987 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
988 	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
989 
990 	wait_threads(&end_barrier, 3);
991 
992 	return KERN_SUCCESS;
993 }
994 
#define MTX_LOCK 0 /* prim_type value: exercise the lck_mtx_t variant */
#define RW_LOCK 1  /* prim_type value: exercise the lck_rw_t variant */

#define NUM_THREADS 4 /* presumably the worker-thread count for the synch tests — confirm at call sites */
999 
/*
 * Bookkeeping shared by all multi-thread synchronization tests below.
 */
struct synch_test_common {
	unsigned int nthreads; /* number of worker threads */
	thread_t *threads;     /* slot is NULL until started, (thread_t)1 once
	                        * excluded via exclude_current_waiter() */
	int max_pri;           /* highest priority handed out by start_threads() */
	int test_done;         /* completion counter, see notify_waiter()/wait_all_thread() */
};
1006 
1007 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1008 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1009 {
1010 	info->nthreads = nthreads;
1011 	info->threads = kalloc(sizeof(thread_t) * nthreads);
1012 	if (!info->threads) {
1013 		return ENOMEM;
1014 	}
1015 
1016 	return KERN_SUCCESS;
1017 }
1018 
/* Release the thread array allocated by init_synch_test_common(). */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree(info->threads, sizeof(thread_t) * info->nthreads);
}
1024 
/*
 * Spawn info->nthreads kernel threads running func at priorities
 * 75, 80, 85, ... and publish them in info->threads.
 *
 * max_pri is set to the highest priority handed out (capped at 95) so a
 * test can compare it against an inheritor's promoted priority.  When
 * sleep_after_first is true, sleep 100ms after starting the first
 * thread, giving it a head start over the others.
 */
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		/* release-store pairs with the acquire loads in wait_for_waiters() */
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}
1056 
/* Return the highest scheduler priority assigned by start_threads(). */
static unsigned int
get_max_pri(struct synch_test_common * info)
{
	return info->max_pri;
}
1062 
/* Block until every worker thread has called notify_waiter(). */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1068 
/* Signal that the calling worker has finished its test body. */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1074 
/*
 * Poll until every other worker thread has gone off-core (no longer
 * runnable).  The inheritor thread uses this so all waiters are really
 * blocked before it checks its promoted priority.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* wait for the slot to be published by start_threads() */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					/* thread removed itself via exclude_current_waiter() */
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				if (thread->started == FALSE) {
					/* not started yet: continue re-evaluates the while condition */
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1114 
/*
 * Replace the calling thread's slot in info->threads with the sentinel
 * (thread_t)1 so wait_for_waiters() stops tracking it.
 */
static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* the slot may not have been published yet; spin until it is */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
1135 
/*
 * Shared state for the sleep-with-inheritor and gate tests.
 */
struct info_sleep_inheritor_test {
	struct synch_test_common head;  /* common thread bookkeeping */
	lck_mtx_t mtx_lock;             /* used when prim_type == MTX_LOCK */
	lck_rw_t rw_lock;               /* used when prim_type == RW_LOCK */
	decl_lck_mtx_gate_data(, gate); /* embedded gate */
	boolean_t gate_closed;
	int prim_type;                  /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;         /* highest priority seen in thread_steal_work() */
	int synch_value;                /* target value for the synch counter */
	int synch;                      /* rendezvous counter (wait_threads/wake_threads) */
	int value;                      /* test-specific work counter */
	int handoff_failure;            /* count of wakeups that found no waiter */
	thread_t thread_inheritor;      /* current push target; its address doubles as sleep event */
	bool use_alloc_gate;            /* operate on alloc_gate instead of the embedded gate */
	gate_t *alloc_gate;             /* heap gate, see primitive_gate_alloc()/free() */
};
1154 
1155 static void
primitive_lock(struct info_sleep_inheritor_test * info)1156 primitive_lock(struct info_sleep_inheritor_test *info)
1157 {
1158 	switch (info->prim_type) {
1159 	case MTX_LOCK:
1160 		lck_mtx_lock(&info->mtx_lock);
1161 		break;
1162 	case RW_LOCK:
1163 		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1164 		break;
1165 	default:
1166 		panic("invalid type %d", info->prim_type);
1167 	}
1168 }
1169 
1170 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1171 primitive_unlock(struct info_sleep_inheritor_test *info)
1172 {
1173 	switch (info->prim_type) {
1174 	case MTX_LOCK:
1175 		lck_mtx_unlock(&info->mtx_lock);
1176 		break;
1177 	case RW_LOCK:
1178 		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1179 		break;
1180 	default:
1181 		panic("invalid type %d", info->prim_type);
1182 	}
1183 }
1184 
1185 static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test * info)1186 primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
1187 {
1188 	wait_result_t ret = KERN_SUCCESS;
1189 	switch (info->prim_type) {
1190 	case MTX_LOCK:
1191 		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1192 		break;
1193 	case RW_LOCK:
1194 		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1195 		break;
1196 	default:
1197 		panic("invalid type %d", info->prim_type);
1198 	}
1199 
1200 	return ret;
1201 }
1202 
1203 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1204 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1205 {
1206 	switch (info->prim_type) {
1207 	case MTX_LOCK:
1208 	case RW_LOCK:
1209 		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1210 		break;
1211 	default:
1212 		panic("invalid type %d", info->prim_type);
1213 	}
1214 }
1215 
1216 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1217 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1218 {
1219 	switch (info->prim_type) {
1220 	case MTX_LOCK:
1221 	case RW_LOCK:
1222 		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1223 		break;
1224 	default:
1225 		panic("invalid type %d", info->prim_type);
1226 	}
1227 	return;
1228 }
1229 
1230 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1231 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1232 {
1233 	switch (info->prim_type) {
1234 	case MTX_LOCK:
1235 	case RW_LOCK:
1236 		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1237 		break;
1238 	default:
1239 		panic("invalid type %d", info->prim_type);
1240 	}
1241 	return;
1242 }
1243 
1244 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1245 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1246 {
1247 	gate_t *gate = &info->gate;
1248 	if (info->use_alloc_gate == true) {
1249 		gate = info->alloc_gate;
1250 	}
1251 	kern_return_t ret = KERN_SUCCESS;
1252 	switch (info->prim_type) {
1253 	case MTX_LOCK:
1254 		ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1255 		break;
1256 	case RW_LOCK:
1257 		ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1258 		break;
1259 	default:
1260 		panic("invalid type %d", info->prim_type);
1261 	}
1262 	return ret;
1263 }
1264 
1265 static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test * info)1266 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1267 {
1268 	gate_t *gate = &info->gate;
1269 	if (info->use_alloc_gate == true) {
1270 		gate = info->alloc_gate;
1271 	}
1272 	gate_wait_result_t ret = GATE_OPENED;
1273 	switch (info->prim_type) {
1274 	case MTX_LOCK:
1275 		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1276 		break;
1277 	case RW_LOCK:
1278 		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1279 		break;
1280 	default:
1281 		panic("invalid type %d", info->prim_type);
1282 	}
1283 	return ret;
1284 }
1285 
1286 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1287 primitive_gate_open(struct info_sleep_inheritor_test *info)
1288 {
1289 	gate_t *gate = &info->gate;
1290 	if (info->use_alloc_gate == true) {
1291 		gate = info->alloc_gate;
1292 	}
1293 	switch (info->prim_type) {
1294 	case MTX_LOCK:
1295 		lck_mtx_gate_open(&info->mtx_lock, gate);
1296 		break;
1297 	case RW_LOCK:
1298 		lck_rw_gate_open(&info->rw_lock, gate);
1299 		break;
1300 	default:
1301 		panic("invalid type %d", info->prim_type);
1302 	}
1303 }
1304 
1305 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1306 primitive_gate_close(struct info_sleep_inheritor_test *info)
1307 {
1308 	gate_t *gate = &info->gate;
1309 	if (info->use_alloc_gate == true) {
1310 		gate = info->alloc_gate;
1311 	}
1312 
1313 	switch (info->prim_type) {
1314 	case MTX_LOCK:
1315 		lck_mtx_gate_close(&info->mtx_lock, gate);
1316 		break;
1317 	case RW_LOCK:
1318 		lck_rw_gate_close(&info->rw_lock, gate);
1319 		break;
1320 	default:
1321 		panic("invalid type %d", info->prim_type);
1322 	}
1323 }
1324 
1325 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1326 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1327 {
1328 	gate_t *gate = &info->gate;
1329 	if (info->use_alloc_gate == true) {
1330 		gate = info->alloc_gate;
1331 	}
1332 
1333 	switch (info->prim_type) {
1334 	case MTX_LOCK:
1335 		lck_mtx_gate_steal(&info->mtx_lock, gate);
1336 		break;
1337 	case RW_LOCK:
1338 		lck_rw_gate_steal(&info->rw_lock, gate);
1339 		break;
1340 	default:
1341 		panic("invalid type %d", info->prim_type);
1342 	}
1343 }
1344 
1345 static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test * info,int flags)1346 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1347 {
1348 	gate_t *gate = &info->gate;
1349 	if (info->use_alloc_gate == true) {
1350 		gate = info->alloc_gate;
1351 	}
1352 
1353 	kern_return_t ret = KERN_SUCCESS;
1354 	switch (info->prim_type) {
1355 	case MTX_LOCK:
1356 		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1357 		break;
1358 	case RW_LOCK:
1359 		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1360 		break;
1361 	default:
1362 		panic("invalid type %d", info->prim_type);
1363 	}
1364 	return ret;
1365 }
1366 
1367 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1368 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1369 {
1370 	gate_t *gate = &info->gate;
1371 	if (info->use_alloc_gate == true) {
1372 		gate = info->alloc_gate;
1373 	}
1374 
1375 	switch (info->prim_type) {
1376 	case MTX_LOCK:
1377 		lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1378 		break;
1379 	case RW_LOCK:
1380 		lck_rw_gate_assert(&info->rw_lock, gate, type);
1381 		break;
1382 	default:
1383 		panic("invalid type %d", info->prim_type);
1384 	}
1385 }
1386 
1387 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1388 primitive_gate_init(struct info_sleep_inheritor_test *info)
1389 {
1390 	switch (info->prim_type) {
1391 	case MTX_LOCK:
1392 		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1393 		break;
1394 	case RW_LOCK:
1395 		lck_rw_gate_init(&info->rw_lock, &info->gate);
1396 		break;
1397 	default:
1398 		panic("invalid type %d", info->prim_type);
1399 	}
1400 }
1401 
1402 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1403 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1404 {
1405 	switch (info->prim_type) {
1406 	case MTX_LOCK:
1407 		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1408 		break;
1409 	case RW_LOCK:
1410 		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1411 		break;
1412 	default:
1413 		panic("invalid type %d", info->prim_type);
1414 	}
1415 }
1416 
1417 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1418 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1419 {
1420 	gate_t *gate;
1421 	switch (info->prim_type) {
1422 	case MTX_LOCK:
1423 		gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1424 		break;
1425 	case RW_LOCK:
1426 		gate = lck_rw_gate_alloc_init(&info->rw_lock);
1427 		break;
1428 	default:
1429 		panic("invalid type %d", info->prim_type);
1430 	}
1431 	info->alloc_gate = gate;
1432 }
1433 
1434 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1435 primitive_gate_free(struct info_sleep_inheritor_test *info)
1436 {
1437 	T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1438 
1439 	switch (info->prim_type) {
1440 	case MTX_LOCK:
1441 		lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1442 		break;
1443 	case RW_LOCK:
1444 		lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1445 		break;
1446 	default:
1447 		panic("invalid type %d", info->prim_type);
1448 	}
1449 	info->alloc_gate = NULL;
1450 }
1451 
/*
 * Worker for the "inheritor used like a mutex" test: the first thread
 * through becomes thread_inheritor (the owner); every other thread
 * sleeps pushing on it.  Each wakeup installs the woken thread as the
 * next inheritor, so ownership is handed thread-to-thread like a mutex.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first through: become the owner */
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	/* "critical section": we own the inheritorship here */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	/* hand ownership to one waiter; it becomes the new inheritor */
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* nobody left to wake — only the last thread may see this */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* drop the reference returned by wakeup_one_with_inheritor */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* no promotion may linger once the handoff is complete */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1504 
/*
 * Worker: the first thread becomes the inheritor and "does work" while
 * every other thread sleeps pushing on it; before waking them all it
 * verifies its priority was boosted to the maximum waiter priority.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first through: become the inheritor */
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* wait until every other thread has checked in ... */
		wait_threads(&info->synch, info->synch_value - 1);

		/* ... and is actually blocked, so the push is fully applied */
		wait_for_waiters((struct synch_test_common *)info);

		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* waiter: check in, then sleep pushing on the inheritor */
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* no promotion may linger once the test is over */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1544 
/*
 * Worker: the first thread becomes the inheritor, but the next thread
 * through "steals" the inheritorship via change_sleep_inheritor() and
 * verifies it then receives the remaining waiters' push instead.
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first thread: original inheritor; stops tracking itself */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* wake everyone only if the inheritorship was not stolen back */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* second thread: steal the inheritorship for itself */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* the waiters' push must now land on the thief */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* remaining threads: record the highest waiter priority and sleep */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* no promotion may linger once the test is over */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1605 
/*
 * Worker: decrement the shared value; the last thread through wakes
 * everyone, while earlier threads clear the inheritor and sleep with a
 * NULL inheritor (so no priority push is expected).
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* last one in: release all sleepers */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* sleep with no inheritor: nobody gets pushed */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* no promotion may linger once the test is over */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1632 
1633 static void
thread_mtx_work(void * args,__unused wait_result_t wr)1634 thread_mtx_work(
1635 	void *args,
1636 	__unused wait_result_t wr)
1637 {
1638 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1639 	uint my_pri = current_thread()->sched_pri;
1640 	int i;
1641 	u_int8_t rand;
1642 	unsigned int mod_rand;
1643 	uint max_pri;
1644 
1645 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1646 
1647 	for (i = 0; i < 10; i++) {
1648 		lck_mtx_lock(&info->mtx_lock);
1649 		if (info->thread_inheritor == NULL) {
1650 			info->thread_inheritor = current_thread();
1651 			lck_mtx_unlock(&info->mtx_lock);
1652 
1653 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1654 
1655 			wait_threads(&info->synch, info->synch_value - 1);
1656 			wait_for_waiters((struct synch_test_common *)info);
1657 			max_pri = get_max_pri((struct synch_test_common *) info);
1658 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1659 
1660 			os_atomic_store(&info->synch, 0, relaxed);
1661 
1662 			lck_mtx_lock(&info->mtx_lock);
1663 			info->thread_inheritor = NULL;
1664 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1665 			lck_mtx_unlock(&info->mtx_lock);
1666 			continue;
1667 		}
1668 
1669 		read_random(&rand, sizeof(rand));
1670 		mod_rand = rand % 2;
1671 
1672 		wake_threads(&info->synch);
1673 		switch (mod_rand) {
1674 		case 0:
1675 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1676 			lck_mtx_unlock(&info->mtx_lock);
1677 			break;
1678 		case 1:
1679 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1680 			break;
1681 		default:
1682 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1683 		}
1684 	}
1685 
1686 	/*
1687 	 * spin here to stop using the lock as mutex
1688 	 */
1689 	wake_threads(&info->synch);
1690 	wait_threads(&info->synch, info->synch_value);
1691 
1692 	for (i = 0; i < 10; i++) {
1693 		/* read_random might sleep so read it before acquiring the mtx as spin */
1694 		read_random(&rand, sizeof(rand));
1695 
1696 		lck_mtx_lock_spin(&info->mtx_lock);
1697 		if (info->thread_inheritor == NULL) {
1698 			info->thread_inheritor = current_thread();
1699 			lck_mtx_unlock(&info->mtx_lock);
1700 
1701 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1702 			wait_for_waiters((struct synch_test_common *)info);
1703 			max_pri = get_max_pri((struct synch_test_common *) info);
1704 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1705 
1706 			lck_mtx_lock_spin(&info->mtx_lock);
1707 			info->thread_inheritor = NULL;
1708 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1709 			lck_mtx_unlock(&info->mtx_lock);
1710 			continue;
1711 		}
1712 
1713 		mod_rand = rand % 2;
1714 		switch (mod_rand) {
1715 		case 0:
1716 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1717 			lck_mtx_unlock(&info->mtx_lock);
1718 			break;
1719 		case 1:
1720 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1721 			lck_mtx_unlock(&info->mtx_lock);
1722 			break;
1723 		default:
1724 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1725 		}
1726 	}
1727 	assert(current_thread()->kern_promotion_schedpri == 0);
1728 	notify_waiter((struct synch_test_common *)info);
1729 
1730 	thread_terminate_self();
1731 }
1732 
/*
 * Worker exercising lck_rw_sleep_with_inheritor() in all four sleep
 * flavors (10 rounds).  The first thread to find thread_inheritor NULL
 * upgrades shared -> exclusive, becomes the inheritor, waits for the
 * others to block, verifies the priority push, then wakes everyone.
 * Waiters randomly pick DEFAULT / UNLOCK / SHARED / EXCLUSIVE sleep.
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			/* upgrade may fail and drop the lock — retry from scratch */
			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				/* re-check: another thread may have won while upgrading */
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				goto try_again;
			}
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			/* wake holding the lock in the mode we went to sleep with */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			/* LCK_SLEEP_UNLOCK drops the lock for us */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			/* wake holding the lock shared */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			/* wake holding the lock exclusive */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/* no promotion may linger once the test is over */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1809 
1810 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)1811 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
1812 {
1813 	info->prim_type = prim_type;
1814 	info->synch = 0;
1815 	info->synch_value = info->head.nthreads;
1816 
1817 	info->thread_inheritor = NULL;
1818 
1819 	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
1820 	wait_all_thread((struct synch_test_common *)info);
1821 }
1822 
1823 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)1824 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
1825 {
1826 	info->prim_type = prim_type;
1827 
1828 	info->synch = 0;
1829 	info->synch_value = info->head.nthreads;
1830 	info->value = 0;
1831 	info->handoff_failure = 0;
1832 	info->thread_inheritor = NULL;
1833 
1834 	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
1835 	wait_all_thread((struct synch_test_common *)info);
1836 
1837 	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
1838 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
1839 }
1840 
1841 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)1842 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
1843 {
1844 	info->prim_type = prim_type;
1845 
1846 	info->thread_inheritor = NULL;
1847 	info->steal_pri = 0;
1848 	info->synch = 0;
1849 	info->synch_value = info->head.nthreads;
1850 
1851 	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
1852 	wait_all_thread((struct synch_test_common *)info);
1853 }
1854 
1855 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)1856 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
1857 {
1858 	info->prim_type = prim_type;
1859 	info->synch = 0;
1860 	info->synch_value = info->head.nthreads;
1861 
1862 	info->thread_inheritor = NULL;
1863 	info->value = info->head.nthreads;
1864 
1865 	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
1866 	wait_all_thread((struct synch_test_common *)info);
1867 }
1868 
1869 static void
test_rw_lock(struct info_sleep_inheritor_test * info)1870 test_rw_lock(struct info_sleep_inheritor_test *info)
1871 {
1872 	info->thread_inheritor = NULL;
1873 	info->value = info->head.nthreads;
1874 	info->synch = 0;
1875 	info->synch_value = info->head.nthreads;
1876 
1877 	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
1878 	wait_all_thread((struct synch_test_common *)info);
1879 }
1880 
1881 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)1882 test_mtx_lock(struct info_sleep_inheritor_test *info)
1883 {
1884 	info->thread_inheritor = NULL;
1885 	info->value = info->head.nthreads;
1886 	info->synch = 0;
1887 	info->synch_value = info->head.nthreads;
1888 
1889 	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
1890 	wait_all_thread((struct synch_test_common *)info);
1891 }
1892 
1893 kern_return_t
ts_kernel_sleep_inheritor_test(void)1894 ts_kernel_sleep_inheritor_test(void)
1895 {
1896 	struct info_sleep_inheritor_test info = {};
1897 
1898 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
1899 
1900 	lck_attr_t* lck_attr = lck_attr_alloc_init();
1901 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
1902 	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
1903 
1904 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
1905 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
1906 
1907 	/*
1908 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1909 	 */
1910 	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
1911 	test_sleep_with_wake_all(&info, MTX_LOCK);
1912 
1913 	/*
1914 	 * Testing rw_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1915 	 */
1916 	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
1917 	test_sleep_with_wake_all(&info, RW_LOCK);
1918 
1919 	/*
1920 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
1921 	 */
1922 	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
1923 	test_sleep_with_wake_one(&info, MTX_LOCK);
1924 
1925 	/*
1926 	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
1927 	 */
1928 	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
1929 	test_sleep_with_wake_one(&info, RW_LOCK);
1930 
1931 	/*
1932 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1933 	 * and change_sleep_inheritor
1934 	 */
1935 	T_LOG("Testing change_sleep_inheritor with mxt sleep");
1936 	test_change_sleep_inheritor(&info, MTX_LOCK);
1937 
1938 	/*
1939 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1940 	 * and change_sleep_inheritor
1941 	 */
1942 	T_LOG("Testing change_sleep_inheritor with rw sleep");
1943 	test_change_sleep_inheritor(&info, RW_LOCK);
1944 
1945 	/*
1946 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1947 	 * with inheritor NULL
1948 	 */
1949 	T_LOG("Testing inheritor NULL");
1950 	test_no_inheritor(&info, MTX_LOCK);
1951 
1952 	/*
1953 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1954 	 * with inheritor NULL
1955 	 */
1956 	T_LOG("Testing inheritor NULL");
1957 	test_no_inheritor(&info, RW_LOCK);
1958 
1959 	/*
1960 	 * Testing mtx locking combinations
1961 	 */
1962 	T_LOG("Testing mtx locking combinations");
1963 	test_mtx_lock(&info);
1964 
1965 	/*
1966 	 * Testing rw locking combinations
1967 	 */
1968 	T_LOG("Testing rw locking combinations");
1969 	test_rw_lock(&info);
1970 
1971 	destroy_synch_test_common((struct synch_test_common *)&info);
1972 
1973 	lck_attr_free(lck_attr);
1974 	lck_grp_attr_free(lck_grp_attr);
1975 	lck_rw_destroy(&info.rw_lock, lck_grp);
1976 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
1977 	lck_grp_free(lck_grp);
1978 
1979 	return KERN_SUCCESS;
1980 }
1981 
1982 static void
thread_gate_aggressive(void * args,__unused wait_result_t wr)1983 thread_gate_aggressive(
1984 	void *args,
1985 	__unused wait_result_t wr)
1986 {
1987 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1988 	uint my_pri = current_thread()->sched_pri;
1989 
1990 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1991 
1992 	primitive_lock(info);
1993 	if (info->thread_inheritor == NULL) {
1994 		info->thread_inheritor = current_thread();
1995 		primitive_gate_assert(info, GATE_ASSERT_OPEN);
1996 		primitive_gate_close(info);
1997 		exclude_current_waiter((struct synch_test_common *)info);
1998 
1999 		primitive_unlock(info);
2000 
2001 		wait_threads(&info->synch, info->synch_value - 2);
2002 		wait_for_waiters((struct synch_test_common *)info);
2003 		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
2004 
2005 		primitive_lock(info);
2006 		if (info->thread_inheritor == current_thread()) {
2007 			primitive_gate_open(info);
2008 		}
2009 	} else {
2010 		if (info->steal_pri == 0) {
2011 			info->steal_pri = my_pri;
2012 			info->thread_inheritor = current_thread();
2013 			primitive_gate_steal(info);
2014 			exclude_current_waiter((struct synch_test_common *)info);
2015 
2016 			primitive_unlock(info);
2017 			wait_threads(&info->synch, info->synch_value - 2);
2018 
2019 			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
2020 			wait_for_waiters((struct synch_test_common *)info);
2021 			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);
2022 
2023 			primitive_lock(info);
2024 			primitive_gate_open(info);
2025 		} else {
2026 			if (my_pri > info->steal_pri) {
2027 				info->steal_pri = my_pri;
2028 			}
2029 			wake_threads(&info->synch);
2030 			primitive_gate_wait(info);
2031 			exclude_current_waiter((struct synch_test_common *)info);
2032 		}
2033 	}
2034 	primitive_unlock(info);
2035 
2036 	assert(current_thread()->kern_promotion_schedpri == 0);
2037 	notify_waiter((struct synch_test_common *)info);
2038 
2039 	thread_terminate_self();
2040 }
2041 
2042 static void
thread_gate_free(void * args,__unused wait_result_t wr)2043 thread_gate_free(
2044 	void *args,
2045 	__unused wait_result_t wr)
2046 {
2047 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2048 	uint my_pri = current_thread()->sched_pri;
2049 
2050 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
2051 
2052 	primitive_lock(info);
2053 
2054 	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
2055 		primitive_gate_assert(info, GATE_ASSERT_HELD);
2056 		primitive_unlock(info);
2057 
2058 		wait_threads(&info->synch, info->synch_value - 1);
2059 		wait_for_waiters((struct synch_test_common *) info);
2060 
2061 		primitive_lock(info);
2062 		primitive_gate_open(info);
2063 		primitive_gate_free(info);
2064 	} else {
2065 		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
2066 		wake_threads(&info->synch);
2067 		gate_wait_result_t ret = primitive_gate_wait(info);
2068 		T_ASSERT(ret == GATE_OPENED, "open gate");
2069 	}
2070 
2071 	primitive_unlock(info);
2072 
2073 	notify_waiter((struct synch_test_common *)info);
2074 
2075 	thread_terminate_self();
2076 }
2077 
2078 static void
thread_gate_like_mutex(void * args,__unused wait_result_t wr)2079 thread_gate_like_mutex(
2080 	void *args,
2081 	__unused wait_result_t wr)
2082 {
2083 	gate_wait_result_t wait;
2084 	kern_return_t ret;
2085 	uint my_pri = current_thread()->sched_pri;
2086 
2087 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2088 
2089 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
2090 
2091 	/*
2092 	 * spin here to start concurrently
2093 	 */
2094 	wake_threads(&info->synch);
2095 	wait_threads(&info->synch, info->synch_value);
2096 
2097 	primitive_lock(info);
2098 
2099 	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
2100 		wait = primitive_gate_wait(info);
2101 		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
2102 	}
2103 
2104 	primitive_gate_assert(info, GATE_ASSERT_HELD);
2105 
2106 	primitive_unlock(info);
2107 
2108 	IOSleep(100);
2109 	info->value++;
2110 
2111 	primitive_lock(info);
2112 
2113 	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
2114 	if (ret == KERN_NOT_WAITING) {
2115 		T_ASSERT(info->handoff_failure == 0, "handoff failures");
2116 		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
2117 		info->handoff_failure++;
2118 	}
2119 
2120 	primitive_unlock(info);
2121 	notify_waiter((struct synch_test_common *)info);
2122 
2123 	thread_terminate_self();
2124 }
2125 
2126 static void
thread_just_one_do_work(void * args,__unused wait_result_t wr)2127 thread_just_one_do_work(
2128 	void *args,
2129 	__unused wait_result_t wr)
2130 {
2131 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2132 	uint my_pri = current_thread()->sched_pri;
2133 	uint max_pri;
2134 
2135 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
2136 
2137 	primitive_lock(info);
2138 check_again:
2139 	if (info->work_to_do) {
2140 		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
2141 			primitive_gate_assert(info, GATE_ASSERT_HELD);
2142 			primitive_unlock(info);
2143 
2144 			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
2145 			wait_threads(&info->synch, info->synch_value - 1);
2146 			wait_for_waiters((struct synch_test_common *)info);
2147 			max_pri = get_max_pri((struct synch_test_common *) info);
2148 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
2149 			os_atomic_store(&info->synch, 0, relaxed);
2150 
2151 			primitive_lock(info);
2152 			info->work_to_do = FALSE;
2153 			primitive_gate_open(info);
2154 		} else {
2155 			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
2156 			wake_threads(&info->synch);
2157 			primitive_gate_wait(info);
2158 			goto check_again;
2159 		}
2160 	}
2161 	primitive_unlock(info);
2162 
2163 	assert(current_thread()->kern_promotion_schedpri == 0);
2164 	notify_waiter((struct synch_test_common *)info);
2165 	thread_terminate_self();
2166 }
2167 
2168 static void
test_gate_push(struct info_sleep_inheritor_test * info,int prim_type)2169 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2170 {
2171 	info->prim_type = prim_type;
2172 	info->use_alloc_gate = false;
2173 
2174 	primitive_gate_init(info);
2175 	info->work_to_do = TRUE;
2176 	info->synch = 0;
2177 	info->synch_value = NUM_THREADS;
2178 
2179 	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2180 	wait_all_thread((struct synch_test_common *)info);
2181 
2182 	primitive_gate_destroy(info);
2183 }
2184 
2185 static void
test_gate_handoff(struct info_sleep_inheritor_test * info,int prim_type)2186 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2187 {
2188 	info->prim_type = prim_type;
2189 	info->use_alloc_gate = false;
2190 
2191 	primitive_gate_init(info);
2192 
2193 	info->synch = 0;
2194 	info->synch_value = NUM_THREADS;
2195 	info->value = 0;
2196 	info->handoff_failure = 0;
2197 
2198 	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2199 	wait_all_thread((struct synch_test_common *)info);
2200 
2201 	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2202 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2203 
2204 	primitive_gate_destroy(info);
2205 }
2206 
2207 static void
test_gate_steal(struct info_sleep_inheritor_test * info,int prim_type)2208 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2209 {
2210 	info->prim_type = prim_type;
2211 	info->use_alloc_gate = false;
2212 
2213 	primitive_gate_init(info);
2214 
2215 	info->synch = 0;
2216 	info->synch_value = NUM_THREADS;
2217 	info->thread_inheritor = NULL;
2218 	info->steal_pri = 0;
2219 
2220 	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2221 	wait_all_thread((struct synch_test_common *)info);
2222 
2223 	primitive_gate_destroy(info);
2224 }
2225 
2226 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2227 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2228 {
2229 	(void)info;
2230 	(void) prim_type;
2231 	info->prim_type = prim_type;
2232 	info->use_alloc_gate = true;
2233 
2234 	primitive_gate_alloc(info);
2235 
2236 	info->synch = 0;
2237 	info->synch_value = NUM_THREADS;
2238 
2239 	start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2240 	wait_all_thread((struct synch_test_common *)info);
2241 
2242 	T_ASSERT(info->alloc_gate == NULL, "gate free");
2243 	info->use_alloc_gate = false;
2244 }
2245 
2246 kern_return_t
ts_kernel_gate_test(void)2247 ts_kernel_gate_test(void)
2248 {
2249 	struct info_sleep_inheritor_test info = {};
2250 
2251 	T_LOG("Testing gate primitive");
2252 
2253 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2254 
2255 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2256 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2257 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2258 
2259 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2260 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2261 
2262 	/*
2263 	 * Testing the priority inherited by the keeper
2264 	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2265 	 */
2266 	T_LOG("Testing gate push, mtx");
2267 	test_gate_push(&info, MTX_LOCK);
2268 
2269 	T_LOG("Testing gate push, rw");
2270 	test_gate_push(&info, RW_LOCK);
2271 
2272 	/*
2273 	 * Testing the handoff
2274 	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2275 	 */
2276 	T_LOG("Testing gate handoff, mtx");
2277 	test_gate_handoff(&info, MTX_LOCK);
2278 
2279 	T_LOG("Testing gate handoff, rw");
2280 	test_gate_handoff(&info, RW_LOCK);
2281 
2282 	/*
2283 	 * Testing the steal
2284 	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2285 	 */
2286 	T_LOG("Testing gate steal, mtx");
2287 	test_gate_steal(&info, MTX_LOCK);
2288 
2289 	T_LOG("Testing gate steal, rw");
2290 	test_gate_steal(&info, RW_LOCK);
2291 
2292 	/*
2293 	 * Testing the alloc/free
2294 	 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2295 	 */
2296 	T_LOG("Testing gate alloc/free, mtx");
2297 	test_gate_alloc_free(&info, MTX_LOCK);
2298 
2299 	T_LOG("Testing gate alloc/free, rw");
2300 	test_gate_alloc_free(&info, RW_LOCK);
2301 
2302 	destroy_synch_test_common((struct synch_test_common *)&info);
2303 
2304 	lck_attr_free(lck_attr);
2305 	lck_grp_attr_free(lck_grp_attr);
2306 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2307 	lck_grp_free(lck_grp);
2308 
2309 	return KERN_SUCCESS;
2310 }
2311 
#define NUM_THREAD_CHAIN 6

/*
 * State shared by the turnstile chain tests (ts_kernel_turnstile_chain_test).
 * head must stay the first member: workers cast this struct to
 * struct synch_test_common *.
 */
struct turnstile_chain_test {
	struct synch_test_common head;  /* thread bookkeeping; must be first */
	lck_mtx_t mtx_lock;             /* lock backing the gates and the inheritor sleeps */
	int synch_value;                /* target count for wait_threads() */
	int synch;                      /* first rendezvous counter */
	int synch2;                     /* second rendezvous counter (gate tests) */
	gate_t gates[NUM_THREAD_CHAIN]; /* one gate per test thread */
};
2322 
/*
 * Mixed gate/sleep chain worker (see test_sleep_gate_chain): even-indexed
 * threads close their own gate; odd-indexed thread i then blocks in
 * lck_mtx_gate_wait on gates[i-1], while each non-zero even thread i sleeps
 * with thread i-1 as inheritor, forming an alternating gate/sleep push
 * chain rooted at thread 0.  Thread 0 verifies it received the maximum
 * priority of the set and opens its gate; each woken thread verifies the
 * same and wakes/opens the next link.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* rendezvous: all gates are closed before anybody starts waiting */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* head of the chain: everybody pushes on us through the chain */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* find our slot; we wait on the previous thread, wake the next */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* odd: block on the gate closed by the previous (even) thread */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* wake the next link without transferring the push to it */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* no residual push may be left on this thread */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2418 
/*
 * Gate-only chain worker (see test_gate_chain): every thread i closes
 * gates[i], then every thread but the first blocks in lck_mtx_gate_wait on
 * gates[i-1], the gate of the previous thread, forming a push chain rooted
 * at thread 0.  Thread 0 verifies it received the maximum priority of the
 * set and opens its gate; each woken thread verifies the same and opens
 * its own gate, releasing the next link.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* close our own gate; i is our index in the chain afterwards */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	/* rendezvous: all gates are closed before anybody starts waiting */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* head of the chain: everybody pushes on us through the chain */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* block on the previous thread's gate, then release the next link */
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* no residual push may be left on this thread */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2481 
/*
 * Sleep-only chain worker (see test_sleep_chain): thread i sleeps with
 * inheritor on the event of thread i-1, declaring thread i-1 the inheritor,
 * forming a push chain rooted at thread 0.  Thread 0 verifies it received
 * the maximum priority of the set, then wakes its first waiter without
 * transferring the push and drops inheritorship with wakeup_all; each woken
 * thread repeats the same on its own event.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* head of the chain: everybody pushes on us through the chain */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* wake thread 1 but keep the push on ourselves */
		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* find our slot; we sleep on the previous thread, wake the next */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* wake the next link (if any) without transferring the push */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* no residual push may be left on this thread */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2559 
2560 static void
test_sleep_chain(struct turnstile_chain_test * info)2561 test_sleep_chain(struct turnstile_chain_test *info)
2562 {
2563 	info->synch = 0;
2564 	info->synch_value = info->head.nthreads;
2565 
2566 	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
2567 	wait_all_thread((struct synch_test_common *)info);
2568 }
2569 
2570 static void
test_gate_chain(struct turnstile_chain_test * info)2571 test_gate_chain(struct turnstile_chain_test *info)
2572 {
2573 	info->synch = 0;
2574 	info->synch2 = 0;
2575 	info->synch_value = info->head.nthreads;
2576 
2577 	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
2578 	wait_all_thread((struct synch_test_common *)info);
2579 }
2580 
2581 static void
test_sleep_gate_chain(struct turnstile_chain_test * info)2582 test_sleep_gate_chain(struct turnstile_chain_test *info)
2583 {
2584 	info->synch = 0;
2585 	info->synch2 = 0;
2586 	info->synch_value = info->head.nthreads;
2587 
2588 	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
2589 	wait_all_thread((struct synch_test_common *)info);
2590 }
2591 
2592 kern_return_t
ts_kernel_turnstile_chain_test(void)2593 ts_kernel_turnstile_chain_test(void)
2594 {
2595 	struct turnstile_chain_test info = {};
2596 	int i;
2597 
2598 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
2599 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2600 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2601 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2602 
2603 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2604 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
2605 		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
2606 	}
2607 
2608 	T_LOG("Testing sleep chain, lck");
2609 	test_sleep_chain(&info);
2610 
2611 	T_LOG("Testing gate chain, lck");
2612 	test_gate_chain(&info);
2613 
2614 	T_LOG("Testing sleep and gate chain, lck");
2615 	test_sleep_gate_chain(&info);
2616 
2617 	destroy_synch_test_common((struct synch_test_common *)&info);
2618 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
2619 		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
2620 	}
2621 	lck_attr_free(lck_attr);
2622 	lck_grp_attr_free(lck_grp_attr);
2623 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2624 	lck_grp_free(lck_grp);
2625 
2626 	return KERN_SUCCESS;
2627 }
2628 
2629 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)2630 ts_kernel_timingsafe_bcmp_test(void)
2631 {
2632 	int i, buf_size;
2633 	char *buf = NULL;
2634 
2635 	// empty
2636 	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
2637 	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
2638 	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
2639 
2640 	// equal
2641 	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
2642 
2643 	// unequal
2644 	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
2645 	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
2646 	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
2647 	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
2648 
2649 	// all possible bitwise differences
2650 	for (i = 1; i < 256; i += 1) {
2651 		unsigned char a = 0;
2652 		unsigned char b = (unsigned char)i;
2653 
2654 		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
2655 	}
2656 
2657 	// large
2658 	buf_size = 1024 * 16;
2659 	buf = kalloc(buf_size);
2660 	T_EXPECT_NOTNULL(buf, "kalloc of buf");
2661 
2662 	read_random(buf, buf_size);
2663 	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
2664 	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
2665 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
2666 
2667 	memcpy(buf + 128, buf, 128);
2668 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
2669 
2670 	kfree(buf, buf_size);
2671 
2672 	return KERN_SUCCESS;
2673 }
2674 
/*
 * POST check for the %hx/%hhx length modifiers in the kernel printf:
 * prints four shorts, four chars and a 64-bit value.  The test itself
 * always passes (T_PASS is unconditional); the printed line is meant for
 * inspection of the console/log output.
 */
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	T_PASS("kprintf_hhx_test passed");
	return KERN_SUCCESS;
}
2687