1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_protos.h>
48 #include <string.h>
49 #include <kern/kern_apfs_reflock.h>
50
51 #if !(DEVELOPMENT || DEBUG)
52 #error "Testing is not enabled on RELEASE configurations"
53 #endif
54
55 #include <tests/xnupost.h>
56
57 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
58 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
59
60 uint32_t total_post_tests_count = 0;
61 void xnupost_reset_panic_widgets(void);
62
63 /* test declarations */
64 kern_return_t zalloc_test(void);
65 kern_return_t RandomULong_test(void);
66 kern_return_t kcdata_api_test(void);
67 kern_return_t ts_kernel_primitive_test(void);
68 kern_return_t ts_kernel_sleep_inheritor_test(void);
69 kern_return_t ts_kernel_gate_test(void);
70 kern_return_t ts_kernel_turnstile_chain_test(void);
71 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
72
73 #if __ARM_VFP__
74 extern kern_return_t vfp_state_test(void);
75 #endif
76
77 extern kern_return_t kprintf_hhx_test(void);
78
79 #if defined(__arm64__)
80 kern_return_t pmap_coredump_test(void);
81 #endif
82
83 extern kern_return_t console_serial_test(void);
84 extern kern_return_t console_serial_parallel_log_tests(void);
85 extern kern_return_t test_printf(void);
86 extern kern_return_t test_os_log(void);
87 extern kern_return_t test_os_log_handles(void);
88 extern kern_return_t test_os_log_parallel(void);
89 extern kern_return_t bitmap_post_test(void);
90 extern kern_return_t counter_tests(void);
91 #if ML_IO_TIMEOUTS_ENABLED
92 extern kern_return_t ml_io_timeout_test(void);
93 #endif
94
95 #ifdef __arm64__
96 extern kern_return_t arm64_munger_test(void);
97 #if __ARM_PAN_AVAILABLE__
98 extern kern_return_t arm64_pan_test(void);
99 #endif
100 #if defined(HAS_APPLE_PAC)
101 extern kern_return_t arm64_ropjop_test(void);
102 #endif /* defined(HAS_APPLE_PAC) */
103 #if CONFIG_SPTM
104 extern kern_return_t arm64_panic_lockdown_test(void);
105 #endif /* CONFIG_SPTM */
106 #endif /* __arm64__ */
107
108 extern kern_return_t test_thread_call(void);
109
110
/*
 * Single global registration slot for a panic-intercept widget.
 * Filled by xnupost_register_panic_widget(), consumed by
 * xnupost_process_kdb_stop(), cleared by xnupost_reset_panic_widgets().
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
115
/*
 * Kernel POST table of contents. Tests run in array order; entries are
 * assigned sequential test numbers lazily by xnupost_list_tests().
 * Platform-specific entries are guarded by the same #if conditions as
 * their declarations above.
 */
struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_printf),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_handles),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
#if __ARM_PAN_AVAILABLE__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#if CONFIG_SPTM
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_panic_lockdown_test),
#endif /* CONFIG_SPTM */
#endif /* __arm64__ */
	                                   XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm64__)
	                                   XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	                                   //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	                                   XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	                                   XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	                                   XNUPOST_TEST_CONFIG_BASIC(counter_tests),
#if ML_IO_TIMEOUTS_ENABLED
	                                   XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
#endif
};
158
/* Element count of the TOC above. NOTE(review): assumes xnupost_test_data_t
 * is the element type of kernel_post_tests[] — confirm in xnupost.h. */
uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);

/* Bit flags decoded from the kernPOST boot-arg. */
#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
/* KERN_INVALID_CAPABILITY doubles as the "boot-args not parsed yet" sentinel. */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
/* Raw kernPOST_config boot-arg string (comma-separated test-number ranges). */
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
170
171 kern_return_t
xnupost_parse_config()172 xnupost_parse_config()
173 {
174 if (parse_config_retval != KERN_INVALID_CAPABILITY) {
175 return parse_config_retval;
176 }
177 PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
178
179 if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
180 kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
181 }
182
183 if (kernel_post_args != 0) {
184 parse_config_retval = KERN_SUCCESS;
185 goto out;
186 }
187 parse_config_retval = KERN_NOT_SUPPORTED;
188 out:
189 return parse_config_retval;
190 }
191
192 boolean_t
xnupost_should_run_test(uint32_t test_num)193 xnupost_should_run_test(uint32_t test_num)
194 {
195 if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
196 int64_t begin = 0, end = 999999;
197 char * b = kernel_post_test_configs;
198 while (*b) {
199 get_range_bounds(b, &begin, &end);
200 if (test_num >= begin && test_num <= end) {
201 return TRUE;
202 }
203
204 /* skip to the next "," */
205 while (*b != ',') {
206 if (*b == '\0') {
207 return FALSE;
208 }
209 b++;
210 }
211 /* skip past the ',' */
212 b++;
213 }
214 return FALSE;
215 }
216 return TRUE;
217 }
218
219 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)220 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
221 {
222 if (KERN_SUCCESS != xnupost_parse_config()) {
223 return KERN_FAILURE;
224 }
225
226 xnupost_test_t testp;
227 for (uint32_t i = 0; i < test_count; i++) {
228 testp = &test_list[i];
229 if (testp->xt_test_num == 0) {
230 assert(total_post_tests_count < UINT16_MAX);
231 testp->xt_test_num = (uint16_t)++total_post_tests_count;
232 }
233 /* make sure the boot-arg based test run list is honored */
234 if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
235 testp->xt_config |= XT_CONFIG_IGNORE;
236 if (xnupost_should_run_test(testp->xt_test_num)) {
237 testp->xt_config &= ~(XT_CONFIG_IGNORE);
238 testp->xt_config |= XT_CONFIG_RUN;
239 printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
240 }
241 }
242 printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
243 testp->xt_config);
244 }
245
246 return KERN_SUCCESS;
247 }
248
/*
 * Execute every runnable test in test_list under the T_* ktest harness,
 * recording per-test begin/end timestamps and a PASSED/FAILED/SKIPPED
 * action. Returns KERN_SUCCESS even when individual tests fail — failures
 * are recorded in each entry's xt_test_actions / xt_retval.
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	/* Running requires the kernPOST boot-arg to have requested it. */
	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* Each test starts with a clean panic-widget slot and an unresolved result. */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		/* Pass/fail is judged against the per-test expected result, not KERN_SUCCESS. */
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
313
/* Startup entry point: number and print the kernel POST table of contents. */
kern_return_t
kernel_list_tests()
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}
319
/* Startup entry point: run the kernel POST suite. */
kern_return_t
kernel_do_post()
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}
325
326 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)327 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
328 {
329 if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
330 return KERN_RESOURCE_SHORTAGE;
331 }
332
333 xt_panic_widgets.xtp_context_p = context;
334 xt_panic_widgets.xtp_func = funcp;
335 xt_panic_widgets.xtp_func_name = funcname;
336 xt_panic_widgets.xtp_outval_p = outval;
337
338 return KERN_SUCCESS;
339 }
340
/* Clear the panic-widget registration slot so the next test can register. */
void
xnupost_reset_panic_widgets()
{
	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}
346
/*
 * Hook invoked from the panic/kdb path. Dispatches the panic string to the
 * registered widget (if any) and translates the widget's verdict into:
 *   KERN_SUCCESS  — return from the panic (assertion was expected/handled)
 *   KERN_FAILURE  — continue into kdb_stop (panic proceeds)
 * Returns KERN_INVALID_CAPABILITY if POST is not active or no widget is set.
 */
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	/* capture the widget name before any early-out, for log messages below */
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
393
/*
 * Generic panic-widget callback: reports XT_RET_W_SUCCESS when the panic
 * string 's' contains the substring 'str_to_match', XT_PANIC_UNRELATED
 * otherwise. The verdict is also stored through 'outval' (cast into the
 * pointer value) when outval is non-NULL.
 */
xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
	xt_panic_return_t ret = XT_PANIC_UNRELATED;

	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
		ret = XT_RET_W_SUCCESS;
	}

	if (outval) {
		*outval = (void *)(uintptr_t)ret;
	}
	return ret;
}
409
410 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)411 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
412 {
413 uint32_t i = 0;
414 xnupost_test_t testp;
415 for (; i < test_count; i++) {
416 testp = &test_list[i];
417 testp->xt_begin_time = 0;
418 testp->xt_end_time = 0;
419 testp->xt_test_actions = XT_ACTION_NONE;
420 testp->xt_retval = -1;
421 }
422 return KERN_SUCCESS;
423 }
424
425
/*
 * Smoke-test the zone allocator: create a destructible zone for uint64_t
 * elements, allocate and free one element, and emit a sample perfdata
 * metric. The zone is intentionally left created (not destroyed here).
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* A freshly created zone should have no free elements yet. */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
449
450 /*
451 * Function used for comparison by qsort()
452 */
453 static int
compare_numbers_ascending(const void * a,const void * b)454 compare_numbers_ascending(const void * a, const void * b)
455 {
456 const uint64_t x = *(const uint64_t *)a;
457 const uint64_t y = *(const uint64_t *)b;
458 if (x < y) {
459 return -1;
460 } else if (x > y) {
461 return 1;
462 } else {
463 return 0;
464 }
465 }
466
467 /*
468 * Function to count number of bits that are set in a number.
469 * It uses Side Addition using Magic Binary Numbers
470 */
471 static int
count_bits(uint64_t number)472 count_bits(uint64_t number)
473 {
474 return __builtin_popcountll(number);
475 }
476
kern_return_t
RandomULong_test()
{
	/*
	 * Randomness test for RandomULong()
	 *
	 * This test verifies that:
	 * a. RandomULong works
	 * b. The generated numbers match the following entropy criteria:
	 * For a thousand iterations, verify:
	 * 1. mean entropy > 12 bits
	 * 2. min entropy > 4 bits
	 * 3. No Duplicate
	 * 4. No incremental/decremental pattern in a window of 3
	 * 5. No Zero
	 * 6. No -1
	 *
	 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
	 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
/* ceil(window/2); the trailing ">> 0" is a no-op kept for shape */
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 * "Bit entropy" here = Hamming distance between consecutive samples.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 * (sorting makes duplicates adjacent, so one linear pass suffices)
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
613
614
615 /* KCDATA kernel api tests */
/* Static descriptor reused by kcdata_api_test(). */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/* Sample packed structure used to exercise custom-type registration. */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));

/*
 * Subtype layout describing struct sample_disk_io_stats field by field.
 * Offsets are expressed in units of sizeof(uint64_t) since every field
 * is (an array of) uint64_t.
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		/* 4-element array: count packed into the size field */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
654
/*
 * Exercise the kcdata API end to end: negative argument checks, static
 * buffer initialization, scalar/array item allocation, convenience
 * add-with-description helpers, and custom type registration.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* poison the length so we can verify init overwrites it */
	test_kc_data.kcd_length = 0xdeadbeef;

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* only the BEGIN and END item headers should be counted so far */
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* item flags encode (type << 32 | element count) for arrays */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
765
766 /*
767 * kern_return_t
768 * kcdata_api_assert_tests()
769 * {
770 * kern_return_t retval = 0;
771 * void * assert_check_retval = NULL;
772 * test_kc_data2.kcd_length = 0xdeadbeef;
773 * mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
774 * T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
775 *
776 * retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
777 * KCFLAG_USE_MEMCOPY);
778 *
779 * T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
780 *
781 * retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
782 * T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
783 *
784 * // this will assert
785 * retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
786 * T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
787 * T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
788 *
789 * return KERN_SUCCESS;
790 * }
791 */
792
793 #if defined(__arm64__)
794
795 #include <arm/pmap.h>
796
797 #define MAX_PMAP_OBJECT_ELEMENT 100000
798
799 extern struct vm_object pmap_object_store; /* store pt pages */
800 extern unsigned long gPhysBase, gPhysSize, first_avail;
801
802 /*
803 * Define macros to transverse the pmap object structures and extract
804 * physical page number with information from low global only
805 * This emulate how Astris extracts information from coredump
806 */
807 #if defined(__arm64__)
808
/*
 * Decode a packed vm_page pointer using only lowGlo fields, mirroring the
 * decoding a debugger would perform with nothing but the low-globals page.
 * Returns 0 for a NULL packed pointer.
 */
static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	/* array-backed pages encode an index; others encode a shifted offset from the packed base */
	return (p & lowGlo.lgPmapMemFromArrayMask)
	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}
820
// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

/* first element of a packed vm_page queue */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* iteration terminates when the walk returns to the queue head */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                                      \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt));        \
	    (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* page number -> physical address using the page shift published in lowGlo */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
835
/*
 * Recover the physical page number for vm_page pointer 'm' using lowGlo
 * bookkeeping only: pages inside the vm_pages array derive their ppnum from
 * the array index; others read it from the stored field at lgPmapMemPageOffset.
 */
static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}
843
/*
 * Validate that the low-globals page carries enough information for a
 * coredump consumer (Astris) to walk the pmap object's page queue:
 * check lowGlo layout/version constants, then iterate the queue with the
 * astris_* macros and verify every page's physical address is in range.
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	/* shared lock: the queue must not change while we walk it */
	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* guard against a corrupt/cyclic queue running away */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* the pmap object must own at least one page */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
885 #endif /* defined(__arm64__) */
886
/*
 * Per-thread arguments for the turnstile kernel-primitive test
 * (ts_kernel_primitive_test). The int* fields are atomic counters used
 * as simple events via wait_threads()/wake_threads().
 */
struct ts_kern_prim_test_args {
	int *end_barrier;        /* bumped when the thread is done */
	int *notify_b;           /* bumped before taking the turnstile lock */
	int *wait_event_b;       /* waited on (to before_num) before taking the lock */
	int before_num;
	int *notify_a;           /* bumped after taking the lock */
	int *wait_event_a;       /* waited on (to after_num) after taking the lock */
	int after_num;
	int priority_to_check;   /* expected sched_pri while holding the lock; 0 = skip check */
};
897
/*
 * Block the caller until the atomic counter *var reaches num.
 * A NULL var means there is nothing to wait for.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			/* register on the wait queue BEFORE re-checking, so a
			 * concurrent wake_threads() cannot be lost */
			assert_wait((event_t) var, THREAD_UNINT);
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				/* condition became true after assert_wait:
				 * cancel the pending wait instead of blocking */
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
914
915 static void
wake_threads(int * var)916 wake_threads(
917 int* var)
918 {
919 if (var) {
920 os_atomic_inc(var, relaxed);
921 thread_wakeup((event_t) var);
922 }
923 }
924
925 extern void IOSleep(int);
926
/*
 * Worker body for ts_kernel_primitive_test: synchronize with the other
 * participants, take the sysctl turnstile test lock, and (optionally)
 * verify that turnstile priority inheritance pushed this thread's
 * sched_pri to args->priority_to_check while waiters are blocked.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	/* waiters: wait until the owner holds the lock; owner: no-op (NULL/0) */
	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	/* owner: announce the lock is held and wait for both waiters to arrive */
	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	/* give the waiters time to actually block on the turnstile */
	IOSleep(100);

	if (info->priority_to_check) {
		/* read our own sched_pri under the thread lock */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
960
/*
 * Turnstile priority-inheritance test: an owner thread at priority 80
 * takes the sysctl turnstile test lock, then two waiters at 85 and 90
 * block on it. While they wait, the owner must be pushed to 90 (the
 * highest waiter priority) — checked inside
 * thread_lock_unlock_kernel_primitive via priority_to_check.
 *
 * Returns KERN_SUCCESS; failures assert inside the workers.
 */
kern_return_t
ts_kernel_primitive_test(void)
{
	thread_t owner, thread1, thread2;
	struct ts_kern_prim_test_args targs[2] = {};
	kern_return_t result;
	int end_barrier = 0;
	int owner_locked = 0;
	int waiters_ready = 0;

	T_LOG("Testing turnstile kernel primitive");

	/* owner: lock immediately, then wait for both waiters before checking pri */
	targs[0].notify_b = NULL;
	targs[0].wait_event_b = NULL;
	targs[0].before_num = 0;
	targs[0].notify_a = &owner_locked;
	targs[0].wait_event_a = &waiters_ready;
	targs[0].after_num = 2;
	targs[0].priority_to_check = 90;
	targs[0].end_barrier = &end_barrier;

	// Start owner with priority 80
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
	T_ASSERT(result == KERN_SUCCESS, "Starting owner");

	/* waiters: wait for the owner to hold the lock, then block on it */
	targs[1].notify_b = &waiters_ready;
	targs[1].wait_event_b = &owner_locked;
	targs[1].before_num = 1;
	targs[1].notify_a = NULL;
	targs[1].wait_event_a = NULL;
	targs[1].after_num = 0;
	targs[1].priority_to_check = 0;
	targs[1].end_barrier = &end_barrier;

	// Start waiters with priority 85 and 90
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");

	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");

	/* all three participants bump end_barrier when done */
	wait_threads(&end_barrier, 3);

	return KERN_SUCCESS;
}
1006
/* prim_type selector for the sleep-with-inheritor/gate tests below */
#define MTX_LOCK 0
#define RW_LOCK 1

/* default number of worker threads per test */
#define NUM_THREADS 4
1011
/*
 * State shared by all multithreaded synchronization tests; embedded at
 * the head of each test's info struct.
 */
struct synch_test_common {
	unsigned int nthreads;   /* number of worker threads started */
	thread_t *threads;       /* array of started threads (NULL until set) */
	int max_pri;             /* highest priority among the workers */
	int test_done;           /* completion counter, see notify_waiter() */
};
1018
1019 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1020 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1021 {
1022 info->nthreads = nthreads;
1023 info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1024 if (!info->threads) {
1025 return ENOMEM;
1026 }
1027
1028 return KERN_SUCCESS;
1029 }
1030
/*
 * Release the thread array allocated by init_synch_test_common.
 */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree_type(thread_t, info->nthreads, info->threads);
}
1036
/*
 * Start info->nthreads workers running func, at priorities 75, 80, 85, …
 * (capped so max_pri never exceeds 95). When sleep_after_first is set,
 * pause 100ms after the first thread so it reliably runs first.
 */
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	/* clear the slots; workers poll these via os_atomic_load (acquire) */
	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	/* record the highest priority any worker will have (capped at 95) */
	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		/* release-store pairs with the acquire loads in wait_for_waiters */
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}
1068
1069 static unsigned int
get_max_pri(struct synch_test_common * info)1070 get_max_pri(struct synch_test_common * info)
1071 {
1072 return info->max_pri;
1073 }
1074
/*
 * Block until every worker has called notify_waiter().
 */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1080
/*
 * Mark the calling worker as finished; wakes wait_all_thread().
 */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1086
/*
 * Poll until every other worker thread has stopped running, i.e. is
 * blocked on the primitive under test. Slots holding (thread_t)1 are
 * threads that excluded themselves via exclude_current_waiter() and
 * are skipped. NOTE(review): this is a best-effort busy poll of
 * thread->state with sleeps, not a precise barrier.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		/* first wait for start_threads to publish the slot */
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			/* then wait for that thread to go off-core (block) */
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					/* slot marked as excluded — skip it */
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				/* don't trust TH_RUN until the thread has actually started */
				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1126
/*
 * Replace the calling thread's slot in info->threads with the sentinel
 * (thread_t)1 so wait_for_waiters() will not wait for it to block.
 */
static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		/* wait until start_threads has published this slot */
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
1147
/*
 * Shared state for the sleep-with-inheritor, gate and reflock tests.
 * prim_type selects whether mtx_lock or rw_lock protects the state.
 */
struct info_sleep_inheritor_test {
	struct synch_test_common head;   /* must stay first: tests cast to it */
	lck_mtx_t mtx_lock;
	lck_rw_t rw_lock;
	decl_lck_mtx_gate_data(, gate);  /* embedded gate used when use_alloc_gate is false */
	boolean_t gate_closed;
	int prim_type;                   /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;
	int synch_value;                 /* target for the synch counter */
	int synch;                       /* rendezvous counter (wait/wake_threads) */
	int value;
	int handoff_failure;
	thread_t thread_inheritor;       /* current inheritor; also the sleep event */
	bool use_alloc_gate;             /* true: use alloc_gate instead of the embedded gate */
	gate_t *alloc_gate;
	struct obj_cached **obj_cache;   /* reflock test cache, see init_cache() */
	kern_apfs_reflock_data(, reflock);
	int reflock_protected_status;
};
1169
1170 static void
primitive_lock(struct info_sleep_inheritor_test * info)1171 primitive_lock(struct info_sleep_inheritor_test *info)
1172 {
1173 switch (info->prim_type) {
1174 case MTX_LOCK:
1175 lck_mtx_lock(&info->mtx_lock);
1176 break;
1177 case RW_LOCK:
1178 lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1179 break;
1180 default:
1181 panic("invalid type %d", info->prim_type);
1182 }
1183 }
1184
1185 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1186 primitive_unlock(struct info_sleep_inheritor_test *info)
1187 {
1188 switch (info->prim_type) {
1189 case MTX_LOCK:
1190 lck_mtx_unlock(&info->mtx_lock);
1191 break;
1192 case RW_LOCK:
1193 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1194 break;
1195 default:
1196 panic("invalid type %d", info->prim_type);
1197 }
1198 }
1199
1200 static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test * info)1201 primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
1202 {
1203 wait_result_t ret = KERN_SUCCESS;
1204 switch (info->prim_type) {
1205 case MTX_LOCK:
1206 ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1207 break;
1208 case RW_LOCK:
1209 ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1210 break;
1211 default:
1212 panic("invalid type %d", info->prim_type);
1213 }
1214
1215 return ret;
1216 }
1217
1218 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1219 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1220 {
1221 switch (info->prim_type) {
1222 case MTX_LOCK:
1223 case RW_LOCK:
1224 wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1225 break;
1226 default:
1227 panic("invalid type %d", info->prim_type);
1228 }
1229 }
1230
1231 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1232 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1233 {
1234 switch (info->prim_type) {
1235 case MTX_LOCK:
1236 case RW_LOCK:
1237 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1238 break;
1239 default:
1240 panic("invalid type %d", info->prim_type);
1241 }
1242 return;
1243 }
1244
1245 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1246 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1247 {
1248 switch (info->prim_type) {
1249 case MTX_LOCK:
1250 case RW_LOCK:
1251 change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1252 break;
1253 default:
1254 panic("invalid type %d", info->prim_type);
1255 }
1256 return;
1257 }
1258
1259 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1260 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1261 {
1262 gate_t *gate = &info->gate;
1263 if (info->use_alloc_gate == true) {
1264 gate = info->alloc_gate;
1265 }
1266 kern_return_t ret = KERN_SUCCESS;
1267 switch (info->prim_type) {
1268 case MTX_LOCK:
1269 ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1270 break;
1271 case RW_LOCK:
1272 ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1273 break;
1274 default:
1275 panic("invalid type %d", info->prim_type);
1276 }
1277 return ret;
1278 }
1279
1280 static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test * info)1281 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1282 {
1283 gate_t *gate = &info->gate;
1284 if (info->use_alloc_gate == true) {
1285 gate = info->alloc_gate;
1286 }
1287 gate_wait_result_t ret = GATE_OPENED;
1288 switch (info->prim_type) {
1289 case MTX_LOCK:
1290 ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1291 break;
1292 case RW_LOCK:
1293 ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1294 break;
1295 default:
1296 panic("invalid type %d", info->prim_type);
1297 }
1298 return ret;
1299 }
1300
1301 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1302 primitive_gate_open(struct info_sleep_inheritor_test *info)
1303 {
1304 gate_t *gate = &info->gate;
1305 if (info->use_alloc_gate == true) {
1306 gate = info->alloc_gate;
1307 }
1308 switch (info->prim_type) {
1309 case MTX_LOCK:
1310 lck_mtx_gate_open(&info->mtx_lock, gate);
1311 break;
1312 case RW_LOCK:
1313 lck_rw_gate_open(&info->rw_lock, gate);
1314 break;
1315 default:
1316 panic("invalid type %d", info->prim_type);
1317 }
1318 }
1319
1320 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1321 primitive_gate_close(struct info_sleep_inheritor_test *info)
1322 {
1323 gate_t *gate = &info->gate;
1324 if (info->use_alloc_gate == true) {
1325 gate = info->alloc_gate;
1326 }
1327
1328 switch (info->prim_type) {
1329 case MTX_LOCK:
1330 lck_mtx_gate_close(&info->mtx_lock, gate);
1331 break;
1332 case RW_LOCK:
1333 lck_rw_gate_close(&info->rw_lock, gate);
1334 break;
1335 default:
1336 panic("invalid type %d", info->prim_type);
1337 }
1338 }
1339
1340 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1341 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1342 {
1343 gate_t *gate = &info->gate;
1344 if (info->use_alloc_gate == true) {
1345 gate = info->alloc_gate;
1346 }
1347
1348 switch (info->prim_type) {
1349 case MTX_LOCK:
1350 lck_mtx_gate_steal(&info->mtx_lock, gate);
1351 break;
1352 case RW_LOCK:
1353 lck_rw_gate_steal(&info->rw_lock, gate);
1354 break;
1355 default:
1356 panic("invalid type %d", info->prim_type);
1357 }
1358 }
1359
1360 static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test * info,int flags)1361 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1362 {
1363 gate_t *gate = &info->gate;
1364 if (info->use_alloc_gate == true) {
1365 gate = info->alloc_gate;
1366 }
1367
1368 kern_return_t ret = KERN_SUCCESS;
1369 switch (info->prim_type) {
1370 case MTX_LOCK:
1371 ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1372 break;
1373 case RW_LOCK:
1374 ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1375 break;
1376 default:
1377 panic("invalid type %d", info->prim_type);
1378 }
1379 return ret;
1380 }
1381
1382 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1383 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1384 {
1385 gate_t *gate = &info->gate;
1386 if (info->use_alloc_gate == true) {
1387 gate = info->alloc_gate;
1388 }
1389
1390 switch (info->prim_type) {
1391 case MTX_LOCK:
1392 lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1393 break;
1394 case RW_LOCK:
1395 lck_rw_gate_assert(&info->rw_lock, gate, type);
1396 break;
1397 default:
1398 panic("invalid type %d", info->prim_type);
1399 }
1400 }
1401
1402 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1403 primitive_gate_init(struct info_sleep_inheritor_test *info)
1404 {
1405 switch (info->prim_type) {
1406 case MTX_LOCK:
1407 lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1408 break;
1409 case RW_LOCK:
1410 lck_rw_gate_init(&info->rw_lock, &info->gate);
1411 break;
1412 default:
1413 panic("invalid type %d", info->prim_type);
1414 }
1415 }
1416
1417 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1418 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1419 {
1420 switch (info->prim_type) {
1421 case MTX_LOCK:
1422 lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1423 break;
1424 case RW_LOCK:
1425 lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1426 break;
1427 default:
1428 panic("invalid type %d", info->prim_type);
1429 }
1430 }
1431
1432 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1433 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1434 {
1435 gate_t *gate;
1436 switch (info->prim_type) {
1437 case MTX_LOCK:
1438 gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1439 break;
1440 case RW_LOCK:
1441 gate = lck_rw_gate_alloc_init(&info->rw_lock);
1442 break;
1443 default:
1444 panic("invalid type %d", info->prim_type);
1445 }
1446 info->alloc_gate = gate;
1447 }
1448
1449 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1450 primitive_gate_free(struct info_sleep_inheritor_test *info)
1451 {
1452 T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1453
1454 switch (info->prim_type) {
1455 case MTX_LOCK:
1456 lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1457 break;
1458 case RW_LOCK:
1459 lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1460 break;
1461 default:
1462 panic("invalid type %d", info->prim_type);
1463 }
1464 info->alloc_gate = NULL;
1465 }
1466
/*
 * Worker that uses sleep_with_inheritor/wakeup_one_with_inheritor as a
 * hand-off mutex: the first thread in becomes the inheritor ("owner"),
 * the rest sleep pushing on it; each owner wakes exactly one successor.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first thread in: become the inheritor (owner) */
		info->thread_inheritor = current_thread();
	} else {
		/* otherwise sleep, pushing our priority on the owner */
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	/* "critical section": by the mutex-like protocol we own it here */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	/* hand off to exactly one waiter; stores it in thread_inheritor */
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* no waiter left: only the last thread may see this */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* wakeup_one_with_inheritor returned a thread reference: drop it */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* all pushes must be gone once we no longer inherit */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1519
/*
 * Worker where one thread becomes the inheritor and everyone else
 * sleeps pushing on it; once all waiters block, the inheritor must run
 * at the highest waiter priority, then wakes everybody at once.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first thread in: become the inheritor */
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* wait for every other worker to announce itself */
		wait_threads(&info->synch, info->synch_value - 1);

		/* and then to actually block on the sleep event */
		wait_for_waiters((struct synch_test_common *)info);

		/* the waiters' pushes should have lifted us to max_pri */
		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* no promotion may remain after the wakeup */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1559
/*
 * Worker exercising change_sleep_inheritor: the first thread becomes
 * inheritor; the second "steals" the role via
 * primitive_change_sleep_inheritor and must then receive the remaining
 * waiters' push; the rest just sleep. Threads that take an active role
 * exclude themselves from wait_for_waiters bookkeeping.
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* original inheritor */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		/* two threads (this one and the stealer) don't bump synch */
		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* only wake if the stealer didn't already take over */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* first non-owner: steal the inheritor role */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* the redirected pushes must land on the stealer */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* plain waiter: record the highest waiter pri, then sleep */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1620
/*
 * Worker exercising sleep_with_inheritor with a NULL inheritor: all
 * but the last thread sleep without donating a push; the last thread
 * in (value reaches 0) wakes everyone.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* last thread in wakes all the sleepers */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* NULL inheritor: sleep without pushing on anyone */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1647
/*
 * Stress lck_mtx_sleep_with_inheritor in two phases: first with the
 * mutex held normally (randomly LCK_SLEEP_DEFAULT vs LCK_SLEEP_UNLOCK),
 * then with the mutex held as spin (LCK_SLEEP_SPIN vs
 * LCK_SLEEP_SPIN_ALWAYS). In each round the first thread in becomes
 * the inheritor and checks it gets pushed to max_pri.
 */
static void
thread_mtx_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/* phase 1: mutex held as a regular mutex */
	for (i = 0; i < 10; i++) {
		lck_mtx_lock(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			/* inheritor for this round */
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			/* the sleepers' pushes must have lifted us to max_pri */
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			os_atomic_store(&info->synch, 0, relaxed);

			lck_mtx_lock(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		/* waiter: pick a random sleep flavor */
		read_random(&rand, sizeof(rand));
		mod_rand = rand % 2;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			/* sleep re-acquires the mutex; unlock it ourselves */
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			/* LCK_SLEEP_UNLOCK leaves the mutex released on return */
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/*
	 * spin here to stop using the lock as mutex
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	/* phase 2: mutex held as spin */
	for (i = 0; i < 10; i++) {
		/* read_random might sleep so read it before acquiring the mtx as spin */
		read_random(&rand, sizeof(rand));

		lck_mtx_lock_spin(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			lck_mtx_lock_spin(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		mod_rand = rand % 2;
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1747
/*
 * Stress lck_rw_sleep_with_inheritor: the first thread to upgrade
 * shared->exclusive becomes inheritor for the round and verifies the
 * priority push; everyone else sleeps with one of the four LCK_SLEEP
 * flavors (DEFAULT / UNLOCK / SHARED / EXCLUSIVE) chosen at random.
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			/* upgrade; on failure the lock was dropped: start over */
			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				/* re-check: someone may have claimed it before the upgrade */
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				goto try_again;
			}
		}

		/* waiter: pick a random sleep flavor; type reflects how we hold the lock */
		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			/* re-acquires in the mode we held (type); unlock ourselves */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			/* returns with the lock released */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			/* returns holding the lock shared */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			/* returns holding the lock exclusive */
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1824
/* lifecycle states for an obj_cached entry */
#define OBJ_STATE_UNUSED 0
#define OBJ_STATE_REAL 1
#define OBJ_STATE_PLACEHOLDER 2

/* sized to hold "I am groot" plus the NUL terminator */
#define OBJ_BUFF_SIZE 11
/*
 * A cache entry whose refcount/lifetime is mediated by a
 * kern_apfs_reflock (see the reflock tests below).
 */
struct obj_cached {
	int obj_id;
	int obj_state;                        /* OBJ_STATE_* */
	struct kern_apfs_reflock *obj_refcount;
	char obj_buff[OBJ_BUFF_SIZE];
};

#define CACHE_SIZE 2
#define USE_CACHE_ROUNDS 15

#define REFCOUNT_REFLOCK_ROUNDS 15

/*
 * For the reflock cache test the cache is allocated
 * and its pointer is saved in obj_cache.
 * The lock for the cache is going to be one of the exclusive
 * locks already present in struct info_sleep_inheritor_test.
 */
1848
/*
 * Allocate one cache entry in the UNUSED state with a fresh reflock
 * and the canonical buffer contents ("I am groot").
 */
static struct obj_cached *
alloc_init_cache_entry(void)
{
	struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
	cache_entry->obj_id = 0;
	cache_entry->obj_state = OBJ_STATE_UNUSED;
	cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
	/* OBJ_BUFF_SIZE is exactly strlen("I am groot") + 1 */
	snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
	return cache_entry;
}
1859
1860 static void
init_cache(struct info_sleep_inheritor_test * info)1861 init_cache(struct info_sleep_inheritor_test *info)
1862 {
1863 struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1864
1865 int i;
1866 for (i = 0; i < CACHE_SIZE; i++) {
1867 obj_cache[i] = alloc_init_cache_entry();
1868 }
1869
1870 info->obj_cache = obj_cache;
1871 }
1872
1873 static void
check_cache_empty(struct info_sleep_inheritor_test * info)1874 check_cache_empty(struct info_sleep_inheritor_test *info)
1875 {
1876 struct obj_cached **obj_cache = info->obj_cache;
1877
1878 int i, ret;
1879 for (i = 0; i < CACHE_SIZE; i++) {
1880 if (obj_cache[i] != NULL) {
1881 T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1882 T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1883 ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1884 T_ASSERT(ret == 0, "checked buff correctly emptied");
1885 }
1886 }
1887 }
1888
1889 static void
free_cache(struct info_sleep_inheritor_test * info)1890 free_cache(struct info_sleep_inheritor_test *info)
1891 {
1892 struct obj_cached **obj_cache = info->obj_cache;
1893
1894 int i;
1895 for (i = 0; i < CACHE_SIZE; i++) {
1896 if (obj_cache[i] != NULL) {
1897 kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1898 obj_cache[i]->obj_refcount = NULL;
1899 kfree_type(struct obj_cached, 1, obj_cache[i]);
1900 obj_cache[i] = NULL;
1901 }
1902 }
1903
1904 kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1905 info->obj_cache = NULL;
1906 }
1907
1908 static struct obj_cached *
find_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info)1909 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1910 {
1911 struct obj_cached **obj_cache = info->obj_cache;
1912 int i;
1913 for (i = 0; i < CACHE_SIZE; i++) {
1914 if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1915 return obj_cache[i];
1916 }
1917 }
1918 return NULL;
1919 }
1920
1921 static bool
free_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info,struct obj_cached * expected)1922 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, struct obj_cached *expected)
1923 {
1924 struct obj_cached **obj_cache = info->obj_cache;
1925 int i;
1926 for (i = 0; i < CACHE_SIZE; i++) {
1927 if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1928 assert(obj_cache[i] == expected);
1929 kfree_type(struct obj_cached, 1, obj_cache[i]);
1930 obj_cache[i] = NULL;
1931 return true;
1932 }
1933 }
1934 return false;
1935 }
1936
1937 static struct obj_cached *
find_empty_spot_in_cache(struct info_sleep_inheritor_test * info)1938 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1939 {
1940 struct obj_cached **obj_cache = info->obj_cache;
1941 int i;
1942 for (i = 0; i < CACHE_SIZE; i++) {
1943 if (obj_cache[i] == NULL) {
1944 obj_cache[i] = alloc_init_cache_entry();
1945 return obj_cache[i];
1946 }
1947 if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1948 return obj_cache[i];
1949 }
1950 }
1951 return NULL;
1952 }
1953
/*
 * Look up (or create) the cache entry for obj_id and take a reference
 * on it, returning a pointer to its buffer through *buff.
 *
 * Returns 0 on success with a reference held on the object (drop it
 * with put_obj_cache()), or -1 if the cache is full and every entry
 * is in use.
 *
 * If the object needs initialization the entry is marked
 * OBJ_STATE_PLACEHOLDER and its reflock is held across the (simulated)
 * population, so threads waiting on the reflock push on this thread.
 */
static int
get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	kern_apfs_reflock_t refcount = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;

try_again:
	primitive_lock(info);
	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
		/* Found an allocated object on the cache with same id */

		/*
		 * copy the pointer to obj_refcount as obj might
		 * get deallocated after primitive_unlock()
		 */
		refcount = obj->obj_refcount;
		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
			/*
			 * Got a ref, let's check the state
			 */
			switch (obj->obj_state) {
			case OBJ_STATE_UNUSED:
				goto init;
			case OBJ_STATE_REAL:
				goto done;
			case OBJ_STATE_PLACEHOLDER:
				/*
				 * A placeholder holds the reflock, so try_get_ref
				 * cannot have succeeded on it.
				 */
				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
			default:
				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
			}
		} else {
			/*
			 * Didn't get a ref.
			 * This means or an obj_put() of the last ref is ongoing
			 * or a init of the object is happening.
			 * Both cases wait for that to finish and retry.
			 * While waiting the thread that is holding the reflock
			 * will get a priority at least as the one of this thread.
			 */
			primitive_unlock(info);
			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			goto try_again;
		}
	} else {
		/* Look for a spot on the cache where we can save the object */

		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
			/*
			 * Sadness cache is full, and everyting in the cache is
			 * used.
			 */
			primitive_unlock(info);
			return -1;
		} else {
			/*
			 * copy the pointer to obj_refcount as obj might
			 * get deallocated after primitive_unlock()
			 */
			refcount = obj->obj_refcount;
			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
				/*
				 * Got a ref on a OBJ_STATE_UNUSED obj.
				 * Recicle time.
				 */
				obj->obj_id = obj_id;
				goto init;
			} else {
				/*
				 * This could happen if the obj_put() has just changed the
				 * state to OBJ_STATE_UNUSED, but not unlocked the reflock yet.
				 */
				primitive_unlock(info);
				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
				goto try_again;
			}
		}
	}
init:
	assert(obj->obj_id == obj_id);
	assert(obj->obj_state == OBJ_STATE_UNUSED);
	/*
	 * We already got a ref on the object, but we need
	 * to initialize it. Mark it as
	 * OBJ_STATE_PLACEHOLDER and get the obj_reflock.
	 * In this way all thread waiting for this init
	 * to finish will push on this thread.
	 */
	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
	/* Cannot fail: we hold a ref and nobody else can lock an UNUSED entry now. */
	assert(ret == true);
	obj->obj_state = OBJ_STATE_PLACEHOLDER;
	primitive_unlock(info);

	//let's pretend we are populating the obj
	IOSleep(10);
	/*
	 * obj will not be deallocated while I hold a ref.
	 * So it is safe to access it.
	 */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);

	primitive_lock(info);
	/* The entry must still be in the cache, untouched, while we held the reflock. */
	obj2 = find_id_in_cache(obj_id, info);
	assert(obj == obj2);
	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);

	obj->obj_state = OBJ_STATE_REAL;
	kern_apfs_reflock_unlock(refcount);

done:
	*buff = obj->obj_buff;
	primitive_unlock(info);
	return 0;
}
2069
/*
 * Drop the reference taken by get_obj_cache() on obj_id.
 *
 * If this was the last reference the reflock is acquired atomically
 * with the put (KERN_APFS_REFLOCK_IN_LOCK_IF_LAST), the object is
 * (pretend) flushed and reset to OBJ_STATE_UNUSED and, when free is
 * true, the entry is also removed from the cache and its reflock freed.
 */
static void
put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_t refcount = NULL;

	primitive_lock(info);
	obj = find_id_in_cache(obj_id, info);
	primitive_unlock(info);

	/*
	 * Nobody should have been able to remove obj_id
	 * from the cache.
	 */
	assert(obj != NULL);
	assert(obj->obj_state == OBJ_STATE_REAL);

	refcount = obj->obj_refcount;

	/*
	 * This should never fail, as or the reflock
	 * was acquired when the state was OBJ_STATE_UNUSED to init,
	 * or from a put that reached zero. And if the latter
	 * happened subsequent reflock_get_ref() will had to wait to transition
	 * to OBJ_STATE_REAL.
	 */
	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
	assert(ret == true);
	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
		/* Not the last reference: nothing else to do. */
		return;
	}

	/*
	 * Note: nobody at this point will be able to get a ref or a lock on
	 * refcount.
	 * All people waiting on refcount will push on this thread.
	 */

	//let's pretend we are flushing the obj somewhere.
	IOSleep(10);
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");

	primitive_lock(info);
	obj->obj_state = OBJ_STATE_UNUSED;
	if (free) {
		obj2 = find_id_in_cache(obj_id, info);
		assert(obj == obj2);

		ret = free_id_in_cache(obj_id, info, obj);
		assert(ret == true);
	}
	primitive_unlock(info);

	/* Wake any thread waiting in kern_apfs_reflock_wait_for_unlock(). */
	kern_apfs_reflock_unlock(refcount);

	if (free) {
		/* The entry itself was freed above; the reflock is freed last. */
		kern_apfs_reflock_free(refcount);
	}
}
2131
/*
 * Worker thread for the reflock cache test.
 *
 * Derives a per-thread object id, then for USE_CACHE_ROUNDS rounds:
 * gets the object from the cache, checks its buffer holds the expected
 * "I am <id>" string (twice, around a sleep, to catch concurrent
 * corruption), and puts it back. On even rounds the put also frees the
 * cache entry, exercising both the recycle and the realloc paths.
 */
static void
thread_use_cache(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	int my_obj;

	/*
	 * Pick an id in [1, CACHE_SIZE + 1]: one more id than cache slots,
	 * so the cache can fill up and threads contend for slots.
	 */
	primitive_lock(info);
	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
	primitive_unlock(info);

	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
	/*
	 * This is the string I would expect to see
	 * on my_obj buff.
	 */
	char my_string[OBJ_BUFF_SIZE];
	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);

	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
		char *buff;
		while (get_obj_cache(my_obj, info, &buff) == -1) {
			/*
			 * Cache is full, wait.
			 */
			IOSleep(10);
		}
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		IOSleep(10);
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		/* Free the entry every other round. */
		put_obj_cache(my_obj, info, (i % 2 == 0));
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2175
/*
 * Worker thread for the refcount reflock test.
 *
 * Repeatedly takes and drops a reference on the shared reflock using
 * KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST / _IN_LOCK_IF_LAST, so the 0->1
 * and 1->0 transitions happen with the reflock held. The variable
 * reflock_protected_status mirrors "refcount is non-zero" and is only
 * written while the reflock is locked.
 */
static void
thread_refcount_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_in_flags_t in_flags;

	T_LOG("Thread %p started", current_thread());
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
		/* Alternate declaring the intention to wait on failure. */
		if ((i % 2) == 0) {
			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
		}
		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
		if (ret == true) {
			/* got reference, check if we did 0->1 */
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 0, "status init check");
				info->reflock_protected_status = 1;
				kern_apfs_reflock_unlock(&info->reflock);
			} else {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
			}
			/* release the reference and check if we did 1->0 */
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
				info->reflock_protected_status = 0;
				kern_apfs_reflock_unlock(&info->reflock);
			}
		} else {
			/* didn't get a reference */
			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
				/* We declared WILL_WAIT, so we must wait for the holder to unlock. */
				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2227
/*
 * Worker thread for the force reflock test.
 *
 * Exactly one thread (the first to increment info->value) locks the
 * reflock with KERN_APFS_REFLOCK_IN_ALLOW_FORCE and holds it for a
 * while; every other thread takes and drops references with
 * KERN_APFS_REFLOCK_IN_FORCE, which must succeed even while the
 * reflock is locked.
 */
static void
thread_force_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	/* First thread to bump value becomes the locker. */
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* Hold the lock while the others force refs through it. */
		IOSleep(100);
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2268
/*
 * Worker thread for the lock reflock test.
 *
 * Exactly one thread (the first to increment info->value) locks the
 * reflock in default mode and sets reflock_protected_status while
 * holding it. The others try to take references in default mode: a
 * successful get must only be possible after the lock is released,
 * i.e. when the status is back to 0.
 */
static void
thread_lock_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	/* First thread to bump value becomes the locker. */
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
		info->reflock_protected_status = 1;
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* Hold the lock for a while, then clear the status before unlocking. */
		IOSleep(100);
		info->reflock_protected_status = 0;
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
			if (ret == true) {
				/* A default-mode get can only succeed with the lock released. */
				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
				break;
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2314
2315 static void
test_cache_reflock(struct info_sleep_inheritor_test * info)2316 test_cache_reflock(struct info_sleep_inheritor_test *info)
2317 {
2318 info->synch = 0;
2319 info->synch_value = info->head.nthreads;
2320
2321 info->value = info->head.nthreads;
2322 /*
2323 * Use the mtx as cache lock
2324 */
2325 info->prim_type = MTX_LOCK;
2326
2327 init_cache(info);
2328
2329 start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2330 wait_all_thread((struct synch_test_common *)info);
2331
2332 check_cache_empty(info);
2333 free_cache(info);
2334 }
2335
2336 static void
test_refcount_reflock(struct info_sleep_inheritor_test * info)2337 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2338 {
2339 info->synch = 0;
2340 info->synch_value = info->head.nthreads;
2341 kern_apfs_reflock_init(&info->reflock);
2342 info->reflock_protected_status = 0;
2343
2344 start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2345 wait_all_thread((struct synch_test_common *)info);
2346
2347 kern_apfs_reflock_destroy(&info->reflock);
2348
2349 T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2350 }
2351
2352 static void
test_force_reflock(struct info_sleep_inheritor_test * info)2353 test_force_reflock(struct info_sleep_inheritor_test *info)
2354 {
2355 info->synch = 0;
2356 info->synch_value = info->head.nthreads;
2357 kern_apfs_reflock_init(&info->reflock);
2358 info->value = 0;
2359
2360 start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2361 wait_all_thread((struct synch_test_common *)info);
2362
2363 kern_apfs_reflock_destroy(&info->reflock);
2364 }
2365
2366 static void
test_lock_reflock(struct info_sleep_inheritor_test * info)2367 test_lock_reflock(struct info_sleep_inheritor_test *info)
2368 {
2369 info->synch = 0;
2370 info->synch_value = info->head.nthreads;
2371 kern_apfs_reflock_init(&info->reflock);
2372 info->value = 0;
2373
2374 start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2375 wait_all_thread((struct synch_test_common *)info);
2376
2377 kern_apfs_reflock_destroy(&info->reflock);
2378 }
2379
2380 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)2381 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2382 {
2383 info->prim_type = prim_type;
2384 info->synch = 0;
2385 info->synch_value = info->head.nthreads;
2386
2387 info->thread_inheritor = NULL;
2388
2389 start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2390 wait_all_thread((struct synch_test_common *)info);
2391 }
2392
2393 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)2394 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2395 {
2396 info->prim_type = prim_type;
2397
2398 info->synch = 0;
2399 info->synch_value = info->head.nthreads;
2400 info->value = 0;
2401 info->handoff_failure = 0;
2402 info->thread_inheritor = NULL;
2403
2404 start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2405 wait_all_thread((struct synch_test_common *)info);
2406
2407 T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2408 T_ASSERT(info->handoff_failure == 1, "handoff failures");
2409 }
2410
2411 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2412 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2413 {
2414 info->prim_type = prim_type;
2415
2416 info->thread_inheritor = NULL;
2417 info->steal_pri = 0;
2418 info->synch = 0;
2419 info->synch_value = info->head.nthreads;
2420
2421 start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2422 wait_all_thread((struct synch_test_common *)info);
2423 }
2424
2425 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2426 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2427 {
2428 info->prim_type = prim_type;
2429 info->synch = 0;
2430 info->synch_value = info->head.nthreads;
2431
2432 info->thread_inheritor = NULL;
2433 info->value = info->head.nthreads;
2434
2435 start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2436 wait_all_thread((struct synch_test_common *)info);
2437 }
2438
2439 static void
test_rw_lock(struct info_sleep_inheritor_test * info)2440 test_rw_lock(struct info_sleep_inheritor_test *info)
2441 {
2442 info->thread_inheritor = NULL;
2443 info->value = info->head.nthreads;
2444 info->synch = 0;
2445 info->synch_value = info->head.nthreads;
2446
2447 start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2448 wait_all_thread((struct synch_test_common *)info);
2449 }
2450
2451 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)2452 test_mtx_lock(struct info_sleep_inheritor_test *info)
2453 {
2454 info->thread_inheritor = NULL;
2455 info->value = info->head.nthreads;
2456 info->synch = 0;
2457 info->synch_value = info->head.nthreads;
2458
2459 start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2460 wait_all_thread((struct synch_test_common *)info);
2461 }
2462
2463 kern_return_t
ts_kernel_sleep_inheritor_test(void)2464 ts_kernel_sleep_inheritor_test(void)
2465 {
2466 struct info_sleep_inheritor_test info = {};
2467
2468 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2469
2470 lck_attr_t* lck_attr = lck_attr_alloc_init();
2471 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2472 lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
2473
2474 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2475 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2476
2477 /*
2478 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2479 */
2480 T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
2481 test_sleep_with_wake_all(&info, MTX_LOCK);
2482
2483 /*
2484 * Testing rw_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2485 */
2486 T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
2487 test_sleep_with_wake_all(&info, RW_LOCK);
2488
2489 /*
2490 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
2491 */
2492 T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
2493 test_sleep_with_wake_one(&info, MTX_LOCK);
2494
2495 /*
2496 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
2497 */
2498 T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
2499 test_sleep_with_wake_one(&info, RW_LOCK);
2500
2501 /*
2502 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2503 * and change_sleep_inheritor
2504 */
2505 T_LOG("Testing change_sleep_inheritor with mxt sleep");
2506 test_change_sleep_inheritor(&info, MTX_LOCK);
2507
2508 /*
2509 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2510 * and change_sleep_inheritor
2511 */
2512 T_LOG("Testing change_sleep_inheritor with rw sleep");
2513 test_change_sleep_inheritor(&info, RW_LOCK);
2514
2515 /*
2516 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2517 * with inheritor NULL
2518 */
2519 T_LOG("Testing inheritor NULL");
2520 test_no_inheritor(&info, MTX_LOCK);
2521
2522 /*
2523 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2524 * with inheritor NULL
2525 */
2526 T_LOG("Testing inheritor NULL");
2527 test_no_inheritor(&info, RW_LOCK);
2528
2529 /*
2530 * Testing mtx locking combinations
2531 */
2532 T_LOG("Testing mtx locking combinations");
2533 test_mtx_lock(&info);
2534
2535 /*
2536 * Testing rw locking combinations
2537 */
2538 T_LOG("Testing rw locking combinations");
2539 test_rw_lock(&info);
2540
2541 /*
2542 * Testing reflock / cond_sleep_with_inheritor
2543 */
2544 T_LOG("Test cache reflock + cond_sleep_with_inheritor");
2545 test_cache_reflock(&info);
2546 T_LOG("Test force reflock + cond_sleep_with_inheritor");
2547 test_force_reflock(&info);
2548 T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
2549 test_refcount_reflock(&info);
2550 T_LOG("Test lock reflock + cond_sleep_with_inheritor");
2551 test_lock_reflock(&info);
2552
2553 destroy_synch_test_common((struct synch_test_common *)&info);
2554
2555 lck_attr_free(lck_attr);
2556 lck_grp_attr_free(lck_grp_attr);
2557 lck_rw_destroy(&info.rw_lock, lck_grp);
2558 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2559 lck_grp_free(lck_grp);
2560
2561 return KERN_SUCCESS;
2562 }
2563
/*
 * Worker thread for the gate steal test.
 *
 * The first thread to run closes the gate and becomes the inheritor.
 * The first contender steals the gate (primitive_gate_steal) and later
 * asserts that it is running at info->steal_pri, the highest priority
 * recorded among the waiters (the gate pushes the waiters' priority on
 * the holder). All remaining threads record their priority and wait on
 * the gate.
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* First thread: close the gate and become the inheritor. */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		/* Only reopen if the gate was not stolen in the meantime. */
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* First contender: steal the gate from the closer. */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* The gate must have pushed the max waiter priority on us. */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* Waiter: record the highest waiter priority, then block on the gate. */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* Any priority push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2623
/*
 * Worker thread for the allocated-gate free test.
 *
 * The thread that wins primitive_gate_try_close() waits for all the
 * others to block on the gate, then opens it and frees it. The losers
 * wait on the gate and must be woken with GATE_OPENED (no handoff).
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* Winner: hold the gate until everybody else is waiting on it. */
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		primitive_gate_open(info);
		/* Free the dynamically allocated gate while others are being woken. */
		primitive_gate_free(info);
	} else {
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2659
/*
 * Worker thread that uses the gate as a mutex with handoff.
 *
 * Each thread either closes the gate or waits for a direct handoff
 * (GATE_HANDOFF), bumps info->value while "holding" the gate, and then
 * hands the gate to the next waiter. The last thread finds no waiter:
 * its first handoff returns KERN_NOT_WAITING exactly once and the gate
 * is then released with GATE_HANDOFF_OPEN_IF_NO_WAITERS.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		/* Somebody holds the gate: wait for a direct handoff. */
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* Critical section: the gate (not the lock) serializes this update. */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* Only the very last thread can find no waiter. */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2707
/*
 * Worker thread for the gate push test.
 *
 * Only the thread that manages to close the gate does the "work": it
 * waits for every other thread to block on the gate and asserts that
 * it is running at the maximum priority among the waiters (the gate
 * pushes the waiters' priority on the holder). The other threads wait
 * on the gate and re-check work_to_do when woken.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			/* We are the worker: everybody else will wait on the gate. */
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			/* The gate must push the max waiter priority on the holder. */
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			/* Reset the rendezvous counter for possible later use. */
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			/* Woken: re-evaluate whether work is still pending. */
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* Any priority push must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2749
2750 static void
test_gate_push(struct info_sleep_inheritor_test * info,int prim_type)2751 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2752 {
2753 info->prim_type = prim_type;
2754 info->use_alloc_gate = false;
2755
2756 primitive_gate_init(info);
2757 info->work_to_do = TRUE;
2758 info->synch = 0;
2759 info->synch_value = NUM_THREADS;
2760
2761 start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2762 wait_all_thread((struct synch_test_common *)info);
2763
2764 primitive_gate_destroy(info);
2765 }
2766
2767 static void
test_gate_handoff(struct info_sleep_inheritor_test * info,int prim_type)2768 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2769 {
2770 info->prim_type = prim_type;
2771 info->use_alloc_gate = false;
2772
2773 primitive_gate_init(info);
2774
2775 info->synch = 0;
2776 info->synch_value = NUM_THREADS;
2777 info->value = 0;
2778 info->handoff_failure = 0;
2779
2780 start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2781 wait_all_thread((struct synch_test_common *)info);
2782
2783 T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2784 T_ASSERT(info->handoff_failure == 1, "handoff failures");
2785
2786 primitive_gate_destroy(info);
2787 }
2788
2789 static void
test_gate_steal(struct info_sleep_inheritor_test * info,int prim_type)2790 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2791 {
2792 info->prim_type = prim_type;
2793 info->use_alloc_gate = false;
2794
2795 primitive_gate_init(info);
2796
2797 info->synch = 0;
2798 info->synch_value = NUM_THREADS;
2799 info->thread_inheritor = NULL;
2800 info->steal_pri = 0;
2801
2802 start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2803 wait_all_thread((struct synch_test_common *)info);
2804
2805 primitive_gate_destroy(info);
2806 }
2807
2808 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2809 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2810 {
2811 (void)info;
2812 (void) prim_type;
2813 info->prim_type = prim_type;
2814 info->use_alloc_gate = true;
2815
2816 primitive_gate_alloc(info);
2817
2818 info->synch = 0;
2819 info->synch_value = NUM_THREADS;
2820
2821 start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2822 wait_all_thread((struct synch_test_common *)info);
2823
2824 T_ASSERT(info->alloc_gate == NULL, "gate free");
2825 info->use_alloc_gate = false;
2826 }
2827
2828 kern_return_t
ts_kernel_gate_test(void)2829 ts_kernel_gate_test(void)
2830 {
2831 struct info_sleep_inheritor_test info = {};
2832
2833 T_LOG("Testing gate primitive");
2834
2835 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2836
2837 lck_attr_t* lck_attr = lck_attr_alloc_init();
2838 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2839 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2840
2841 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2842 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2843
2844 /*
2845 * Testing the priority inherited by the keeper
2846 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2847 */
2848 T_LOG("Testing gate push, mtx");
2849 test_gate_push(&info, MTX_LOCK);
2850
2851 T_LOG("Testing gate push, rw");
2852 test_gate_push(&info, RW_LOCK);
2853
2854 /*
2855 * Testing the handoff
2856 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2857 */
2858 T_LOG("Testing gate handoff, mtx");
2859 test_gate_handoff(&info, MTX_LOCK);
2860
2861 T_LOG("Testing gate handoff, rw");
2862 test_gate_handoff(&info, RW_LOCK);
2863
2864 /*
2865 * Testing the steal
2866 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2867 */
2868 T_LOG("Testing gate steal, mtx");
2869 test_gate_steal(&info, MTX_LOCK);
2870
2871 T_LOG("Testing gate steal, rw");
2872 test_gate_steal(&info, RW_LOCK);
2873
2874 /*
2875 * Testing the alloc/free
2876 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2877 */
2878 T_LOG("Testing gate alloc/free, mtx");
2879 test_gate_alloc_free(&info, MTX_LOCK);
2880
2881 T_LOG("Testing gate alloc/free, rw");
2882 test_gate_alloc_free(&info, RW_LOCK);
2883
2884 destroy_synch_test_common((struct synch_test_common *)&info);
2885
2886 lck_attr_free(lck_attr);
2887 lck_grp_attr_free(lck_grp_attr);
2888 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2889 lck_grp_free(lck_grp);
2890
2891 return KERN_SUCCESS;
2892 }
2893
/* Number of threads (and gates) participating in the turnstile chain tests. */
#define NUM_THREAD_CHAIN 6

/* Shared state for the turnstile chain tests. */
struct turnstile_chain_test {
	struct synch_test_common head;  /* common test state; the struct is cast to this throughout */
	lck_mtx_t mtx_lock;             /* mutex protecting all the gates */
	int synch_value;                /* rendezvous target (number of threads) */
	int synch;                      /* first rendezvous counter */
	int synch2;                     /* second rendezvous counter */
	gate_t gates[NUM_THREAD_CHAIN]; /* one gate per thread in the chain */
};
2904
/*
 * Worker for the combined sleep_with_inheritor + gate chain test.
 *
 * Even-indexed threads each close their own gate. Then every thread
 * other than thread 0 blocks behind its predecessor: odd threads wait on
 * the gate closed by thread i-1, even threads (i > 0) sleep with thread
 * i-1 as inheritor. This builds an alternating turnstile chain
 * (gate -> sleep -> gate -> ...) ending at thread 0, which verifies that
 * the maximum priority of the whole set has been pushed onto it before
 * unwinding the chain.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Rendezvous: every even thread has closed its gate past this point. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until everybody else is blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		/* The chain must have pushed the set's max priority onto us. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		wait_event = NULL;
		wake_event = NULL;
		/* Find our index: predecessor i-1 is our inheritor/gate keeper. */
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd thread: block on the gate closed by thread i-1. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Unwind the chain: wake our successor, if we have one. */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Release our own gate so the next odd thread can proceed. */
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* Every priority push received must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3000
/*
 * Worker for the pure gate chain test.
 *
 * Every thread closes its own gate, then each thread i > 0 waits on the
 * gate closed by thread i-1, forming a turnstile chain ending at
 * thread 0. Thread 0 verifies that the maximum priority of the set has
 * been pushed onto it, then opens its gate; each woken thread in turn
 * verifies the push and opens its own gate, unwinding the chain.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Every thread closes the gate matching its own index. */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	/* We must have found ourselves in the thread array. */
	assert(i != info->head.nthreads);

	/* Rendezvous: all gates are closed past this point. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until everybody else is blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* The chain must have pushed the set's max priority onto us. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		/* Block on predecessor's gate; gate_wait drops the mutex. */
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Open our own gate to wake our successor. */
		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* Every priority push received must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3063
/*
 * Worker for the sleep_with_inheritor chain test.
 *
 * Each thread i > 0 sleeps with thread i-1 as its inheritor, forming a
 * turnstile chain ending at thread 0. Thread 0 verifies that the
 * maximum priority of the set has been pushed onto it, then wakes
 * thread 1; each woken thread in turn verifies the push and wakes its
 * successor, unwinding the chain.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until everybody else is blocked behind us. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* The chain must have pushed the set's max priority onto us. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake thread 1 without transferring the push to it. */
		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		wait_event = NULL;
		wake_event = NULL;
		/* Find our index: predecessor i-1 is our inheritor. */
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		/* Sleep with our predecessor as inheritor; drops the mutex. */
		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Unwind the chain: wake our successor, if we have one. */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* Every priority push received must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3141
3142 static void
test_sleep_chain(struct turnstile_chain_test * info)3143 test_sleep_chain(struct turnstile_chain_test *info)
3144 {
3145 info->synch = 0;
3146 info->synch_value = info->head.nthreads;
3147
3148 start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
3149 wait_all_thread((struct synch_test_common *)info);
3150 }
3151
3152 static void
test_gate_chain(struct turnstile_chain_test * info)3153 test_gate_chain(struct turnstile_chain_test *info)
3154 {
3155 info->synch = 0;
3156 info->synch2 = 0;
3157 info->synch_value = info->head.nthreads;
3158
3159 start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
3160 wait_all_thread((struct synch_test_common *)info);
3161 }
3162
3163 static void
test_sleep_gate_chain(struct turnstile_chain_test * info)3164 test_sleep_gate_chain(struct turnstile_chain_test *info)
3165 {
3166 info->synch = 0;
3167 info->synch2 = 0;
3168 info->synch_value = info->head.nthreads;
3169
3170 start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
3171 wait_all_thread((struct synch_test_common *)info);
3172 }
3173
3174 kern_return_t
ts_kernel_turnstile_chain_test(void)3175 ts_kernel_turnstile_chain_test(void)
3176 {
3177 struct turnstile_chain_test info = {};
3178 int i;
3179
3180 init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
3181 lck_attr_t* lck_attr = lck_attr_alloc_init();
3182 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
3183 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
3184
3185 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
3186 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3187 lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
3188 }
3189
3190 T_LOG("Testing sleep chain, lck");
3191 test_sleep_chain(&info);
3192
3193 T_LOG("Testing gate chain, lck");
3194 test_gate_chain(&info);
3195
3196 T_LOG("Testing sleep and gate chain, lck");
3197 test_sleep_gate_chain(&info);
3198
3199 destroy_synch_test_common((struct synch_test_common *)&info);
3200 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3201 lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
3202 }
3203 lck_attr_free(lck_attr);
3204 lck_grp_attr_free(lck_grp_attr);
3205 lck_mtx_destroy(&info.mtx_lock, lck_grp);
3206 lck_grp_free(lck_grp);
3207
3208 return KERN_SUCCESS;
3209 }
3210
3211 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)3212 ts_kernel_timingsafe_bcmp_test(void)
3213 {
3214 int i, buf_size;
3215 char *buf = NULL;
3216
3217 // empty
3218 T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3219 T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3220 T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3221
3222 // equal
3223 T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3224
3225 // unequal
3226 T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3227 T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3228 T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3229 T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3230
3231 // all possible bitwise differences
3232 for (i = 1; i < 256; i += 1) {
3233 unsigned char a = 0;
3234 unsigned char b = (unsigned char)i;
3235
3236 T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3237 }
3238
3239 // large
3240 buf_size = 1024 * 16;
3241 buf = kalloc_data(buf_size, Z_WAITOK);
3242 T_EXPECT_NOTNULL(buf, "kalloc of buf");
3243
3244 read_random(buf, buf_size);
3245 T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3246 T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3247 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3248
3249 memcpy(buf + 128, buf, 128);
3250 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3251
3252 kfree_data(buf, buf_size);
3253
3254 return KERN_SUCCESS;
3255 }
3256
3257 kern_return_t
kprintf_hhx_test(void)3258 kprintf_hhx_test(void)
3259 {
3260 printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
3261 (unsigned short)0xfeed, (unsigned short)0xface,
3262 (unsigned short)0xabad, (unsigned short)0xcafe,
3263 (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
3264 (unsigned char)'!',
3265 0xfeedfaceULL);
3266 T_PASS("kprintf_hhx_test passed");
3267 return KERN_SUCCESS;
3268 }
3269