1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_object_internal.h>
47 #include <vm/vm_protos.h>
48 #include <vm/vm_iokit.h>
49 #include <string.h>
50 #include <kern/kern_apfs_reflock.h>
51
52 #if !(DEVELOPMENT || DEBUG)
53 #error "Testing is not enabled on RELEASE configurations"
54 #endif
55
56 #include <tests/xnupost.h>
57
/*
 * Parses one entry of the kernPOST_config run list into [lower, upper];
 * implemented elsewhere — see usage in xnupost_should_run_test().
 */
extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
/* Kernel-private qsort; used by RandomULong_test to detect duplicate values. */
__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

/* Number of tests registered so far; used to hand out 1-based test numbers. */
uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);

/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t ts_kernel_primitive_test(void);
kern_return_t ts_kernel_sleep_inheritor_test(void);
kern_return_t ts_kernel_gate_test(void);
kern_return_t ts_kernel_turnstile_chain_test(void);
kern_return_t ts_kernel_timingsafe_bcmp_test(void);

#if __ARM_VFP__
extern kern_return_t vfp_state_test(void);
#endif

extern kern_return_t kprintf_hhx_test(void);

#if defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_printf(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_handles(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);
extern kern_return_t counter_tests(void);
#if ML_IO_TIMEOUTS_ENABLED
extern kern_return_t ml_io_timeout_test(void);
#endif

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif
#if defined(HAS_APPLE_PAC)
extern kern_return_t arm64_ropjop_test(void);
#endif /* defined(HAS_APPLE_PAC) */
#if CONFIG_SPTM
extern kern_return_t arm64_panic_lockdown_test(void);
#endif /* CONFIG_SPTM */
#if HAS_SPECRES
extern kern_return_t specres_test(void);
#endif /* HAS_SPECRES */
#if BTI_ENFORCED
kern_return_t arm64_bti_test(void);
#endif /* BTI_ENFORCED */
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);


/*
 * Currently registered panic widget (at most one at a time); see
 * xnupost_register_panic_widget() and xnupost_process_kdb_stop().
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
122
/*
 * Table of kernel POST tests run at boot when the kernPOST boot-arg is set.
 * Order here determines the default (1-based) test numbering assigned by
 * xnupost_list_tests(); entries are gated on the same conditionals as their
 * declarations above.
 */
struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_printf),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_handles),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
#if __ARM_PAN_AVAILABLE__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#if CONFIG_SPTM
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_panic_lockdown_test),
#endif /* CONFIG_SPTM */
#endif /* __arm64__ */
	                                   XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm64__)
	                                   XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	                                   //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	                                   XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	                                   XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	                                   XNUPOST_TEST_CONFIG_BASIC(counter_tests),
#if ML_IO_TIMEOUTS_ENABLED
	                                   XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
#endif
#if HAS_SPECRES
	                                   XNUPOST_TEST_CONFIG_BASIC(specres_test),
#endif
};

/*
 * NOTE(review): divisor assumes sizeof(xnupost_test_data_t) equals
 * sizeof(struct xnupost_test) — confirm against xnupost.h.
 */
uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
170
/* Bits parsed out of the kernPOST boot-arg (see xnupost_parse_config()). */
#define POSTARGS_RUN_TESTS 0x1            /* run the POST tests (checked in xnupost_run_tests) */
#define POSTARGS_CONTROLLER_AVAILABLE 0x2 /* an external controller can catch expected panics */
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4  /* kernPOST_config supplied an explicit run list */
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
/* Cached result of xnupost_parse_config(); KERN_INVALID_CAPABILITY means "not parsed yet". */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
/* Raw kernPOST_config boot-arg string: comma-separated test-number ranges. */
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
180
181 kern_return_t
xnupost_parse_config()182 xnupost_parse_config()
183 {
184 if (parse_config_retval != KERN_INVALID_CAPABILITY) {
185 return parse_config_retval;
186 }
187 PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
188
189 if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
190 kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
191 }
192
193 if (kernel_post_args != 0) {
194 parse_config_retval = KERN_SUCCESS;
195 goto out;
196 }
197 parse_config_retval = KERN_NOT_SUPPORTED;
198 out:
199 return parse_config_retval;
200 }
201
202 boolean_t
xnupost_should_run_test(uint32_t test_num)203 xnupost_should_run_test(uint32_t test_num)
204 {
205 if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
206 int64_t begin = 0, end = 999999;
207 char * b = kernel_post_test_configs;
208 while (*b) {
209 get_range_bounds(b, &begin, &end);
210 if (test_num >= begin && test_num <= end) {
211 return TRUE;
212 }
213
214 /* skip to the next "," */
215 while (*b != ',') {
216 if (*b == '\0') {
217 return FALSE;
218 }
219 b++;
220 }
221 /* skip past the ',' */
222 b++;
223 }
224 return FALSE;
225 }
226 return TRUE;
227 }
228
229 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)230 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
231 {
232 if (KERN_SUCCESS != xnupost_parse_config()) {
233 return KERN_FAILURE;
234 }
235
236 xnupost_test_t testp;
237 for (uint32_t i = 0; i < test_count; i++) {
238 testp = &test_list[i];
239 if (testp->xt_test_num == 0) {
240 assert(total_post_tests_count < UINT16_MAX);
241 testp->xt_test_num = (uint16_t)++total_post_tests_count;
242 }
243 /* make sure the boot-arg based test run list is honored */
244 if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
245 testp->xt_config |= XT_CONFIG_IGNORE;
246 if (xnupost_should_run_test(testp->xt_test_num)) {
247 testp->xt_config &= ~(XT_CONFIG_IGNORE);
248 testp->xt_config |= XT_CONFIG_RUN;
249 printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
250 }
251 }
252 printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
253 testp->xt_config);
254 }
255
256 return KERN_SUCCESS;
257 }
258
/*
 * Execute every test in test_list under the ktest (T_*) harness.
 *
 * Returns immediately (KERN_SUCCESS) when POSTARGS_RUN_TESTS is not set.
 * Tests marked XT_CONFIG_IGNORE, and panic-expecting tests without a
 * controller present, are skipped.  Each test's begin/end timestamps and
 * result are recorded back into its xnupost_test entry.
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* drop any panic widget left over from the previous test */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		/* pre-set end time so a skipped test reports a zero duration */
		testp->xt_end_time = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
323
324 kern_return_t
kernel_list_tests()325 kernel_list_tests()
326 {
327 return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
328 }
329
330 kern_return_t
kernel_do_post()331 kernel_do_post()
332 {
333 return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
334 }
335
336 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)337 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
338 {
339 if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
340 return KERN_RESOURCE_SHORTAGE;
341 }
342
343 xt_panic_widgets.xtp_context_p = context;
344 xt_panic_widgets.xtp_func = funcp;
345 xt_panic_widgets.xtp_func_name = funcname;
346 xt_panic_widgets.xtp_outval_p = outval;
347
348 return KERN_SUCCESS;
349 }
350
351 void
xnupost_reset_panic_widgets()352 xnupost_reset_panic_widgets()
353 {
354 bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
355 }
356
/*
 * Hook invoked from the panic/assert (kdb_stop) path while POST is active.
 *
 * Dispatches the panic string to the registered panic widget, if any, and
 * translates the widget's verdict into:
 *   KERN_SUCCESS - swallow the panic and resume execution
 *   KERN_FAILURE - continue into the normal kdb_stop path
 * Returns KERN_INVALID_CAPABILITY when POST is not running or no widget
 * is registered.
 */
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		/* widget handled the stop but the testcase failed; still resume */
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		/* testcase passed, but the widget wants the panic to proceed */
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
403
404 xt_panic_return_t
_xt_generic_assert_check(const char * s,void * str_to_match,void ** outval)405 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
406 {
407 xt_panic_return_t ret = XT_PANIC_UNRELATED;
408
409 if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
410 T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
411 ret = XT_RET_W_SUCCESS;
412 }
413
414 if (outval) {
415 *outval = (void *)(uintptr_t)ret;
416 }
417 return ret;
418 }
419
420 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)421 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
422 {
423 uint32_t i = 0;
424 xnupost_test_t testp;
425 for (; i < test_count; i++) {
426 testp = &test_list[i];
427 testp->xt_begin_time = 0;
428 testp->xt_end_time = 0;
429 testp->xt_test_actions = XT_ACTION_NONE;
430 testp->xt_retval = -1;
431 }
432 return KERN_SUCCESS;
433 }
434
435
/*
 * Smoke-test the zone allocator: create a destructible zone for
 * uint64_t-sized elements, allocate and free one element, and emit a
 * sample perfdata point.
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* a freshly created zone should have no free elements yet */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
459
460 /*
461 * Function used for comparison by qsort()
462 */
463 static int
compare_numbers_ascending(const void * a,const void * b)464 compare_numbers_ascending(const void * a, const void * b)
465 {
466 const uint64_t x = *(const uint64_t *)a;
467 const uint64_t y = *(const uint64_t *)b;
468 if (x < y) {
469 return -1;
470 } else if (x > y) {
471 return 1;
472 } else {
473 return 0;
474 }
475 }
476
477 /*
478 * Function to count number of bits that are set in a number.
479 * It uses Side Addition using Magic Binary Numbers
480 */
481 static int
count_bits(uint64_t number)482 count_bits(uint64_t number)
483 {
484 return __builtin_popcountll(number);
485 }
486
kern_return_t
RandomULong_test()
{
	/*
	 * Randomness test for RandomULong()
	 *
	 * This test verifies that:
	 * a. RandomULong works
	 * b. The generated numbers match the following entropy criteria:
	 * For a thousand iterations, verify:
	 * 1. mean entropy > 12 bits
	 * 2. min entropy > 4 bits
	 * 3. No Duplicate
	 * 4. No incremental/decremental pattern in a window of 3
	 * 5. No Zero
	 * 6. No -1
	 *
	 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
	 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
	/* ceil(CONF_WINDOW_SIZE / 2); NOTE(review): the trailing ">> 0" is a no-op */
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 * "Bit entropy" here = popcount of the XOR of consecutive samples,
	 * i.e. how many bit positions changed between them.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 * (sliding window of CONF_WINDOW_SIZE samples, advanced one sample at a time).
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		/* trend counts up for ascending pairs, down for descending pairs */
		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated (sort, then compare neighbors)
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
623
624
625 /* KCDATA kernel api tests */
/* kcdata descriptor reused across the kcdata_api_test cases */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/* Sample packed payload layout described by test_disk_io_stats_def below. */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;       /* offset 0 */
	uint64_t disk_reads_size;        /* offset 8 */
	uint64_t io_priority_count[4];   /* offsets 16..40 */
	uint64_t io_priority_size;       /* offset 48 */
} __attribute__((packed));

/*
 * Subtype descriptors mirroring struct sample_disk_io_stats, used to
 * exercise kcdata_add_type_definition() in kcdata_api_test().
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		/*
		 * NOTE(review): flagged as ARRAY but kcs_elem_size is a plain
		 * element size (no KCS_SUBTYPE_PACK_SIZE) — confirm intent.
		 */
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
664
/*
 * Exercise the kcdata API: negative argument checks for
 * kcdata_memory_static_init()/kcdata_get_memory_addr(), a successful
 * buffer initialization, begin/end marker layout, convenience adders,
 * array allocation, and custom-type registration.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	/* poison the length; a successful init must overwrite it */
	test_kc_data.kcd_length = 0xdeadbeef;

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	/* NOTE(review): indices assume sizeof(struct kcdata_item) == 16 bytes */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* item flags encode (type << 32) | element count for arrays */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
775
776 /*
777 * kern_return_t
778 * kcdata_api_assert_tests()
779 * {
780 * kern_return_t retval = 0;
781 * void * assert_check_retval = NULL;
782 * test_kc_data2.kcd_length = 0xdeadbeef;
783 * mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
784 * T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
785 *
786 * retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
787 * KCFLAG_USE_MEMCOPY);
788 *
789 * T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
790 *
791 * retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
792 * T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
793 *
794 * // this will assert
795 * retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
796 * T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
797 * T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
798 *
799 * return KERN_SUCCESS;
800 * }
801 */
802
803 #if defined(__arm64__)
804
805 #include <arm/pmap.h>
806
807 #define MAX_PMAP_OBJECT_ELEMENT 100000
808
809 extern struct vm_object pmap_object_store; /* store pt pages */
810 extern unsigned long gPhysBase, gPhysSize, first_avail;
811
812 /*
813 * Define macros to transverse the pmap object structures and extract
814 * physical page number with information from low global only
815 * This emulate how Astris extracts information from coredump
816 */
817 #if defined(__arm64__)
818
819 static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)820 astris_vm_page_unpack_ptr(uintptr_t p)
821 {
822 if (!p) {
823 return (uintptr_t)0;
824 }
825
826 return (p & lowGlo.lgPmapMemFromArrayMask)
827 ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
828 : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
829 }
830
// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

/* First element of a queue is reached by following the head's next pointer. */
#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

/* Queue is circular: iteration ends when the cursor is back at the head. */
#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

/* Walk the queue, offsetting each element by lgPmapMemChainOffset to find its link. */
#define astris_vm_page_queue_iterate(head, elt)                                                                      \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt));        \
	    (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

/* Convert a physical page number to a physical address via the low-global page shift. */
#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
845
846 static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)847 astris_vm_page_get_phys_page(uintptr_t m)
848 {
849 return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
850 ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
851 : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
852 }
853
/*
 * Validate the low-global (lowGlo) coredump contract for the pmap:
 * layout versions/magic, the constants Astris relies on, and that every
 * page in pmap_object_store unpacks to a physical address inside
 * [gPhysBase, gPhysBase + gPhysSize).
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	/* layout version this test was written against */
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	/* walk the pmap object's page queue exactly as Astris would */
	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* runaway guard: a corrupt queue must not loop forever */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* the queue must contain at least one page */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
895 #endif /* defined(__arm64__) */
896
/*
 * Per-thread arguments for thread_lock_unlock_kernel_primitive().
 * The notify/wait_event pointers are counters driven through
 * wake_threads()/wait_threads() to sequence the owner and waiter threads.
 */
struct ts_kern_prim_test_args {
	int *end_barrier;      /* bumped when the thread is done; the test waits on it */
	int *notify_b;         /* bumped before taking the turnstile lock */
	int *wait_event_b;     /* waited on (to reach before_num) before taking the lock */
	int before_num;        /* target value for wait_event_b */
	int *notify_a;         /* bumped after taking the turnstile lock */
	int *wait_event_a;     /* waited on (to reach after_num) after taking the lock */
	int after_num;         /* target value for wait_event_a */
	int priority_to_check; /* expected sched_pri while holding the lock; 0 skips the check */
};
907
908 static void
wait_threads(int * var,int num)909 wait_threads(
910 int* var,
911 int num)
912 {
913 if (var != NULL) {
914 while (os_atomic_load(var, acquire) != num) {
915 assert_wait((event_t) var, THREAD_UNINT);
916 if (os_atomic_load(var, acquire) != num) {
917 (void) thread_block(THREAD_CONTINUE_NULL);
918 } else {
919 clear_wait(current_thread(), THREAD_AWAKENED);
920 }
921 }
922 }
923 }
924
925 static void
wake_threads(int * var)926 wake_threads(
927 int* var)
928 {
929 if (var) {
930 os_atomic_inc(var, relaxed);
931 thread_wakeup((event_t) var);
932 }
933 }
934
935 extern void IOSleep(int);
936
/*
 * Body of each thread in ts_kernel_primitive_test(): synchronize with the
 * other threads, take the sysctl turnstile test lock, optionally verify the
 * priority this thread was pushed to while holding it, then unlock and exit.
 * The exact ordering of the barriers below is what creates the owner/waiter
 * scenario, so do not reorder.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	/* wait for my turn, then tell the next thread it can proceed */
	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	/* announce we hold the lock, then wait for the waiters to queue up */
	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	/* give the turnstile push time to propagate */
	IOSleep(100);

	if (info->priority_to_check) {
		/* sample sched_pri under the thread lock at splsched */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
970
971 kern_return_t
ts_kernel_primitive_test(void)972 ts_kernel_primitive_test(void)
973 {
974 thread_t owner, thread1, thread2;
975 struct ts_kern_prim_test_args targs[2] = {};
976 kern_return_t result;
977 int end_barrier = 0;
978 int owner_locked = 0;
979 int waiters_ready = 0;
980
981 T_LOG("Testing turnstile kernel primitive");
982
983 targs[0].notify_b = NULL;
984 targs[0].wait_event_b = NULL;
985 targs[0].before_num = 0;
986 targs[0].notify_a = &owner_locked;
987 targs[0].wait_event_a = &waiters_ready;
988 targs[0].after_num = 2;
989 targs[0].priority_to_check = 90;
990 targs[0].end_barrier = &end_barrier;
991
992 // Start owner with priority 80
993 result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
994 T_ASSERT(result == KERN_SUCCESS, "Starting owner");
995
996 targs[1].notify_b = &waiters_ready;
997 targs[1].wait_event_b = &owner_locked;
998 targs[1].before_num = 1;
999 targs[1].notify_a = NULL;
1000 targs[1].wait_event_a = NULL;
1001 targs[1].after_num = 0;
1002 targs[1].priority_to_check = 0;
1003 targs[1].end_barrier = &end_barrier;
1004
1005 // Start waiters with priority 85 and 90
1006 result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
1007 T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
1008
1009 result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
1010 T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
1011
1012 wait_threads(&end_barrier, 3);
1013
1014 return KERN_SUCCESS;
1015 }
1016
1017 #define MTX_LOCK 0
1018 #define RW_LOCK 1
1019
1020 #define NUM_THREADS 4
1021
/* State shared by all threads of one synchronization test. */
struct synch_test_common {
	unsigned int nthreads; /* number of entries in threads[] */
	thread_t *threads;     /* array allocated by init_synch_test_common() */
	int max_pri;           /* highest priority handed out by start_threads() (capped at 95) */
	int test_done;         /* completion counter: notify_waiter() bumps it, wait_all_thread() waits */
};
1028
1029 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1030 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1031 {
1032 info->nthreads = nthreads;
1033 info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1034 if (!info->threads) {
1035 return ENOMEM;
1036 }
1037
1038 return KERN_SUCCESS;
1039 }
1040
/* Release the thread array allocated by init_synch_test_common(). */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree_type(thread_t, info->nthreads, info->threads);
}
1046
1047 static void
start_threads(thread_continue_t func,struct synch_test_common * info,bool sleep_after_first)1048 start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
1049 {
1050 thread_t thread;
1051 kern_return_t result;
1052 uint i;
1053 int priority = 75;
1054
1055 info->test_done = 0;
1056
1057 for (i = 0; i < info->nthreads; i++) {
1058 info->threads[i] = NULL;
1059 }
1060
1061 info->max_pri = priority + (info->nthreads - 1) * 5;
1062 if (info->max_pri > 95) {
1063 info->max_pri = 95;
1064 }
1065
1066 for (i = 0; i < info->nthreads; i++) {
1067 result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
1068 os_atomic_store(&info->threads[i], thread, release);
1069 T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);
1070
1071 priority += 5;
1072
1073 if (i == 0 && sleep_after_first) {
1074 IOSleep(100);
1075 }
1076 }
1077 }
1078
/* Highest priority assigned by start_threads() (non-negative by construction there). */
static unsigned int
get_max_pri(struct synch_test_common * info)
{
	return info->max_pri;
}
1084
/* Block until every worker thread has called notify_waiter(). */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1090
/* Signal completion of one worker thread (counterpart of wait_all_thread()). */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1096
/*
 * Busy-wait (with periodic sleeps) until every other test thread has been
 * published to info->threads[] and has stopped running (left TH_RUN) —
 * i.e. is blocked waiting. Slots holding the sentinel (thread_t)1 (set by
 * exclude_current_waiter()) and the calling thread itself are skipped.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* first wait for start_threads() to publish slot i */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					/* sentinel: this slot excluded itself from the wait */
					break;
				}

				if (!(thread->state & TH_RUN)) {
					/* thread is no longer runnable: it is blocked */
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				/* a thread that has not started yet can't be judged by TH_RUN */
				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1136
1137 static void
exclude_current_waiter(struct synch_test_common * info)1138 exclude_current_waiter(struct synch_test_common *info)
1139 {
1140 uint i, j;
1141
1142 for (i = 0; i < info->nthreads; i++) {
1143 j = 0;
1144 while (os_atomic_load(&info->threads[i], acquire) == NULL) {
1145 if (j % 100 == 0) {
1146 IOSleep(10);
1147 }
1148 j++;
1149 }
1150
1151 if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
1152 os_atomic_store(&info->threads[i], (thread_t)1, release);
1153 return;
1154 }
1155 }
1156 }
1157
/*
 * Shared state for the sleep-with-inheritor / gate / reflock tests.
 * prim_type selects which lock (mtx_lock or rw_lock) the primitive_*
 * helpers operate on; use_alloc_gate selects alloc_gate over the embedded
 * gate.
 */
struct info_sleep_inheritor_test {
	struct synch_test_common head; /* common thread bookkeeping; must stay first */
	lck_mtx_t mtx_lock;            /* used when prim_type == MTX_LOCK */
	lck_rw_t rw_lock;              /* used when prim_type == RW_LOCK */
	decl_lck_mtx_gate_data(, gate); /* embedded gate */
	boolean_t gate_closed;
	int prim_type;                 /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;        /* highest waiter pri seen by thread_steal_work() */
	int synch_value;               /* barrier target for synch */
	int synch;                     /* barrier counter (wake_threads/wait_threads) */
	int value;                     /* work counter mutated by the test threads */
	int handoff_failure;           /* counts wakeups that found no inheritor */
	thread_t thread_inheritor;     /* current inheritor; also the sleep event address */
	bool use_alloc_gate;           /* true: use alloc_gate instead of the embedded gate */
	gate_t *alloc_gate;            /* gate from primitive_gate_alloc(), or NULL */
	struct obj_cached **obj_cache; /* reflock cache test state */
	kern_apfs_reflock_data(, reflock);
	int reflock_protected_status;
};
1179
1180 static void
primitive_lock(struct info_sleep_inheritor_test * info)1181 primitive_lock(struct info_sleep_inheritor_test *info)
1182 {
1183 switch (info->prim_type) {
1184 case MTX_LOCK:
1185 lck_mtx_lock(&info->mtx_lock);
1186 break;
1187 case RW_LOCK:
1188 lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1189 break;
1190 default:
1191 panic("invalid type %d", info->prim_type);
1192 }
1193 }
1194
1195 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1196 primitive_unlock(struct info_sleep_inheritor_test *info)
1197 {
1198 switch (info->prim_type) {
1199 case MTX_LOCK:
1200 lck_mtx_unlock(&info->mtx_lock);
1201 break;
1202 case RW_LOCK:
1203 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1204 break;
1205 default:
1206 panic("invalid type %d", info->prim_type);
1207 }
1208 }
1209
/*
 * Sleep on &info->thread_inheritor with the current inheritor as the
 * priority-push target, dropping and re-taking whichever lock prim_type
 * selects (LCK_SLEEP_DEFAULT). Returns the wait result from the sleep.
 */
static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
{
	wait_result_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}

	return ret;
}
1227
1228 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1229 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1230 {
1231 switch (info->prim_type) {
1232 case MTX_LOCK:
1233 case RW_LOCK:
1234 wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1235 break;
1236 default:
1237 panic("invalid type %d", info->prim_type);
1238 }
1239 }
1240
1241 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1242 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1243 {
1244 switch (info->prim_type) {
1245 case MTX_LOCK:
1246 case RW_LOCK:
1247 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1248 break;
1249 default:
1250 panic("invalid type %d", info->prim_type);
1251 }
1252 return;
1253 }
1254
1255 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1256 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1257 {
1258 switch (info->prim_type) {
1259 case MTX_LOCK:
1260 case RW_LOCK:
1261 change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1262 break;
1263 default:
1264 panic("invalid type %d", info->prim_type);
1265 }
1266 return;
1267 }
1268
1269 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1270 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1271 {
1272 gate_t *gate = &info->gate;
1273 if (info->use_alloc_gate == true) {
1274 gate = info->alloc_gate;
1275 }
1276 kern_return_t ret = KERN_SUCCESS;
1277 switch (info->prim_type) {
1278 case MTX_LOCK:
1279 ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1280 break;
1281 case RW_LOCK:
1282 ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1283 break;
1284 default:
1285 panic("invalid type %d", info->prim_type);
1286 }
1287 return ret;
1288 }
1289
/*
 * Wait (uninterruptibly, forever) for the selected gate to open, using the
 * lock chosen by prim_type. Returns the gate wait result.
 */
static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test *info)
{
	gate_t *gate = &info->gate;
	if (info->use_alloc_gate == true) {
		gate = info->alloc_gate;
	}
	gate_wait_result_t ret = GATE_OPENED;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}
1310
1311 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1312 primitive_gate_open(struct info_sleep_inheritor_test *info)
1313 {
1314 gate_t *gate = &info->gate;
1315 if (info->use_alloc_gate == true) {
1316 gate = info->alloc_gate;
1317 }
1318 switch (info->prim_type) {
1319 case MTX_LOCK:
1320 lck_mtx_gate_open(&info->mtx_lock, gate);
1321 break;
1322 case RW_LOCK:
1323 lck_rw_gate_open(&info->rw_lock, gate);
1324 break;
1325 default:
1326 panic("invalid type %d", info->prim_type);
1327 }
1328 }
1329
1330 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1331 primitive_gate_close(struct info_sleep_inheritor_test *info)
1332 {
1333 gate_t *gate = &info->gate;
1334 if (info->use_alloc_gate == true) {
1335 gate = info->alloc_gate;
1336 }
1337
1338 switch (info->prim_type) {
1339 case MTX_LOCK:
1340 lck_mtx_gate_close(&info->mtx_lock, gate);
1341 break;
1342 case RW_LOCK:
1343 lck_rw_gate_close(&info->rw_lock, gate);
1344 break;
1345 default:
1346 panic("invalid type %d", info->prim_type);
1347 }
1348 }
1349
1350 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1351 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1352 {
1353 gate_t *gate = &info->gate;
1354 if (info->use_alloc_gate == true) {
1355 gate = info->alloc_gate;
1356 }
1357
1358 switch (info->prim_type) {
1359 case MTX_LOCK:
1360 lck_mtx_gate_steal(&info->mtx_lock, gate);
1361 break;
1362 case RW_LOCK:
1363 lck_rw_gate_steal(&info->rw_lock, gate);
1364 break;
1365 default:
1366 panic("invalid type %d", info->prim_type);
1367 }
1368 }
1369
/*
 * Hand ownership of the selected gate to a waiter, with the given
 * GATE_HANDOFF_* flags. Returns the handoff result from the gate call.
 */
static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
{
	gate_t *gate = &info->gate;
	if (info->use_alloc_gate == true) {
		gate = info->alloc_gate;
	}

	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}
1391
1392 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1393 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1394 {
1395 gate_t *gate = &info->gate;
1396 if (info->use_alloc_gate == true) {
1397 gate = info->alloc_gate;
1398 }
1399
1400 switch (info->prim_type) {
1401 case MTX_LOCK:
1402 lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1403 break;
1404 case RW_LOCK:
1405 lck_rw_gate_assert(&info->rw_lock, gate, type);
1406 break;
1407 default:
1408 panic("invalid type %d", info->prim_type);
1409 }
1410 }
1411
1412 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1413 primitive_gate_init(struct info_sleep_inheritor_test *info)
1414 {
1415 switch (info->prim_type) {
1416 case MTX_LOCK:
1417 lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1418 break;
1419 case RW_LOCK:
1420 lck_rw_gate_init(&info->rw_lock, &info->gate);
1421 break;
1422 default:
1423 panic("invalid type %d", info->prim_type);
1424 }
1425 }
1426
1427 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1428 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1429 {
1430 switch (info->prim_type) {
1431 case MTX_LOCK:
1432 lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1433 break;
1434 case RW_LOCK:
1435 lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1436 break;
1437 default:
1438 panic("invalid type %d", info->prim_type);
1439 }
1440 }
1441
1442 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1443 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1444 {
1445 gate_t *gate;
1446 switch (info->prim_type) {
1447 case MTX_LOCK:
1448 gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1449 break;
1450 case RW_LOCK:
1451 gate = lck_rw_gate_alloc_init(&info->rw_lock);
1452 break;
1453 default:
1454 panic("invalid type %d", info->prim_type);
1455 }
1456 info->alloc_gate = gate;
1457 }
1458
1459 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1460 primitive_gate_free(struct info_sleep_inheritor_test *info)
1461 {
1462 T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1463
1464 switch (info->prim_type) {
1465 case MTX_LOCK:
1466 lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1467 break;
1468 case RW_LOCK:
1469 lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1470 break;
1471 default:
1472 panic("invalid type %d", info->prim_type);
1473 }
1474 info->alloc_gate = NULL;
1475 }
1476
/*
 * Worker that uses sleep-with-inheritor like a mutex: the first thread in
 * becomes the inheritor ("owner"); everyone else sleeps pushing on it.
 * After doing its "work" (IOSleep + value++), the owner wakes exactly one
 * sleeper, which becomes the new inheritor, and so on. The wakeup returns
 * the woken thread with a reference which must be dropped; the last thread
 * finds no sleeper left and counts a single expected handoff failure.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first in: become the inheritor */
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	/* "critical section" work, done while being the inheritor */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	/* hand the inheritorship to one sleeper (stored back in thread_inheritor) */
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* no sleeper left: only the last thread may see this, exactly once */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* drop the reference the wakeup returned on the woken thread */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* any priority push must have been undone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1529
/*
 * Worker for the basic inheritor test: the first thread in becomes the
 * inheritor and, once every other thread is blocked on it, checks that it
 * was pushed to the highest waiter priority before waking everyone.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* wait for all other threads to reach the sleep */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* with all waiters pushing, we must run at the max assigned priority */
		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* signal arrival, then sleep pushing on the inheritor */
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* any priority push must have been undone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1569
/*
 * Worker for the change_sleep_inheritor ("steal") test: the first thread in
 * becomes the inheritor; the first thread to find an inheritor already set
 * steals the inheritorship via change_sleep_inheritor() and later verifies
 * it runs at the highest waiter priority recorded in steal_pri. Both the
 * original and the stealing inheritor exclude themselves from the waiter
 * set via exclude_current_waiter().
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first thread: original inheritor */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		/* two threads excluded themselves, hence synch_value - 2 */
		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		/* only wake everyone if the inheritorship was not stolen */
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* second thread: steal the inheritorship */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* we must now be pushed to the highest waiter priority seen */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* remaining threads: record our priority and sleep */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* any priority push must have been undone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1630
/*
 * Worker for the no-inheritor case: every thread but the last clears the
 * inheritor (sleeping with a NULL inheritor, so no push happens); the last
 * one (value reaches 0) wakes everybody.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* last thread in: release all sleepers */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* sleep with no inheritor set */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	/* no promotion should be left over */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1657
1658 static void
thread_mtx_work(void * args,__unused wait_result_t wr)1659 thread_mtx_work(
1660 void *args,
1661 __unused wait_result_t wr)
1662 {
1663 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1664 uint my_pri = current_thread()->sched_pri;
1665 int i;
1666 u_int8_t rand;
1667 unsigned int mod_rand;
1668 uint max_pri;
1669
1670 T_LOG("Started thread pri %d %p", my_pri, current_thread());
1671
1672 for (i = 0; i < 10; i++) {
1673 lck_mtx_lock(&info->mtx_lock);
1674 if (info->thread_inheritor == NULL) {
1675 info->thread_inheritor = current_thread();
1676 lck_mtx_unlock(&info->mtx_lock);
1677
1678 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1679
1680 wait_threads(&info->synch, info->synch_value - 1);
1681 wait_for_waiters((struct synch_test_common *)info);
1682 max_pri = get_max_pri((struct synch_test_common *) info);
1683 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1684
1685 os_atomic_store(&info->synch, 0, relaxed);
1686
1687 lck_mtx_lock(&info->mtx_lock);
1688 info->thread_inheritor = NULL;
1689 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1690 lck_mtx_unlock(&info->mtx_lock);
1691 continue;
1692 }
1693
1694 read_random(&rand, sizeof(rand));
1695 mod_rand = rand % 2;
1696
1697 wake_threads(&info->synch);
1698 switch (mod_rand) {
1699 case 0:
1700 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1701 lck_mtx_unlock(&info->mtx_lock);
1702 break;
1703 case 1:
1704 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1705 break;
1706 default:
1707 panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1708 }
1709 }
1710
1711 /*
1712 * spin here to stop using the lock as mutex
1713 */
1714 wake_threads(&info->synch);
1715 wait_threads(&info->synch, info->synch_value);
1716
1717 for (i = 0; i < 10; i++) {
1718 /* read_random might sleep so read it before acquiring the mtx as spin */
1719 read_random(&rand, sizeof(rand));
1720
1721 lck_mtx_lock_spin(&info->mtx_lock);
1722 if (info->thread_inheritor == NULL) {
1723 info->thread_inheritor = current_thread();
1724 lck_mtx_unlock(&info->mtx_lock);
1725
1726 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1727 wait_for_waiters((struct synch_test_common *)info);
1728 max_pri = get_max_pri((struct synch_test_common *) info);
1729 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1730
1731 lck_mtx_lock_spin(&info->mtx_lock);
1732 info->thread_inheritor = NULL;
1733 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1734 lck_mtx_unlock(&info->mtx_lock);
1735 continue;
1736 }
1737
1738 mod_rand = rand % 2;
1739 switch (mod_rand) {
1740 case 0:
1741 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1742 lck_mtx_unlock(&info->mtx_lock);
1743 break;
1744 case 1:
1745 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1746 lck_mtx_unlock(&info->mtx_lock);
1747 break;
1748 default:
1749 panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1750 }
1751 }
1752 assert(current_thread()->kern_promotion_schedpri == 0);
1753 notify_waiter((struct synch_test_common *)info);
1754
1755 thread_terminate_self();
1756 }
1757
/*
 * Worker exercising lck_rw_sleep_with_inheritor() with all four LCK_SLEEP_*
 * flavors (DEFAULT, UNLOCK, SHARED, EXCLUSIVE), chosen at random each round.
 * The thread that upgrades shared->exclusive first becomes the inheritor,
 * waits for the others to block on it, verifies the priority push, and
 * wakes everyone.
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			/* race to upgrade; on failure the lock is dropped, so retry */
			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				goto try_again;
			}
		}

		/* pick one of the four rw sleep flavors at random */
		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/* no promotion should be left over */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1834
1835 #define OBJ_STATE_UNUSED 0
1836 #define OBJ_STATE_REAL 1
1837 #define OBJ_STATE_PLACEHOLDER 2
1838
1839 #define OBJ_BUFF_SIZE 11
/* One entry of the reflock-protected object cache used by the reflock tests. */
struct obj_cached {
	int obj_id;                             /* identifier looked up by find_id_in_cache() */
	int obj_state;                          /* OBJ_STATE_UNUSED / _REAL / _PLACEHOLDER */
	struct kern_apfs_reflock *obj_refcount; /* reflock guarding this entry */
	char obj_buff[OBJ_BUFF_SIZE];           /* payload; initialized to "I am groot" */
};
1846
1847 #define CACHE_SIZE 2
1848 #define USE_CACHE_ROUNDS 15
1849
1850 #define REFCOUNT_REFLOCK_ROUNDS 15
1851
1852 /*
1853 * For the reflock cache test the cache is allocated
1854 * and its pointer is saved in obj_cache.
1855 * The lock for the cache is going to be one of the exclusive
1856 * locks already present in struct info_sleep_inheritor_test.
1857 */
1858
1859 static struct obj_cached *
alloc_init_cache_entry(void)1860 alloc_init_cache_entry(void)
1861 {
1862 struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1863 cache_entry->obj_id = 0;
1864 cache_entry->obj_state = OBJ_STATE_UNUSED;
1865 cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
1866 snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
1867 return cache_entry;
1868 }
1869
1870 static void
init_cache(struct info_sleep_inheritor_test * info)1871 init_cache(struct info_sleep_inheritor_test *info)
1872 {
1873 struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1874
1875 int i;
1876 for (i = 0; i < CACHE_SIZE; i++) {
1877 obj_cache[i] = alloc_init_cache_entry();
1878 }
1879
1880 info->obj_cache = obj_cache;
1881 }
1882
1883 static void
check_cache_empty(struct info_sleep_inheritor_test * info)1884 check_cache_empty(struct info_sleep_inheritor_test *info)
1885 {
1886 struct obj_cached **obj_cache = info->obj_cache;
1887
1888 int i, ret;
1889 for (i = 0; i < CACHE_SIZE; i++) {
1890 if (obj_cache[i] != NULL) {
1891 T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1892 T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1893 ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1894 T_ASSERT(ret == 0, "checked buff correctly emptied");
1895 }
1896 }
1897 }
1898
1899 static void
free_cache(struct info_sleep_inheritor_test * info)1900 free_cache(struct info_sleep_inheritor_test *info)
1901 {
1902 struct obj_cached **obj_cache = info->obj_cache;
1903
1904 int i;
1905 for (i = 0; i < CACHE_SIZE; i++) {
1906 if (obj_cache[i] != NULL) {
1907 kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1908 obj_cache[i]->obj_refcount = NULL;
1909 kfree_type(struct obj_cached, 1, obj_cache[i]);
1910 obj_cache[i] = NULL;
1911 }
1912 }
1913
1914 kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1915 info->obj_cache = NULL;
1916 }
1917
1918 static struct obj_cached *
find_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info)1919 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1920 {
1921 struct obj_cached **obj_cache = info->obj_cache;
1922 int i;
1923 for (i = 0; i < CACHE_SIZE; i++) {
1924 if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1925 return obj_cache[i];
1926 }
1927 }
1928 return NULL;
1929 }
1930
1931 static bool
free_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info,__assert_only struct obj_cached * expected)1932 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, __assert_only struct obj_cached *expected)
1933 {
1934 struct obj_cached **obj_cache = info->obj_cache;
1935 int i;
1936 for (i = 0; i < CACHE_SIZE; i++) {
1937 if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1938 assert(obj_cache[i] == expected);
1939 kfree_type(struct obj_cached, 1, obj_cache[i]);
1940 obj_cache[i] = NULL;
1941 return true;
1942 }
1943 }
1944 return false;
1945 }
1946
1947 static struct obj_cached *
find_empty_spot_in_cache(struct info_sleep_inheritor_test * info)1948 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1949 {
1950 struct obj_cached **obj_cache = info->obj_cache;
1951 int i;
1952 for (i = 0; i < CACHE_SIZE; i++) {
1953 if (obj_cache[i] == NULL) {
1954 obj_cache[i] = alloc_init_cache_entry();
1955 return obj_cache[i];
1956 }
1957 if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1958 return obj_cache[i];
1959 }
1960 }
1961 return NULL;
1962 }
1963
/*
 * Look up (or create) the cache entry for obj_id and return a
 * referenced pointer to its payload buffer in *buff.
 *
 * Returns 0 on success — a reference is held and the caller must
 * drop it with put_obj_cache() — or -1 when the cache is full of
 * in-use objects.
 *
 * info's primitive lock (the mtx) is the cache lock; the entry's
 * reflock serializes init/teardown so that threads waiting on an
 * in-progress init push their priority on the initializing thread.
 */
static int
get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	kern_apfs_reflock_t refcount = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;

try_again:
	primitive_lock(info);
	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
		/* Found an allocated object on the cache with same id */

		/*
		 * copy the pointer to obj_refcount as obj might
		 * get deallocated after primitive_unlock()
		 */
		refcount = obj->obj_refcount;
		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
			/*
			 * Got a ref, let's check the state
			 */
			switch (obj->obj_state) {
			case OBJ_STATE_UNUSED:
				/* we hold a ref but the object still needs to be populated */
				goto init;
			case OBJ_STATE_REAL:
				/* fully initialized: hand the buffer out */
				goto done;
			case OBJ_STATE_PLACEHOLDER:
				/* impossible: try_get_ref fails while an initializer holds the reflock */
				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
			default:
				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
			}
		} else {
			/*
			 * Didn't get a ref.
			 * This means either an obj_put() of the last ref is ongoing
			 * or an init of the object is happening.
			 * In both cases wait for that to finish and retry.
			 * While waiting, the thread that is holding the reflock
			 * will get a priority at least as the one of this thread.
			 */
			primitive_unlock(info);
			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			goto try_again;
		}
	} else {
		/* Look for a spot on the cache where we can save the object */

		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
			/*
			 * Sadness, cache is full, and everything in the cache is
			 * used.
			 */
			primitive_unlock(info);
			return -1;
		} else {
			/*
			 * copy the pointer to obj_refcount as obj might
			 * get deallocated after primitive_unlock()
			 */
			refcount = obj->obj_refcount;
			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
				/*
				 * Got a ref on a OBJ_STATE_UNUSED obj.
				 * Recycle time.
				 */
				obj->obj_id = obj_id;
				goto init;
			} else {
				/*
				 * This could happen if the obj_put() has just changed the
				 * state to OBJ_STATE_UNUSED, but not unlocked the reflock yet.
				 */
				primitive_unlock(info);
				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
				goto try_again;
			}
		}
	}
init:
	assert(obj->obj_id == obj_id);
	assert(obj->obj_state == OBJ_STATE_UNUSED);
	/*
	 * We already got a ref on the object, but we need
	 * to initialize it. Mark it as
	 * OBJ_STATE_PLACEHOLDER and get the obj_reflock.
	 * In this way all threads waiting for this init
	 * to finish will push on this thread.
	 */
	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
	/* cannot fail: we hold a ref and nobody else can lock an UNUSED entry */
	assert(ret == true);
	obj->obj_state = OBJ_STATE_PLACEHOLDER;
	primitive_unlock(info);

	//let's pretend we are populating the obj
	IOSleep(10);
	/*
	 * obj will not be deallocated while I hold a ref.
	 * So it is safe to access it.
	 */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);

	primitive_lock(info);
	/* nobody can have removed or recycled the entry while we held the ref */
	obj2 = find_id_in_cache(obj_id, info);
	assert(obj == obj2);
	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);

	obj->obj_state = OBJ_STATE_REAL;
	/* wake every thread that waited on the init */
	kern_apfs_reflock_unlock(refcount);

done:
	*buff = obj->obj_buff;
	primitive_unlock(info);
	return 0;
}
2079
/*
 * Drop the reference on obj_id taken by get_obj_cache().
 * The thread that drops the last reference also takes the reflock
 * (LOCK_IF_LAST), resets the object to its pristine state and,
 * when free is true, removes the entry from the cache and frees
 * both the entry and its reflock.
 */
static void
put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_t refcount = NULL;

	primitive_lock(info);
	obj = find_id_in_cache(obj_id, info);
	primitive_unlock(info);

	/*
	 * Nobody should have been able to remove obj_id
	 * from the cache: this thread still holds a reference.
	 */
	assert(obj != NULL);
	assert(obj->obj_state == OBJ_STATE_REAL);

	refcount = obj->obj_refcount;

	/*
	 * This should never fail, as either the reflock
	 * was acquired when the state was OBJ_STATE_UNUSED to init,
	 * or from a put that reached zero. And if the latter
	 * happened, subsequent reflock_get_ref() had to wait for the transition
	 * to OBJ_STATE_REAL.
	 */
	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
	assert(ret == true);
	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
		/* not the last reference: nothing to clean up */
		return;
	}

	/*
	 * Note: nobody at this point will be able to get a ref or a lock on
	 * refcount.
	 * All people waiting on refcount will push on this thread.
	 */

	//let's pretend we are flushing the obj somewhere.
	IOSleep(10);
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");

	primitive_lock(info);
	obj->obj_state = OBJ_STATE_UNUSED;
	if (free) {
		/* the entry must still be in the cache: we held the reflock */
		obj2 = find_id_in_cache(obj_id, info);
		assert(obj == obj2);

		ret = free_id_in_cache(obj_id, info, obj);
		assert(ret == true);
	}
	primitive_unlock(info);

	/* wake everybody waiting on the reflock */
	kern_apfs_reflock_unlock(refcount);

	if (free) {
		/* the entry is gone from the cache; its reflock can go too */
		kern_apfs_reflock_free(refcount);
	}
}
2141
/*
 * Thread body for test_cache_reflock: repeatedly gets an object
 * from the shared cache, verifies its payload before and after a
 * sleep (the held reference pins the buffer), then puts it back,
 * freeing the cache entry on every other round.
 */
static void
thread_use_cache(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	int my_obj;

	/*
	 * Pick an obj id in [1, CACHE_SIZE + 1]: one more id than there
	 * are slots, so the cache can genuinely fill up.
	 */
	primitive_lock(info);
	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
	primitive_unlock(info);

	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
	/*
	 * This is the string I would expect to see
	 * on my_obj buff.
	 */
	char my_string[OBJ_BUFF_SIZE];
	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);

	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
		char *buff;
		while (get_obj_cache(my_obj, info, &buff) == -1) {
			/*
			 * Cache is full, wait and retry.
			 */
			IOSleep(10);
		}
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		IOSleep(10);
		/* the reference held on the entry keeps buff stable across the sleep */
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		put_obj_cache(my_obj, info, (i % 2 == 0));
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2185
/*
 * Thread body for test_refcount_reflock: hammers try_get_ref /
 * try_put_ref on the shared reflock and checks that
 * reflock_protected_status is only mutated by the thread that owns
 * the 0->1 (LOCK_IF_FIRST) or 1->0 (LOCK_IF_LAST) transition and
 * therefore holds the reflock.
 */
static void
thread_refcount_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_in_flags_t in_flags;

	T_LOG("Thread %p started", current_thread());
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
		/* on even rounds also declare the intention to wait on failure */
		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
		if ((i % 2) == 0) {
			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
		}
		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
		if (ret == true) {
			/* got reference, check if we did 0->1 */
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 0, "status init check");
				info->reflock_protected_status = 1;
				kern_apfs_reflock_unlock(&info->reflock);
			} else {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
			}
			/* release the reference and check if we did 1->0 */
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
				info->reflock_protected_status = 0;
				kern_apfs_reflock_unlock(&info->reflock);
			}
		} else {
			/* didn't get a reference: another thread holds the reflock */
			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
				/* we promised we would wait, so wait for the holder to unlock */
				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2237
/*
 * Thread body for test_force_reflock: the first thread to bump
 * info->value becomes the locker and holds the reflock across the
 * start barrier; every other thread exercises IN_FORCE get/put,
 * which must succeed even while the reflock is held.
 */
static void
thread_force_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* first thread in: take the reflock with force allowed */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* hold the lock for a while, then release it */
		IOSleep(100);
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		/* forced refs must succeed even while the locker holds the reflock */
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2278
/*
 * Thread body for test_lock_reflock: the first thread takes the
 * reflock (DEFAULT mode) and sets the protected status; the others
 * spin on try_get_ref, which must fail while the lock is held, and
 * once it succeeds must observe the status already cleared.
 */
static void
thread_lock_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* first thread in: take the reflock and mark the status locked */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
		info->reflock_protected_status = 1;
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* clear the status before releasing, so readers only see 0 */
		IOSleep(100);
		info->reflock_protected_status = 0;
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
			if (ret == true) {
				/* got a ref, therefore the lock was released and status cleared */
				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
				break;
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2324
2325 static void
test_cache_reflock(struct info_sleep_inheritor_test * info)2326 test_cache_reflock(struct info_sleep_inheritor_test *info)
2327 {
2328 info->synch = 0;
2329 info->synch_value = info->head.nthreads;
2330
2331 info->value = info->head.nthreads;
2332 /*
2333 * Use the mtx as cache lock
2334 */
2335 info->prim_type = MTX_LOCK;
2336
2337 init_cache(info);
2338
2339 start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2340 wait_all_thread((struct synch_test_common *)info);
2341
2342 check_cache_empty(info);
2343 free_cache(info);
2344 }
2345
2346 static void
test_refcount_reflock(struct info_sleep_inheritor_test * info)2347 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2348 {
2349 info->synch = 0;
2350 info->synch_value = info->head.nthreads;
2351 kern_apfs_reflock_init(&info->reflock);
2352 info->reflock_protected_status = 0;
2353
2354 start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2355 wait_all_thread((struct synch_test_common *)info);
2356
2357 kern_apfs_reflock_destroy(&info->reflock);
2358
2359 T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2360 }
2361
2362 static void
test_force_reflock(struct info_sleep_inheritor_test * info)2363 test_force_reflock(struct info_sleep_inheritor_test *info)
2364 {
2365 info->synch = 0;
2366 info->synch_value = info->head.nthreads;
2367 kern_apfs_reflock_init(&info->reflock);
2368 info->value = 0;
2369
2370 start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2371 wait_all_thread((struct synch_test_common *)info);
2372
2373 kern_apfs_reflock_destroy(&info->reflock);
2374 }
2375
2376 static void
test_lock_reflock(struct info_sleep_inheritor_test * info)2377 test_lock_reflock(struct info_sleep_inheritor_test *info)
2378 {
2379 info->synch = 0;
2380 info->synch_value = info->head.nthreads;
2381 kern_apfs_reflock_init(&info->reflock);
2382 info->value = 0;
2383
2384 start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2385 wait_all_thread((struct synch_test_common *)info);
2386
2387 kern_apfs_reflock_destroy(&info->reflock);
2388 }
2389
2390 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)2391 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2392 {
2393 info->prim_type = prim_type;
2394 info->synch = 0;
2395 info->synch_value = info->head.nthreads;
2396
2397 info->thread_inheritor = NULL;
2398
2399 start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2400 wait_all_thread((struct synch_test_common *)info);
2401 }
2402
2403 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)2404 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2405 {
2406 info->prim_type = prim_type;
2407
2408 info->synch = 0;
2409 info->synch_value = info->head.nthreads;
2410 info->value = 0;
2411 info->handoff_failure = 0;
2412 info->thread_inheritor = NULL;
2413
2414 start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2415 wait_all_thread((struct synch_test_common *)info);
2416
2417 T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2418 T_ASSERT(info->handoff_failure == 1, "handoff failures");
2419 }
2420
2421 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2422 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2423 {
2424 info->prim_type = prim_type;
2425
2426 info->thread_inheritor = NULL;
2427 info->steal_pri = 0;
2428 info->synch = 0;
2429 info->synch_value = info->head.nthreads;
2430
2431 start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2432 wait_all_thread((struct synch_test_common *)info);
2433 }
2434
2435 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2436 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2437 {
2438 info->prim_type = prim_type;
2439 info->synch = 0;
2440 info->synch_value = info->head.nthreads;
2441
2442 info->thread_inheritor = NULL;
2443 info->value = info->head.nthreads;
2444
2445 start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2446 wait_all_thread((struct synch_test_common *)info);
2447 }
2448
2449 static void
test_rw_lock(struct info_sleep_inheritor_test * info)2450 test_rw_lock(struct info_sleep_inheritor_test *info)
2451 {
2452 info->thread_inheritor = NULL;
2453 info->value = info->head.nthreads;
2454 info->synch = 0;
2455 info->synch_value = info->head.nthreads;
2456
2457 start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2458 wait_all_thread((struct synch_test_common *)info);
2459 }
2460
2461 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)2462 test_mtx_lock(struct info_sleep_inheritor_test *info)
2463 {
2464 info->thread_inheritor = NULL;
2465 info->value = info->head.nthreads;
2466 info->synch = 0;
2467 info->synch_value = info->head.nthreads;
2468
2469 start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2470 wait_all_thread((struct synch_test_common *)info);
2471 }
2472
2473 kern_return_t
ts_kernel_sleep_inheritor_test(void)2474 ts_kernel_sleep_inheritor_test(void)
2475 {
2476 struct info_sleep_inheritor_test info = {};
2477
2478 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2479
2480 lck_attr_t* lck_attr = lck_attr_alloc_init();
2481 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2482 lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
2483
2484 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2485 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2486
2487 /*
2488 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2489 */
2490 T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
2491 test_sleep_with_wake_all(&info, MTX_LOCK);
2492
2493 /*
2494 * Testing rw_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2495 */
2496 T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
2497 test_sleep_with_wake_all(&info, RW_LOCK);
2498
2499 /*
2500 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
2501 */
2502 T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
2503 test_sleep_with_wake_one(&info, MTX_LOCK);
2504
2505 /*
2506 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
2507 */
2508 T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
2509 test_sleep_with_wake_one(&info, RW_LOCK);
2510
2511 /*
2512 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2513 * and change_sleep_inheritor
2514 */
2515 T_LOG("Testing change_sleep_inheritor with mxt sleep");
2516 test_change_sleep_inheritor(&info, MTX_LOCK);
2517
2518 /*
2519 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2520 * and change_sleep_inheritor
2521 */
2522 T_LOG("Testing change_sleep_inheritor with rw sleep");
2523 test_change_sleep_inheritor(&info, RW_LOCK);
2524
2525 /*
2526 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2527 * with inheritor NULL
2528 */
2529 T_LOG("Testing inheritor NULL");
2530 test_no_inheritor(&info, MTX_LOCK);
2531
2532 /*
2533 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2534 * with inheritor NULL
2535 */
2536 T_LOG("Testing inheritor NULL");
2537 test_no_inheritor(&info, RW_LOCK);
2538
2539 /*
2540 * Testing mtx locking combinations
2541 */
2542 T_LOG("Testing mtx locking combinations");
2543 test_mtx_lock(&info);
2544
2545 /*
2546 * Testing rw locking combinations
2547 */
2548 T_LOG("Testing rw locking combinations");
2549 test_rw_lock(&info);
2550
2551 /*
2552 * Testing reflock / cond_sleep_with_inheritor
2553 */
2554 T_LOG("Test cache reflock + cond_sleep_with_inheritor");
2555 test_cache_reflock(&info);
2556 T_LOG("Test force reflock + cond_sleep_with_inheritor");
2557 test_force_reflock(&info);
2558 T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
2559 test_refcount_reflock(&info);
2560 T_LOG("Test lock reflock + cond_sleep_with_inheritor");
2561 test_lock_reflock(&info);
2562
2563 destroy_synch_test_common((struct synch_test_common *)&info);
2564
2565 lck_attr_free(lck_attr);
2566 lck_grp_attr_free(lck_grp_attr);
2567 lck_rw_destroy(&info.rw_lock, lck_grp);
2568 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2569 lck_grp_free(lck_grp);
2570
2571 return KERN_SUCCESS;
2572 }
2573
/*
 * Thread body for test_gate_steal: the first thread closes the
 * gate; a second thread steals it (becoming the new inheritor);
 * the remaining threads wait on the gate and push their priority
 * on whoever holds it. The stealer asserts it inherited the
 * highest waiter priority.
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* first thread in: close the gate and become the keeper */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		/* wait for every thread except the keeper and the stealer */
		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		/* only open if the gate was not stolen in the meantime */
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* second thread in: steal the gate from the keeper */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* the stealer must have inherited the highest waiter priority */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* record the highest priority among the waiters */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* any priority push must have been dropped by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2633
/*
 * Thread body for test_gate_alloc_free: the thread that wins
 * try_close holds the gate until everyone else is waiting, then
 * opens and frees it; the waiters must see the gate open (not a
 * handoff).
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* gate holder: wait until all other threads are blocked on the gate */
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		primitive_gate_open(info);
		/* the gate can be freed right after opening */
		primitive_gate_free(info);
	} else {
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2669
/*
 * Thread body for test_gate_handoff: uses the gate as a mutex.
 * Each thread closes (or waits for a handoff of) the gate, bumps
 * info->value, then hands the gate off; exactly one handoff finds
 * no waiter and falls back to opening the gate.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		/* gate already closed: wait for the ownership to be handed to us */
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* critical section: value is protected by gate ownership, not the lock */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* no waiter to hand off to: expected exactly once (the last thread) */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2717
/*
 * Thread body for test_gate_push: exactly one thread at a time
 * closes the gate and "does the work"; the gate holder must be
 * pushed to the maximum priority of the waiting threads. Losers
 * wait on the gate and re-check whether work remains.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			/* we are the worker: everyone else will wait on the gate */
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			/* with all other threads waiting, we must have inherited the max priority */
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			/* reset the barrier for possible later use */
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			/* gate busy: signal we are about to wait, then block on it */
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* any priority push must have been dropped by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2759
2760 static void
test_gate_push(struct info_sleep_inheritor_test * info,int prim_type)2761 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2762 {
2763 info->prim_type = prim_type;
2764 info->use_alloc_gate = false;
2765
2766 primitive_gate_init(info);
2767 info->work_to_do = TRUE;
2768 info->synch = 0;
2769 info->synch_value = NUM_THREADS;
2770
2771 start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2772 wait_all_thread((struct synch_test_common *)info);
2773
2774 primitive_gate_destroy(info);
2775 }
2776
2777 static void
test_gate_handoff(struct info_sleep_inheritor_test * info,int prim_type)2778 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2779 {
2780 info->prim_type = prim_type;
2781 info->use_alloc_gate = false;
2782
2783 primitive_gate_init(info);
2784
2785 info->synch = 0;
2786 info->synch_value = NUM_THREADS;
2787 info->value = 0;
2788 info->handoff_failure = 0;
2789
2790 start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2791 wait_all_thread((struct synch_test_common *)info);
2792
2793 T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2794 T_ASSERT(info->handoff_failure == 1, "handoff failures");
2795
2796 primitive_gate_destroy(info);
2797 }
2798
2799 static void
test_gate_steal(struct info_sleep_inheritor_test * info,int prim_type)2800 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2801 {
2802 info->prim_type = prim_type;
2803 info->use_alloc_gate = false;
2804
2805 primitive_gate_init(info);
2806
2807 info->synch = 0;
2808 info->synch_value = NUM_THREADS;
2809 info->thread_inheritor = NULL;
2810 info->steal_pri = 0;
2811
2812 start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2813 wait_all_thread((struct synch_test_common *)info);
2814
2815 primitive_gate_destroy(info);
2816 }
2817
2818 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2819 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2820 {
2821 (void)info;
2822 (void) prim_type;
2823 info->prim_type = prim_type;
2824 info->use_alloc_gate = true;
2825
2826 primitive_gate_alloc(info);
2827
2828 info->synch = 0;
2829 info->synch_value = NUM_THREADS;
2830
2831 start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2832 wait_all_thread((struct synch_test_common *)info);
2833
2834 T_ASSERT(info->alloc_gate == NULL, "gate free");
2835 info->use_alloc_gate = false;
2836 }
2837
2838 kern_return_t
ts_kernel_gate_test(void)2839 ts_kernel_gate_test(void)
2840 {
2841 struct info_sleep_inheritor_test info = {};
2842
2843 T_LOG("Testing gate primitive");
2844
2845 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2846
2847 lck_attr_t* lck_attr = lck_attr_alloc_init();
2848 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2849 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2850
2851 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2852 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2853
2854 /*
2855 * Testing the priority inherited by the keeper
2856 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2857 */
2858 T_LOG("Testing gate push, mtx");
2859 test_gate_push(&info, MTX_LOCK);
2860
2861 T_LOG("Testing gate push, rw");
2862 test_gate_push(&info, RW_LOCK);
2863
2864 /*
2865 * Testing the handoff
2866 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2867 */
2868 T_LOG("Testing gate handoff, mtx");
2869 test_gate_handoff(&info, MTX_LOCK);
2870
2871 T_LOG("Testing gate handoff, rw");
2872 test_gate_handoff(&info, RW_LOCK);
2873
2874 /*
2875 * Testing the steal
2876 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2877 */
2878 T_LOG("Testing gate steal, mtx");
2879 test_gate_steal(&info, MTX_LOCK);
2880
2881 T_LOG("Testing gate steal, rw");
2882 test_gate_steal(&info, RW_LOCK);
2883
2884 /*
2885 * Testing the alloc/free
2886 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2887 */
2888 T_LOG("Testing gate alloc/free, mtx");
2889 test_gate_alloc_free(&info, MTX_LOCK);
2890
2891 T_LOG("Testing gate alloc/free, rw");
2892 test_gate_alloc_free(&info, RW_LOCK);
2893
2894 destroy_synch_test_common((struct synch_test_common *)&info);
2895
2896 lck_attr_free(lck_attr);
2897 lck_grp_attr_free(lck_grp_attr);
2898 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2899 lck_grp_free(lck_grp);
2900
2901 return KERN_SUCCESS;
2902 }
2903
2904 #define NUM_THREAD_CHAIN 6
2905
/*
 * Shared state for the turnstile chain tests: NUM_THREAD_CHAIN threads
 * form a chain through per-thread gates hanging off a single mutex.
 */
struct turnstile_chain_test {
	struct synch_test_common head;  /* common bookkeeping: threads[], nthreads, waiter sync */
	lck_mtx_t mtx_lock;             /* mutex all the gates are associated with */
	int synch_value;                /* number of threads expected at a rendezvous */
	int synch;                      /* first rendezvous counter (wait_threads/wake_threads) */
	int synch2;                     /* second rendezvous counter, used by the gate variants */
	gate_t gates[NUM_THREAD_CHAIN]; /* one gate per thread in the chain */
};
2914
/*
 * Worker for the mixed sleep/gate chain test.  Even-indexed threads
 * close their own gate (gates[i]); odd-indexed threads later wait on
 * the previous thread's gate, while even-indexed ones (other than
 * thread 0) sleep with the previous thread as explicit inheritor.
 * The chain should push the maximum priority of the group up to each
 * blocking point, checked against max_pri after every wakeup.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Rendezvous: all even gates are closed before anyone waits. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until everyone behind is blocked. */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		/* The whole chain's push should have reached thread 0. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Find my slot: I wait on threads[i-1] and wake threads[i]. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd thread: block on the previous (even) thread's gate. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Wake my successor without transferring the push. */
			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				/* Only the last thread in the chain has no successor. */
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			/* Even thread: sleep with the previous thread as inheritor. */
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			/* Release my own gate so my waiter can proceed. */
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* All promotions must be dropped before the thread exits. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3010
/*
 * Worker for the pure gate chain test.  Thread i closes gates[i];
 * every thread but the head then waits on the previous thread's gate,
 * forming a chain of gate holders.  When thread 0 opens gates[0] the
 * chain unwinds, and each thread checks it received the full priority
 * push (max_pri) before opening its own gate.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Every thread closes its own gate; i records my chain position. */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	/* Rendezvous: all gates closed before anyone starts waiting. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until the others are blocked. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* The chain's push should have propagated up to thread 0. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		/* Block on the previous thread's gate (drops the mutex). */
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Unblock my successor by opening my own gate. */
		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* All promotions must be dropped before the thread exits. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3073
/*
 * Worker for the sleep_with_inheritor chain test.  Each thread (except
 * the head) sleeps on an event keyed to its predecessor's slot, naming
 * that predecessor as inheritor, so the push travels down the chain to
 * thread 0.  On wakeup each thread verifies it holds max_pri, wakes its
 * successor without transferring the push, then drops inheritorship.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/* Head of the chain: wait until everyone behind is asleep. */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* The whole chain's push should have reached thread 0. */
		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Start unwinding: wake thread 1, keeping the push here. */
		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* Find my slot: sleep on threads[i-1]'s event, wake threads[i]'s. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		/* Sleep, pushing on my predecessor (drops the mutex). */
		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		/* Wake my successor without transferring the push. */
		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			/* Only the tail of the chain has nobody to wake. */
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* All promotions must be dropped before the thread exits. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3151
3152 static void
test_sleep_chain(struct turnstile_chain_test * info)3153 test_sleep_chain(struct turnstile_chain_test *info)
3154 {
3155 info->synch = 0;
3156 info->synch_value = info->head.nthreads;
3157
3158 start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
3159 wait_all_thread((struct synch_test_common *)info);
3160 }
3161
3162 static void
test_gate_chain(struct turnstile_chain_test * info)3163 test_gate_chain(struct turnstile_chain_test *info)
3164 {
3165 info->synch = 0;
3166 info->synch2 = 0;
3167 info->synch_value = info->head.nthreads;
3168
3169 start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
3170 wait_all_thread((struct synch_test_common *)info);
3171 }
3172
3173 static void
test_sleep_gate_chain(struct turnstile_chain_test * info)3174 test_sleep_gate_chain(struct turnstile_chain_test *info)
3175 {
3176 info->synch = 0;
3177 info->synch2 = 0;
3178 info->synch_value = info->head.nthreads;
3179
3180 start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
3181 wait_all_thread((struct synch_test_common *)info);
3182 }
3183
3184 kern_return_t
ts_kernel_turnstile_chain_test(void)3185 ts_kernel_turnstile_chain_test(void)
3186 {
3187 struct turnstile_chain_test info = {};
3188 int i;
3189
3190 init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
3191 lck_attr_t* lck_attr = lck_attr_alloc_init();
3192 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
3193 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
3194
3195 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
3196 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3197 lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
3198 }
3199
3200 T_LOG("Testing sleep chain, lck");
3201 test_sleep_chain(&info);
3202
3203 T_LOG("Testing gate chain, lck");
3204 test_gate_chain(&info);
3205
3206 T_LOG("Testing sleep and gate chain, lck");
3207 test_sleep_gate_chain(&info);
3208
3209 destroy_synch_test_common((struct synch_test_common *)&info);
3210 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3211 lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
3212 }
3213 lck_attr_free(lck_attr);
3214 lck_grp_attr_free(lck_grp_attr);
3215 lck_mtx_destroy(&info.mtx_lock, lck_grp);
3216 lck_grp_free(lck_grp);
3217
3218 return KERN_SUCCESS;
3219 }
3220
3221 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)3222 ts_kernel_timingsafe_bcmp_test(void)
3223 {
3224 int i, buf_size;
3225 char *buf = NULL;
3226
3227 // empty
3228 T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3229 T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3230 T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3231
3232 // equal
3233 T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3234
3235 // unequal
3236 T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3237 T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3238 T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3239 T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3240
3241 // all possible bitwise differences
3242 for (i = 1; i < 256; i += 1) {
3243 unsigned char a = 0;
3244 unsigned char b = (unsigned char)i;
3245
3246 T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3247 }
3248
3249 // large
3250 buf_size = 1024 * 16;
3251 buf = kalloc_data(buf_size, Z_WAITOK);
3252 T_EXPECT_NOTNULL(buf, "kalloc of buf");
3253
3254 read_random(buf, buf_size);
3255 T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3256 T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3257 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3258
3259 memcpy(buf + 128, buf, 128);
3260 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3261
3262 kfree_data(buf, buf_size);
3263
3264 return KERN_SUCCESS;
3265 }
3266
3267 kern_return_t
kprintf_hhx_test(void)3268 kprintf_hhx_test(void)
3269 {
3270 printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
3271 (unsigned short)0xfeed, (unsigned short)0xface,
3272 (unsigned short)0xabad, (unsigned short)0xcafe,
3273 (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
3274 (unsigned char)'!',
3275 0xfeedfaceULL);
3276 T_PASS("kprintf_hhx_test passed");
3277 return KERN_SUCCESS;
3278 }
3279