1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <machine/static_if.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_object_internal.h>
48 #include <vm/vm_protos.h>
49 #include <vm/vm_iokit.h>
50 #include <string.h>
51 #include <kern/kern_apfs_reflock.h>
52
53 #if !(DEVELOPMENT || DEBUG)
54 #error "Testing is not enabled on RELEASE configurations"
55 #endif
56
57 #include <tests/xnupost.h>
58
59 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
60 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
61
62 uint32_t total_post_tests_count = 0;
63 void xnupost_reset_panic_widgets(void);
64
65 /* test declarations */
66 kern_return_t zalloc_test(void);
67 kern_return_t RandomULong_test(void);
68 kern_return_t kcdata_api_test(void);
69 kern_return_t ts_kernel_primitive_test(void);
70 kern_return_t ts_kernel_sleep_inheritor_test(void);
71 kern_return_t ts_kernel_gate_test(void);
72 kern_return_t ts_kernel_turnstile_chain_test(void);
73 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
74
75 #if __ARM_VFP__
76 extern kern_return_t vfp_state_test(void);
77 #endif
78
79 extern kern_return_t kprintf_hhx_test(void);
80
81 #if defined(__arm64__)
82 kern_return_t pmap_coredump_test(void);
83 #endif
84
85 extern kern_return_t console_serial_test(void);
86 extern kern_return_t console_serial_parallel_log_tests(void);
87 extern kern_return_t test_printf(void);
88 extern kern_return_t test_os_log(void);
89 extern kern_return_t test_os_log_handles(void);
90 extern kern_return_t test_os_log_parallel(void);
91 extern kern_return_t bitmap_post_test(void);
92 extern kern_return_t counter_tests(void);
93 #if ML_IO_TIMEOUTS_ENABLED
94 extern kern_return_t ml_io_timeout_test(void);
95 #endif
96
97 #ifdef __arm64__
98 extern kern_return_t arm64_munger_test(void);
99 #if __ARM_PAN_AVAILABLE__
100 extern kern_return_t arm64_pan_test(void);
101 #endif
102 #if defined(HAS_APPLE_PAC)
103 extern kern_return_t arm64_ropjop_test(void);
104 #endif /* defined(HAS_APPLE_PAC) */
105 #if CONFIG_SPTM
106 extern kern_return_t arm64_panic_lockdown_test(void);
107 #endif /* CONFIG_SPTM */
108 #if HAS_SPECRES
109 extern kern_return_t specres_test(void);
110 #endif /* HAS_SPECRES */
111 #if BTI_ENFORCED
112 kern_return_t arm64_bti_test(void);
113 #endif /* BTI_ENFORCED */
114 extern kern_return_t arm64_speculation_guard_test(void);
115 #endif /* __arm64__ */
116
117 extern kern_return_t test_thread_call(void);
118
/*
 * Single global registration slot for a panic-intercept widget.
 * Filled by xnupost_register_panic_widget(), cleared by
 * xnupost_reset_panic_widgets(), consulted in xnupost_process_kdb_stop().
 */
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};
123
/*
 * Table of contents of kernel POST tests. Test numbers are assigned
 * sequentially by xnupost_list_tests(); xnupost_run_tests() executes the
 * table honoring the kernPOST_config boot-arg runlist.
 */
struct xnupost_test kernel_post_tests[] = {
	XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	XNUPOST_TEST_CONFIG_BASIC(test_printf),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_handles),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
#if __ARM_PAN_AVAILABLE__
	XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#if CONFIG_SPTM
	XNUPOST_TEST_CONFIG_BASIC(arm64_panic_lockdown_test),
#endif /* CONFIG_SPTM */
	XNUPOST_TEST_CONFIG_BASIC(arm64_speculation_guard_test),
#endif /* __arm64__ */
	XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	//XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	XNUPOST_TEST_CONFIG_BASIC(counter_tests),
#if ML_IO_TIMEOUTS_ENABLED
	XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
#endif
#if HAS_SPECRES
	XNUPOST_TEST_CONFIG_BASIC(specres_test),
#endif
};
171
172 uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
173
174 #define POSTARGS_RUN_TESTS 0x1
175 #define POSTARGS_CONTROLLER_AVAILABLE 0x2
176 #define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
177 uint64_t kernel_post_args = 0x0;
178
179 /* static variables to hold state */
180 static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
181 static char kernel_post_test_configs[256];
182 boolean_t xnupost_should_run_test(uint32_t test_num);
183
184 kern_return_t
xnupost_parse_config()185 xnupost_parse_config()
186 {
187 if (parse_config_retval != KERN_INVALID_CAPABILITY) {
188 return parse_config_retval;
189 }
190 PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
191
192 if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
193 kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
194 }
195
196 if (kernel_post_args != 0) {
197 parse_config_retval = KERN_SUCCESS;
198 goto out;
199 }
200 parse_config_retval = KERN_NOT_SUPPORTED;
201 out:
202 return parse_config_retval;
203 }
204
205 boolean_t
xnupost_should_run_test(uint32_t test_num)206 xnupost_should_run_test(uint32_t test_num)
207 {
208 if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
209 int64_t begin = 0, end = 999999;
210 char * b = kernel_post_test_configs;
211 while (*b) {
212 get_range_bounds(b, &begin, &end);
213 if (test_num >= begin && test_num <= end) {
214 return TRUE;
215 }
216
217 /* skip to the next "," */
218 while (*b != ',') {
219 if (*b == '\0') {
220 return FALSE;
221 }
222 b++;
223 }
224 /* skip past the ',' */
225 b++;
226 }
227 return FALSE;
228 }
229 return TRUE;
230 }
231
232 kern_return_t
xnupost_list_tests(xnupost_test_t test_list,uint32_t test_count)233 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
234 {
235 if (KERN_SUCCESS != xnupost_parse_config()) {
236 return KERN_FAILURE;
237 }
238
239 xnupost_test_t testp;
240 for (uint32_t i = 0; i < test_count; i++) {
241 testp = &test_list[i];
242 if (testp->xt_test_num == 0) {
243 assert(total_post_tests_count < UINT16_MAX);
244 testp->xt_test_num = (uint16_t)++total_post_tests_count;
245 }
246 /* make sure the boot-arg based test run list is honored */
247 if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
248 testp->xt_config |= XT_CONFIG_IGNORE;
249 if (xnupost_should_run_test(testp->xt_test_num)) {
250 testp->xt_config &= ~(XT_CONFIG_IGNORE);
251 testp->xt_config |= XT_CONFIG_RUN;
252 printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
253 }
254 }
255 printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
256 testp->xt_config);
257 }
258
259 return KERN_SUCCESS;
260 }
261
/*
 * Run every test in test_list in table order, honoring skip/ignore
 * configuration and recording per-test timing and outcome into the table
 * entries themselves (xt_begin_time/xt_end_time/xt_retval/xt_test_actions).
 * Returns KERN_SUCCESS unconditionally once started; individual failures
 * are reported through the T_* machinery and the table.
 */
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;
	int test_retval = KERN_FAILURE;

	/* Nothing to do unless the kernPOST boot-arg requested a run. */
	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		/* Drop any panic widget left registered by the previous test. */
		xnupost_reset_panic_widgets();
		T_TESTRESULT = T_STATE_UNRESOLVED;
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		/* Pre-set end time so a skipped test reports zero duration. */
		testp->xt_end_time = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		test_retval = testp->xt_func();
		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
			/*
			 * If test result is unresolved due to that no T_* test cases are called,
			 * determine the test result based on the return value of the test function.
			 */
			if (KERN_SUCCESS == test_retval) {
				T_PASS("Test passed because retval == KERN_SUCCESS");
			} else {
				T_FAIL("Test failed because retval == KERN_FAILURE");
			}
		}
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
326
327 kern_return_t
kernel_list_tests()328 kernel_list_tests()
329 {
330 return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
331 }
332
333 kern_return_t
kernel_do_post()334 kernel_do_post()
335 {
336 return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
337 }
338
339 kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp,const char * funcname,void * context,void ** outval)340 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
341 {
342 if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
343 return KERN_RESOURCE_SHORTAGE;
344 }
345
346 xt_panic_widgets.xtp_context_p = context;
347 xt_panic_widgets.xtp_func = funcp;
348 xt_panic_widgets.xtp_func_name = funcname;
349 xt_panic_widgets.xtp_outval_p = outval;
350
351 return KERN_SUCCESS;
352 }
353
354 void
xnupost_reset_panic_widgets()355 xnupost_reset_panic_widgets()
356 {
357 bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
358 }
359
360 kern_return_t
xnupost_process_kdb_stop(const char * panic_s)361 xnupost_process_kdb_stop(const char * panic_s)
362 {
363 xt_panic_return_t retval = 0;
364 struct xnupost_panic_widget * pw = &xt_panic_widgets;
365 const char * name = "unknown";
366 if (xt_panic_widgets.xtp_func_name) {
367 name = xt_panic_widgets.xtp_func_name;
368 }
369
370 /* bail early on if kernPOST is not set */
371 if (kernel_post_args == 0) {
372 return KERN_INVALID_CAPABILITY;
373 }
374
375 if (xt_panic_widgets.xtp_func) {
376 T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
377 retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
378 } else {
379 return KERN_INVALID_CAPABILITY;
380 }
381
382 switch (retval) {
383 case XT_RET_W_SUCCESS:
384 T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
385 /* KERN_SUCCESS means return from panic/assertion */
386 return KERN_SUCCESS;
387
388 case XT_RET_W_FAIL:
389 T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
390 return KERN_SUCCESS;
391
392 case XT_PANIC_W_FAIL:
393 T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
394 return KERN_FAILURE;
395
396 case XT_PANIC_W_SUCCESS:
397 T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
398 return KERN_FAILURE;
399
400 case XT_PANIC_UNRELATED:
401 default:
402 T_LOG("UNRELATED: Continuing to kdb_stop.");
403 return KERN_FAILURE;
404 }
405 }
406
407 xt_panic_return_t
_xt_generic_assert_check(const char * s,void * str_to_match,void ** outval)408 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
409 {
410 xt_panic_return_t ret = XT_PANIC_UNRELATED;
411
412 if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
413 T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
414 ret = XT_RET_W_SUCCESS;
415 }
416
417 if (outval) {
418 *outval = (void *)(uintptr_t)ret;
419 }
420 return ret;
421 }
422
423 kern_return_t
xnupost_reset_tests(xnupost_test_t test_list,uint32_t test_count)424 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
425 {
426 uint32_t i = 0;
427 xnupost_test_t testp;
428 for (; i < test_count; i++) {
429 testp = &test_list[i];
430 testp->xt_begin_time = 0;
431 testp->xt_end_time = 0;
432 testp->xt_test_actions = XT_ACTION_NONE;
433 testp->xt_retval = -1;
434 }
435 return KERN_SUCCESS;
436 }
437
438
/*
 * Smoke test for the zone allocator: create a destructible zone of
 * uint64_t-sized elements, allocate and free one element, and emit a
 * sample T_PERF datapoint. Returns KERN_SUCCESS on completion.
 */
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	/* A freshly created zone should not report any free elements yet. */
	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	/* test_ptr is first written here, by the allocation under test */
	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
462
/*
 * qsort() comparator ordering uint64_t values ascending.
 * Returns -1, 0, or +1.
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t lhs = *(const uint64_t *)a;
	const uint64_t rhs = *(const uint64_t *)b;

	/* (lhs > rhs) - (lhs < rhs): branch-light three-way compare with no
	 * overflow hazard (subtracting the values directly would truncate). */
	return (lhs > rhs) - (lhs < rhs);
}
479
/*
 * Count the number of set bits (population count) in a 64-bit value.
 */
static int
count_bits(uint64_t number)
{
	int set = 0;

	/* Kernighan's trick: each iteration clears the lowest set bit. */
	while (number != 0) {
		number &= (number - 1);
		set++;
	}
	return set;
}
489
/*
 * Statistical sanity checks for the kernel RNG via read_random().
 */
kern_return_t
RandomULong_test()
{
	/*
	 * Randomness test for RandomULong()
	 *
	 * This test verifies that:
	 * a. RandomULong works
	 * b. The generated numbers match the following entropy criteria:
	 * For a thousand iterations, verify:
	 * 1. mean entropy > 12 bits
	 * 2. min entropy > 4 bits
	 * 3. No Duplicate
	 * 4. No incremental/decremental pattern in a window of 3
	 * 5. No Zero
	 * 6. No -1
	 *
	 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
	 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with atleast one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 * "Bit entropy" here is the Hamming distance between consecutive samples.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window (clamped to the end of the sample array)
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window (windows overlap: stride of 1)
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated (sort, then scan neighbors)
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
626
627
/* KCDATA kernel api tests */
/* Descriptor reused across kcdata_api_test's init calls. */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
/*
 * Sample fixed-layout payload used only to exercise kcdata custom type
 * registration; the field layout mirrors test_disk_io_stats_def below.
 */
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));
636
/*
 * kcdata subtype descriptors describing struct sample_disk_io_stats,
 * registered via kcdata_add_type_definition() in kcdata_api_test().
 * Offsets/sizes must stay in sync with the struct above.
 */
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
667
/*
 * Exercises the kcdata (kernel chunked data) APIs end to end:
 * negative/positive static-init paths, typed-entry allocation,
 * zero-size and oversize entries, the add_uint{32,64}_with_description
 * helpers, array entries, and custom type-definition registration.
 */
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	test_kc_data.kcd_length = 0xdeadbeef; /* poison value; init must overwrite it */

	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef; /* poison value; a successful call must overwrite it */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	/* array item header: type in the high 32 bits, element count in the low 32 */
	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	kfree_data(data_ptr, PAGE_SIZE);
	return KERN_SUCCESS;
}
778
779 /*
780 * kern_return_t
781 * kcdata_api_assert_tests()
782 * {
783 * kern_return_t retval = 0;
784 * void * assert_check_retval = NULL;
785 * test_kc_data2.kcd_length = 0xdeadbeef;
786 * mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
787 * T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
788 *
789 * retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
790 * KCFLAG_USE_MEMCOPY);
791 *
792 * T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
793 *
794 * retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
795 * T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
796 *
797 * // this will assert
798 * retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
799 * T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
800 * T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
801 *
802 * return KERN_SUCCESS;
803 * }
804 */
805
806 #if defined(__arm64__)
807
808 #include <arm/pmap.h>
809
810 #define MAX_PMAP_OBJECT_ELEMENT 100000
811
812 extern struct vm_object pmap_object_store; /* store pt pages */
813 extern unsigned long gPhysBase, gPhysSize, first_avail;
814
815 /*
816 * Define macros to transverse the pmap object structures and extract
817 * physical page number with information from low global only
818 * This emulate how Astris extracts information from coredump
819 */
820 #if defined(__arm64__)
821
822 static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)823 astris_vm_page_unpack_ptr(uintptr_t p)
824 {
825 if (!p) {
826 return (uintptr_t)0;
827 }
828
829 return (p & lowGlo.lgPmapMemFromArrayMask)
830 ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
831 : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
832 }
833
834 // assume next pointer is the first element
835 #define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))
836
837 #endif
838
839 #define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)
840
841 #define astris_vm_page_queue_end(q, qe) ((q) == (qe))
842
843 #define astris_vm_page_queue_iterate(head, elt) \
844 for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
845 (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))
846
847 #define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
848
849 static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)850 astris_vm_page_get_phys_page(uintptr_t m)
851 {
852 return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
853 ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
854 : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
855 }
856
/*
 * Verify that the lowGlo fields describing the pmap page-table object
 * agree with the live kernel layout, then walk the pmap object's page
 * queue the same way Astris walks a coredump, checking every physical
 * address falls within [gPhysBase, gPhysBase + gPhysSize).
 */
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		/* guards against a corrupt or cyclic queue iterating forever */
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	/* queue must have contained at least one page-table page */
	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
898 #endif /* defined(__arm64__) */
899
/* per-thread arguments for the turnstile kernel-primitive test */
struct ts_kern_prim_test_args {
	int *end_barrier;       /* counter bumped when the thread is done */
	int *notify_b;          /* counter bumped before taking the lock */
	int *wait_event_b;      /* counter waited on before taking the lock */
	int before_num;         /* value wait_event_b must reach */
	int *notify_a;          /* counter bumped right after taking the lock */
	int *wait_event_a;      /* counter waited on while holding the lock */
	int after_num;          /* value wait_event_a must reach */
	int priority_to_check;  /* expected promoted sched_pri; 0 skips the check */
};
910
/*
 * Block the caller until *var reaches num (NULL var: nothing to wait
 * for).  The assert_wait / re-check / clear_wait sequence closes the
 * window where a wakeup could arrive between the load and the block.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				/* the value arrived after assert_wait: cancel the wait */
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
927
928 static void
wake_threads(int * var)929 wake_threads(
930 int* var)
931 {
932 if (var) {
933 os_atomic_inc(var, relaxed);
934 thread_wakeup((event_t) var);
935 }
936 }
937
938 extern void IOSleep(int);
939
/*
 * Body shared by the owner and waiter threads of
 * ts_kernel_primitive_test(): rendezvous, take the sysctl turnstile
 * test lock, optionally verify the promoted priority while holding
 * it, then release it and signal completion.
 */
static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	/* rendezvous before contending on the lock */
	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	/* give the waiters time to block and push on the lock holder */
	IOSleep(100);

	if (info->priority_to_check) {
		/* sample our sched_pri under the thread lock */
		spl_t s = splsched();
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		splx(s);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
973
974 kern_return_t
ts_kernel_primitive_test(void)975 ts_kernel_primitive_test(void)
976 {
977 thread_t owner, thread1, thread2;
978 struct ts_kern_prim_test_args targs[2] = {};
979 kern_return_t result;
980 int end_barrier = 0;
981 int owner_locked = 0;
982 int waiters_ready = 0;
983
984 T_LOG("Testing turnstile kernel primitive");
985
986 targs[0].notify_b = NULL;
987 targs[0].wait_event_b = NULL;
988 targs[0].before_num = 0;
989 targs[0].notify_a = &owner_locked;
990 targs[0].wait_event_a = &waiters_ready;
991 targs[0].after_num = 2;
992 targs[0].priority_to_check = 90;
993 targs[0].end_barrier = &end_barrier;
994
995 // Start owner with priority 80
996 result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
997 T_ASSERT(result == KERN_SUCCESS, "Starting owner");
998
999 targs[1].notify_b = &waiters_ready;
1000 targs[1].wait_event_b = &owner_locked;
1001 targs[1].before_num = 1;
1002 targs[1].notify_a = NULL;
1003 targs[1].wait_event_a = NULL;
1004 targs[1].after_num = 0;
1005 targs[1].priority_to_check = 0;
1006 targs[1].end_barrier = &end_barrier;
1007
1008 // Start waiters with priority 85 and 90
1009 result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
1010 T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
1011
1012 result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
1013 T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
1014
1015 wait_threads(&end_barrier, 3);
1016
1017 return KERN_SUCCESS;
1018 }
1019
/* primitive selectors used by the sleep-with-inheritor / gate tests */
#define MTX_LOCK 0
#define RW_LOCK 1

#define NUM_THREADS 4

/* state shared by every thread of one synchronization test */
struct synch_test_common {
	unsigned int nthreads;
	thread_t *threads;   /* slots published by start_threads() */
	int max_pri;         /* highest priority handed to a test thread */
	int test_done;       /* counter each thread bumps when finished */
};
1031
1032 static kern_return_t
init_synch_test_common(struct synch_test_common * info,unsigned int nthreads)1033 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1034 {
1035 info->nthreads = nthreads;
1036 info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1037 if (!info->threads) {
1038 return ENOMEM;
1039 }
1040
1041 return KERN_SUCCESS;
1042 }
1043
/* Release the thread array allocated by init_synch_test_common(). */
static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree_type(thread_t, info->nthreads, info->threads);
}
1049
/*
 * Start info->nthreads copies of func at priorities 75, 80, 85, ...
 * publishing each thread_t in info->threads.  info->max_pri records
 * the highest priority assigned, capped at 95.  When
 * sleep_after_first is set, the first thread gets a 100ms head start.
 */
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	/* precompute the highest priority any thread will run at */
	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		/* release-store pairs with the acquire loads in wait_for_waiters() */
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}
1081
1082 static unsigned int
get_max_pri(struct synch_test_common * info)1083 get_max_pri(struct synch_test_common * info)
1084 {
1085 return info->max_pri;
1086 }
1087
/* Block until every test thread has called notify_waiter(). */
static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}
1093
/* Signal that the calling test thread is done (see wait_all_thread). */
static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
1099
/*
 * Poll until every other test thread has been published in
 * info->threads and has gone off core (left TH_RUN), i.e. is blocked.
 * Slots holding the sentinel (thread_t)1, set by
 * exclude_current_waiter(), are skipped.
 */
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* wait for start_threads() to publish this slot */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					/* slot was excluded by its own thread */
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				/* keep polling while the thread has not actually started */
				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
1139
/*
 * Replace the calling thread's slot in info->threads with the
 * sentinel (thread_t)1 so wait_for_waiters() does not wait for it.
 */
static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		/* the slot may not have been published yet */
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
1160
/* per-test state for the sleep-with-inheritor / gate / reflock tests */
struct info_sleep_inheritor_test {
	struct synch_test_common head;  /* must stay first: code casts to it */
	lck_mtx_t mtx_lock;
	lck_rw_t rw_lock;
	decl_lck_mtx_gate_data(, gate); /* gate embedded in the struct */
	boolean_t gate_closed;
	int prim_type;                  /* MTX_LOCK or RW_LOCK */
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;         /* highest waiter pri in the steal test */
	int synch_value;                /* threads expected at the rendezvous */
	int synch;                      /* rendezvous counter */
	int value;
	int handoff_failure;            /* wakeups that found no waiter */
	thread_t thread_inheritor;      /* current inheritor; also the sleep event */
	bool use_alloc_gate;            /* operate on alloc_gate instead of gate */
	gate_t *alloc_gate;
	struct obj_cached **obj_cache;  /* cache used by the reflock tests */
	kern_apfs_reflock_data(, reflock);
	int reflock_protected_status;
};
1182
1183 static void
primitive_lock(struct info_sleep_inheritor_test * info)1184 primitive_lock(struct info_sleep_inheritor_test *info)
1185 {
1186 switch (info->prim_type) {
1187 case MTX_LOCK:
1188 lck_mtx_lock(&info->mtx_lock);
1189 break;
1190 case RW_LOCK:
1191 lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1192 break;
1193 default:
1194 panic("invalid type %d", info->prim_type);
1195 }
1196 }
1197
1198 static void
primitive_unlock(struct info_sleep_inheritor_test * info)1199 primitive_unlock(struct info_sleep_inheritor_test *info)
1200 {
1201 switch (info->prim_type) {
1202 case MTX_LOCK:
1203 lck_mtx_unlock(&info->mtx_lock);
1204 break;
1205 case RW_LOCK:
1206 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1207 break;
1208 default:
1209 panic("invalid type %d", info->prim_type);
1210 }
1211 }
1212
1213 static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test * info)1214 primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
1215 {
1216 wait_result_t ret = KERN_SUCCESS;
1217 switch (info->prim_type) {
1218 case MTX_LOCK:
1219 ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1220 break;
1221 case RW_LOCK:
1222 ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1223 break;
1224 default:
1225 panic("invalid type %d", info->prim_type);
1226 }
1227
1228 return ret;
1229 }
1230
1231 static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test * info)1232 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1233 {
1234 switch (info->prim_type) {
1235 case MTX_LOCK:
1236 case RW_LOCK:
1237 wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1238 break;
1239 default:
1240 panic("invalid type %d", info->prim_type);
1241 }
1242 }
1243
1244 static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test * info)1245 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1246 {
1247 switch (info->prim_type) {
1248 case MTX_LOCK:
1249 case RW_LOCK:
1250 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1251 break;
1252 default:
1253 panic("invalid type %d", info->prim_type);
1254 }
1255 return;
1256 }
1257
1258 static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test * info)1259 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1260 {
1261 switch (info->prim_type) {
1262 case MTX_LOCK:
1263 case RW_LOCK:
1264 change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1265 break;
1266 default:
1267 panic("invalid type %d", info->prim_type);
1268 }
1269 return;
1270 }
1271
1272 static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test * info)1273 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1274 {
1275 gate_t *gate = &info->gate;
1276 if (info->use_alloc_gate == true) {
1277 gate = info->alloc_gate;
1278 }
1279 kern_return_t ret = KERN_SUCCESS;
1280 switch (info->prim_type) {
1281 case MTX_LOCK:
1282 ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1283 break;
1284 case RW_LOCK:
1285 ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1286 break;
1287 default:
1288 panic("invalid type %d", info->prim_type);
1289 }
1290 return ret;
1291 }
1292
1293 static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test * info)1294 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1295 {
1296 gate_t *gate = &info->gate;
1297 if (info->use_alloc_gate == true) {
1298 gate = info->alloc_gate;
1299 }
1300 gate_wait_result_t ret = GATE_OPENED;
1301 switch (info->prim_type) {
1302 case MTX_LOCK:
1303 ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1304 break;
1305 case RW_LOCK:
1306 ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1307 break;
1308 default:
1309 panic("invalid type %d", info->prim_type);
1310 }
1311 return ret;
1312 }
1313
1314 static void
primitive_gate_open(struct info_sleep_inheritor_test * info)1315 primitive_gate_open(struct info_sleep_inheritor_test *info)
1316 {
1317 gate_t *gate = &info->gate;
1318 if (info->use_alloc_gate == true) {
1319 gate = info->alloc_gate;
1320 }
1321 switch (info->prim_type) {
1322 case MTX_LOCK:
1323 lck_mtx_gate_open(&info->mtx_lock, gate);
1324 break;
1325 case RW_LOCK:
1326 lck_rw_gate_open(&info->rw_lock, gate);
1327 break;
1328 default:
1329 panic("invalid type %d", info->prim_type);
1330 }
1331 }
1332
1333 static void
primitive_gate_close(struct info_sleep_inheritor_test * info)1334 primitive_gate_close(struct info_sleep_inheritor_test *info)
1335 {
1336 gate_t *gate = &info->gate;
1337 if (info->use_alloc_gate == true) {
1338 gate = info->alloc_gate;
1339 }
1340
1341 switch (info->prim_type) {
1342 case MTX_LOCK:
1343 lck_mtx_gate_close(&info->mtx_lock, gate);
1344 break;
1345 case RW_LOCK:
1346 lck_rw_gate_close(&info->rw_lock, gate);
1347 break;
1348 default:
1349 panic("invalid type %d", info->prim_type);
1350 }
1351 }
1352
1353 static void
primitive_gate_steal(struct info_sleep_inheritor_test * info)1354 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1355 {
1356 gate_t *gate = &info->gate;
1357 if (info->use_alloc_gate == true) {
1358 gate = info->alloc_gate;
1359 }
1360
1361 switch (info->prim_type) {
1362 case MTX_LOCK:
1363 lck_mtx_gate_steal(&info->mtx_lock, gate);
1364 break;
1365 case RW_LOCK:
1366 lck_rw_gate_steal(&info->rw_lock, gate);
1367 break;
1368 default:
1369 panic("invalid type %d", info->prim_type);
1370 }
1371 }
1372
1373 static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test * info,int flags)1374 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1375 {
1376 gate_t *gate = &info->gate;
1377 if (info->use_alloc_gate == true) {
1378 gate = info->alloc_gate;
1379 }
1380
1381 kern_return_t ret = KERN_SUCCESS;
1382 switch (info->prim_type) {
1383 case MTX_LOCK:
1384 ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1385 break;
1386 case RW_LOCK:
1387 ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1388 break;
1389 default:
1390 panic("invalid type %d", info->prim_type);
1391 }
1392 return ret;
1393 }
1394
1395 static void
primitive_gate_assert(struct info_sleep_inheritor_test * info,int type)1396 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1397 {
1398 gate_t *gate = &info->gate;
1399 if (info->use_alloc_gate == true) {
1400 gate = info->alloc_gate;
1401 }
1402
1403 switch (info->prim_type) {
1404 case MTX_LOCK:
1405 lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1406 break;
1407 case RW_LOCK:
1408 lck_rw_gate_assert(&info->rw_lock, gate, type);
1409 break;
1410 default:
1411 panic("invalid type %d", info->prim_type);
1412 }
1413 }
1414
1415 static void
primitive_gate_init(struct info_sleep_inheritor_test * info)1416 primitive_gate_init(struct info_sleep_inheritor_test *info)
1417 {
1418 switch (info->prim_type) {
1419 case MTX_LOCK:
1420 lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1421 break;
1422 case RW_LOCK:
1423 lck_rw_gate_init(&info->rw_lock, &info->gate);
1424 break;
1425 default:
1426 panic("invalid type %d", info->prim_type);
1427 }
1428 }
1429
1430 static void
primitive_gate_destroy(struct info_sleep_inheritor_test * info)1431 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1432 {
1433 switch (info->prim_type) {
1434 case MTX_LOCK:
1435 lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1436 break;
1437 case RW_LOCK:
1438 lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1439 break;
1440 default:
1441 panic("invalid type %d", info->prim_type);
1442 }
1443 }
1444
1445 static void
primitive_gate_alloc(struct info_sleep_inheritor_test * info)1446 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1447 {
1448 gate_t *gate;
1449 switch (info->prim_type) {
1450 case MTX_LOCK:
1451 gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1452 break;
1453 case RW_LOCK:
1454 gate = lck_rw_gate_alloc_init(&info->rw_lock);
1455 break;
1456 default:
1457 panic("invalid type %d", info->prim_type);
1458 }
1459 info->alloc_gate = gate;
1460 }
1461
1462 static void
primitive_gate_free(struct info_sleep_inheritor_test * info)1463 primitive_gate_free(struct info_sleep_inheritor_test *info)
1464 {
1465 T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1466
1467 switch (info->prim_type) {
1468 case MTX_LOCK:
1469 lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1470 break;
1471 case RW_LOCK:
1472 lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1473 break;
1474 default:
1475 panic("invalid type %d", info->prim_type);
1476 }
1477 info->alloc_gate = NULL;
1478 }
1479
/*
 * Use sleep_with_inheritor / wakeup_one_with_inheritor as a hand-off
 * mutex: the first thread becomes inheritor, everyone else sleeps
 * pushing on it, and each wakeup transfers the inheritorship to
 * exactly one waiter.
 */
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first in: become the inheritor ("lock owner") */
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	/* "critical section": only the inheritor should be here */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	/* hand the inheritorship to one waiter (stored in thread_inheritor) */
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		/* nobody was waiting: allowed at most once (the last thread) */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		/* the wakeup returned a reference on the new inheritor: drop it */
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	/* all priority pushes must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1532
/*
 * One thread becomes inheritor while all the others sleep pushing on
 * it; the inheritor checks it was boosted to the waiters' maximum
 * priority before waking everyone.
 */
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first to arrive: every other thread will push on us */
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		/* wait until all other threads have committed to sleeping */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		/* with everyone blocked on us we must run at their max priority */
		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1572
/*
 * Exercise change_sleep_inheritor(): the second thread to arrive
 * steals the inheritorship and must end up running at the highest
 * priority among the sleeping threads.
 */
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		/* first to arrive: inheritor until somebody steals it */
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		/* this thread and the thief are excluded from the count */
		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* second to arrive: steal the inheritorship */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			/* the thief must now run at the top waiter priority */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			/* record the highest priority among the sleepers */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1633
/*
 * Sleep with a NULL inheritor: each thread decrements value and
 * sleeps with no inheritor set; the last one wakes everybody up.
 */
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		/* last one in: release everyone */
		primitive_wakeup_all_with_inheritor(info);
	} else {
		/* sleep without pushing on anybody */
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1660
1661 static void
thread_mtx_work(void * args,__unused wait_result_t wr)1662 thread_mtx_work(
1663 void *args,
1664 __unused wait_result_t wr)
1665 {
1666 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1667 uint my_pri = current_thread()->sched_pri;
1668 int i;
1669 u_int8_t rand;
1670 unsigned int mod_rand;
1671 uint max_pri;
1672
1673 T_LOG("Started thread pri %d %p", my_pri, current_thread());
1674
1675 for (i = 0; i < 10; i++) {
1676 lck_mtx_lock(&info->mtx_lock);
1677 if (info->thread_inheritor == NULL) {
1678 info->thread_inheritor = current_thread();
1679 lck_mtx_unlock(&info->mtx_lock);
1680
1681 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1682
1683 wait_threads(&info->synch, info->synch_value - 1);
1684 wait_for_waiters((struct synch_test_common *)info);
1685 max_pri = get_max_pri((struct synch_test_common *) info);
1686 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1687
1688 os_atomic_store(&info->synch, 0, relaxed);
1689
1690 lck_mtx_lock(&info->mtx_lock);
1691 info->thread_inheritor = NULL;
1692 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1693 lck_mtx_unlock(&info->mtx_lock);
1694 continue;
1695 }
1696
1697 read_random(&rand, sizeof(rand));
1698 mod_rand = rand % 2;
1699
1700 wake_threads(&info->synch);
1701 switch (mod_rand) {
1702 case 0:
1703 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1704 lck_mtx_unlock(&info->mtx_lock);
1705 break;
1706 case 1:
1707 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1708 break;
1709 default:
1710 panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1711 }
1712 }
1713
1714 /*
1715 * spin here to stop using the lock as mutex
1716 */
1717 wake_threads(&info->synch);
1718 wait_threads(&info->synch, info->synch_value);
1719
1720 for (i = 0; i < 10; i++) {
1721 /* read_random might sleep so read it before acquiring the mtx as spin */
1722 read_random(&rand, sizeof(rand));
1723
1724 lck_mtx_lock_spin(&info->mtx_lock);
1725 if (info->thread_inheritor == NULL) {
1726 info->thread_inheritor = current_thread();
1727 lck_mtx_unlock(&info->mtx_lock);
1728
1729 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1730 wait_for_waiters((struct synch_test_common *)info);
1731 max_pri = get_max_pri((struct synch_test_common *) info);
1732 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1733
1734 lck_mtx_lock_spin(&info->mtx_lock);
1735 info->thread_inheritor = NULL;
1736 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1737 lck_mtx_unlock(&info->mtx_lock);
1738 continue;
1739 }
1740
1741 mod_rand = rand % 2;
1742 switch (mod_rand) {
1743 case 0:
1744 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1745 lck_mtx_unlock(&info->mtx_lock);
1746 break;
1747 case 1:
1748 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1749 lck_mtx_unlock(&info->mtx_lock);
1750 break;
1751 default:
1752 panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1753 }
1754 }
1755 assert(current_thread()->kern_promotion_schedpri == 0);
1756 notify_waiter((struct synch_test_common *)info);
1757
1758 thread_terminate_self();
1759 }
1760
/*
 * Exercise lck_rw_sleep_with_inheritor() under all four sleep flags
 * (DEFAULT / UNLOCK / SHARED / EXCLUSIVE, chosen at random).  The
 * thread that upgrades shared->exclusive and finds thread_inheritor
 * NULL becomes inheritor and checks it is pushed to the waiters' max
 * priority.
 */
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
try_again:
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			/* upgrade; on failure the lock was dropped, so retry */
			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				if (info->thread_inheritor == NULL) {
					/* become the inheritor for this round */
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				goto try_again;
			}
		}

		/* waiter: pick one of the four rw sleep flags at random */
		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1837
/* states of an obj_cached entry */
#define OBJ_STATE_UNUSED 0
#define OBJ_STATE_REAL 1
#define OBJ_STATE_PLACEHOLDER 2

#define OBJ_BUFF_SIZE 11
/* a cached object whose lifetime is protected by a kern_apfs_reflock */
struct obj_cached {
	int obj_id;
	int obj_state;                           /* OBJ_STATE_* */
	struct kern_apfs_reflock *obj_refcount;
	char obj_buff[OBJ_BUFF_SIZE];            /* "I am groot" (10 chars + NUL) */
};
1849
/* number of entries in the test cache */
#define CACHE_SIZE 2
/* iteration counts for the reflock tests */
#define USE_CACHE_ROUNDS 15

#define REFCOUNT_REFLOCK_ROUNDS 15
1854
1855 /*
1856 * For the reflock cache test the cache is allocated
1857 * and its pointer is saved in obj_cache.
1858 * The lock for the cache is going to be one of the exclusive
1859 * locks already present in struct info_sleep_inheritor_test.
1860 */
1861
1862 static struct obj_cached *
alloc_init_cache_entry(void)1863 alloc_init_cache_entry(void)
1864 {
1865 struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1866 cache_entry->obj_id = 0;
1867 cache_entry->obj_state = OBJ_STATE_UNUSED;
1868 cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
1869 snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
1870 return cache_entry;
1871 }
1872
1873 static void
init_cache(struct info_sleep_inheritor_test * info)1874 init_cache(struct info_sleep_inheritor_test *info)
1875 {
1876 struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1877
1878 int i;
1879 for (i = 0; i < CACHE_SIZE; i++) {
1880 obj_cache[i] = alloc_init_cache_entry();
1881 }
1882
1883 info->obj_cache = obj_cache;
1884 }
1885
1886 static void
check_cache_empty(struct info_sleep_inheritor_test * info)1887 check_cache_empty(struct info_sleep_inheritor_test *info)
1888 {
1889 struct obj_cached **obj_cache = info->obj_cache;
1890
1891 int i, ret;
1892 for (i = 0; i < CACHE_SIZE; i++) {
1893 if (obj_cache[i] != NULL) {
1894 T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1895 T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1896 ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1897 T_ASSERT(ret == 0, "checked buff correctly emptied");
1898 }
1899 }
1900 }
1901
1902 static void
free_cache(struct info_sleep_inheritor_test * info)1903 free_cache(struct info_sleep_inheritor_test *info)
1904 {
1905 struct obj_cached **obj_cache = info->obj_cache;
1906
1907 int i;
1908 for (i = 0; i < CACHE_SIZE; i++) {
1909 if (obj_cache[i] != NULL) {
1910 kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1911 obj_cache[i]->obj_refcount = NULL;
1912 kfree_type(struct obj_cached, 1, obj_cache[i]);
1913 obj_cache[i] = NULL;
1914 }
1915 }
1916
1917 kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1918 info->obj_cache = NULL;
1919 }
1920
1921 static struct obj_cached *
find_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info)1922 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1923 {
1924 struct obj_cached **obj_cache = info->obj_cache;
1925 int i;
1926 for (i = 0; i < CACHE_SIZE; i++) {
1927 if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1928 return obj_cache[i];
1929 }
1930 }
1931 return NULL;
1932 }
1933
1934 static bool
free_id_in_cache(int obj_id,struct info_sleep_inheritor_test * info,__assert_only struct obj_cached * expected)1935 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, __assert_only struct obj_cached *expected)
1936 {
1937 struct obj_cached **obj_cache = info->obj_cache;
1938 int i;
1939 for (i = 0; i < CACHE_SIZE; i++) {
1940 if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1941 assert(obj_cache[i] == expected);
1942 kfree_type(struct obj_cached, 1, obj_cache[i]);
1943 obj_cache[i] = NULL;
1944 return true;
1945 }
1946 }
1947 return false;
1948 }
1949
1950 static struct obj_cached *
find_empty_spot_in_cache(struct info_sleep_inheritor_test * info)1951 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1952 {
1953 struct obj_cached **obj_cache = info->obj_cache;
1954 int i;
1955 for (i = 0; i < CACHE_SIZE; i++) {
1956 if (obj_cache[i] == NULL) {
1957 obj_cache[i] = alloc_init_cache_entry();
1958 return obj_cache[i];
1959 }
1960 if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1961 return obj_cache[i];
1962 }
1963 }
1964 return NULL;
1965 }
1966
/*
 * Look up (or create) the cache entry for obj_id and take a reference
 * on it, returning its payload buffer through *buff.
 *
 * Returns 0 on success (a reference is held; release with
 * put_obj_cache()), or -1 when the cache is full of live objects.
 *
 * The cache lock (primitive_lock) protects only the cache array; the
 * per-object reflock serializes init/teardown. Threads that lose a
 * race wait on the reflock — pushing their priority onto the owner —
 * and retry from the top.
 */
static int
get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	kern_apfs_reflock_t refcount = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;

try_again:
	primitive_lock(info);
	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
		/* Found an allocated object on the cache with same id */

		/*
		 * copy the pointer to obj_refcount as obj might
		 * get deallocated after primitive_unlock()
		 */
		refcount = obj->obj_refcount;
		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
			/*
			 * Got a ref, let's check the state
			 */
			switch (obj->obj_state) {
			case OBJ_STATE_UNUSED:
				/* idle entry: we must (re)initialize it */
				goto init;
			case OBJ_STATE_REAL:
				/* fully initialized: ready to use */
				goto done;
			case OBJ_STATE_PLACEHOLDER:
				/*
				 * Impossible: the initializer holds the reflock while the
				 * state is OBJ_STATE_PLACEHOLDER, so try_get_ref() cannot
				 * have succeeded.
				 */
				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
			default:
				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
			}
		} else {
			/*
			 * Didn't get a ref.
			 * This means either an obj_put() of the last ref is ongoing
			 * or an init of the object is happening.
			 * In both cases wait for that to finish and retry.
			 * While waiting, the thread that is holding the reflock
			 * will get a priority at least as high as this thread's.
			 */
			primitive_unlock(info);
			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			goto try_again;
		}
	} else {
		/* Look for a spot on the cache where we can save the object */

		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
			/*
			 * Sadness: cache is full, and everything in the cache is
			 * used.
			 */
			primitive_unlock(info);
			return -1;
		} else {
			/*
			 * copy the pointer to obj_refcount as obj might
			 * get deallocated after primitive_unlock()
			 */
			refcount = obj->obj_refcount;
			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
				/*
				 * Got a ref on an OBJ_STATE_UNUSED obj.
				 * Recycle time.
				 */
				obj->obj_id = obj_id;
				goto init;
			} else {
				/*
				 * This could happen if the obj_put() has just changed the
				 * state to OBJ_STATE_UNUSED, but not unlocked the reflock yet.
				 */
				primitive_unlock(info);
				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
				goto try_again;
			}
		}
	}
init:
	assert(obj->obj_id == obj_id);
	assert(obj->obj_state == OBJ_STATE_UNUSED);
	/*
	 * We already got a ref on the object, but we need
	 * to initialize it. Mark it as
	 * OBJ_STATE_PLACEHOLDER and take the obj_reflock.
	 * In this way all threads waiting for this init
	 * to finish will push on this thread.
	 */
	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
	assert(ret == true);
	obj->obj_state = OBJ_STATE_PLACEHOLDER;
	primitive_unlock(info);

	//let's pretend we are populating the obj
	IOSleep(10);
	/*
	 * obj will not be deallocated while I hold a ref.
	 * So it is safe to access it.
	 */
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);

	primitive_lock(info);
	/* nobody may remove an entry that has outstanding refs */
	obj2 = find_id_in_cache(obj_id, info);
	assert(obj == obj2);
	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);

	obj->obj_state = OBJ_STATE_REAL;
	kern_apfs_reflock_unlock(refcount);

done:
	/* cache lock is held on both paths reaching here */
	*buff = obj->obj_buff;
	primitive_unlock(info);
	return 0;
}
2082
/*
 * Release the reference on obj_id taken by get_obj_cache().
 *
 * KERN_APFS_REFLOCK_IN_LOCK_IF_LAST hands us the reflock locked when
 * we dropped the last reference; in that case we reset the object back
 * to OBJ_STATE_UNUSED and, when free is true, also remove it from the
 * cache and free its reflock.
 */
static void
put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
{
	struct obj_cached *obj = NULL, *obj2 = NULL;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_t refcount = NULL;

	primitive_lock(info);
	obj = find_id_in_cache(obj_id, info);
	primitive_unlock(info);

	/*
	 * Nobody should have been able to remove obj_id
	 * from the cache while we held a reference.
	 */
	assert(obj != NULL);
	assert(obj->obj_state == OBJ_STATE_REAL);

	/* obj cannot be freed while we hold a ref, so caching the pointer is safe */
	refcount = obj->obj_refcount;

	/*
	 * This should never fail: the reflock is only held either by an
	 * initializer that acquired it while the state was OBJ_STATE_UNUSED,
	 * or by a put that reached zero. In the latter case any subsequent
	 * reflock_get_ref() had to wait for the transition back
	 * to OBJ_STATE_REAL.
	 */
	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
	assert(ret == true);
	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
		/* not the last reference: nothing to tear down */
		return;
	}

	/*
	 * Note: nobody at this point will be able to get a ref or a lock on
	 * refcount.
	 * All people waiting on refcount will push on this thread.
	 */

	//let's pretend we are flushing the obj somewhere.
	IOSleep(10);
	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");

	primitive_lock(info);
	obj->obj_state = OBJ_STATE_UNUSED;
	if (free) {
		obj2 = find_id_in_cache(obj_id, info);
		assert(obj == obj2);

		/* free_id_in_cache() frees the entry but not its reflock */
		ret = free_id_in_cache(obj_id, info, obj);
		assert(ret == true);
	}
	primitive_unlock(info);

	/* wake everybody waiting on the reflock */
	kern_apfs_reflock_unlock(refcount);

	if (free) {
		kern_apfs_reflock_free(refcount);
	}
}
2144
/*
 * Worker for test_cache_reflock(): repeatedly takes a reference on its
 * assigned object, checks that the payload stays intact while the
 * reference is held, then drops the reference — freeing the cache
 * entry on every other round.
 */
static void
thread_use_cache(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	int my_obj;

	/*
	 * Derive this thread's object id; ids are shared between
	 * threads so they contend on the same cache entries.
	 */
	primitive_lock(info);
	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
	primitive_unlock(info);

	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
	/*
	 * This is the string I would expect to see
	 * on my_obj buff.
	 */
	char my_string[OBJ_BUFF_SIZE];
	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);

	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
		char *buff;
		while (get_obj_cache(my_obj, info, &buff) == -1) {
			/*
			 * Cache is full, wait.
			 */
			IOSleep(10);
		}
		/* while we hold a ref the buffer must keep our string */
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		IOSleep(10);
		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
		/* free the cache entry on even rounds */
		put_obj_cache(my_obj, info, (i % 2 == 0));
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2188
/*
 * Worker for test_refcount_reflock(): hammers get_ref/put_ref on the
 * shared reflock. Whoever performs the 0->1 transition (OUT_LOCKED on
 * get) sets reflock_protected_status; whoever performs the 1->0
 * transition (OUT_LOCKED on put) clears it. Any thread that holds a
 * plain reference must observe the status set.
 */
static void
thread_refcount_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	kern_apfs_reflock_in_flags_t in_flags;

	T_LOG("Thread %p started", current_thread());
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
		/* lock the reflock on 0->1; on even rounds also declare intent to wait */
		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
		if ((i % 2) == 0) {
			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
		}
		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
		if (ret == true) {
			/* got reference, check if we did 0->1 */
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 0, "status init check");
				info->reflock_protected_status = 1;
				kern_apfs_reflock_unlock(&info->reflock);
			} else {
				/* somebody else did 0->1 and set the status before us */
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
			}
			/* release the reference and check if we did 1->0 */
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
				T_ASSERT(info->reflock_protected_status == 1, "status set check");
				info->reflock_protected_status = 0;
				kern_apfs_reflock_unlock(&info->reflock);
			}
		} else {
			/* didn't get a reference: another thread holds the reflock */
			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
				/* we declared WILL_WAIT, so wait for the owner to unlock */
				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2240
/*
 * Worker for test_force_reflock(): the first thread to bump
 * info->value becomes the locker and holds the reflock for a while;
 * all other threads take and drop references with
 * KERN_APFS_REFLOCK_IN_FORCE, which must succeed even while the
 * reflock is held (the locker used IN_ALLOW_FORCE).
 */
static void
thread_force_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* first arrival: take the reflock before the others start */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		/* hold the lock long enough for forced get/put to overlap it */
		IOSleep(100);
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			/* IN_FORCE: must succeed even while the locker holds the reflock */
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2281
/*
 * Worker for test_lock_reflock(): the first thread to bump info->value
 * takes the reflock and sets reflock_protected_status. The other
 * threads try plain get_ref/put_ref (IN_DEFAULT), which can only
 * succeed after the locker released the reflock — at which point the
 * status must read 0 again.
 */
static void
thread_lock_reflock(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	bool ret;
	kern_apfs_reflock_out_flags_t out_flags;
	bool lock = false;
	uint32_t count;

	T_LOG("Thread %p started", current_thread());
	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
		/* first arrival: become the locker */
		T_LOG("Thread %p is locker", current_thread());
		lock = true;
		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
		T_ASSERT(count == 0, "refcount value");
		info->reflock_protected_status = 1;
	}
	/*
	 * spin here to start concurrently with the other threads
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	if (lock) {
		IOSleep(100);
		/* clear the status before releasing so readers observe 0 */
		info->reflock_protected_status = 0;
		kern_apfs_reflock_unlock(&info->reflock);
	} else {
		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
			if (ret == true) {
				/* get_ref can only succeed once the locker has unlocked */
				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
				break;
			}
		}
	}

	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2327
2328 static void
test_cache_reflock(struct info_sleep_inheritor_test * info)2329 test_cache_reflock(struct info_sleep_inheritor_test *info)
2330 {
2331 info->synch = 0;
2332 info->synch_value = info->head.nthreads;
2333
2334 info->value = info->head.nthreads;
2335 /*
2336 * Use the mtx as cache lock
2337 */
2338 info->prim_type = MTX_LOCK;
2339
2340 init_cache(info);
2341
2342 start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2343 wait_all_thread((struct synch_test_common *)info);
2344
2345 check_cache_empty(info);
2346 free_cache(info);
2347 }
2348
2349 static void
test_refcount_reflock(struct info_sleep_inheritor_test * info)2350 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2351 {
2352 info->synch = 0;
2353 info->synch_value = info->head.nthreads;
2354 kern_apfs_reflock_init(&info->reflock);
2355 info->reflock_protected_status = 0;
2356
2357 start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2358 wait_all_thread((struct synch_test_common *)info);
2359
2360 kern_apfs_reflock_destroy(&info->reflock);
2361
2362 T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2363 }
2364
2365 static void
test_force_reflock(struct info_sleep_inheritor_test * info)2366 test_force_reflock(struct info_sleep_inheritor_test *info)
2367 {
2368 info->synch = 0;
2369 info->synch_value = info->head.nthreads;
2370 kern_apfs_reflock_init(&info->reflock);
2371 info->value = 0;
2372
2373 start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2374 wait_all_thread((struct synch_test_common *)info);
2375
2376 kern_apfs_reflock_destroy(&info->reflock);
2377 }
2378
2379 static void
test_lock_reflock(struct info_sleep_inheritor_test * info)2380 test_lock_reflock(struct info_sleep_inheritor_test *info)
2381 {
2382 info->synch = 0;
2383 info->synch_value = info->head.nthreads;
2384 kern_apfs_reflock_init(&info->reflock);
2385 info->value = 0;
2386
2387 start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2388 wait_all_thread((struct synch_test_common *)info);
2389
2390 kern_apfs_reflock_destroy(&info->reflock);
2391 }
2392
2393 static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test * info,int prim_type)2394 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2395 {
2396 info->prim_type = prim_type;
2397 info->synch = 0;
2398 info->synch_value = info->head.nthreads;
2399
2400 info->thread_inheritor = NULL;
2401
2402 start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2403 wait_all_thread((struct synch_test_common *)info);
2404 }
2405
2406 static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test * info,int prim_type)2407 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2408 {
2409 info->prim_type = prim_type;
2410
2411 info->synch = 0;
2412 info->synch_value = info->head.nthreads;
2413 info->value = 0;
2414 info->handoff_failure = 0;
2415 info->thread_inheritor = NULL;
2416
2417 start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2418 wait_all_thread((struct synch_test_common *)info);
2419
2420 T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2421 T_ASSERT(info->handoff_failure == 1, "handoff failures");
2422 }
2423
2424 static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2425 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2426 {
2427 info->prim_type = prim_type;
2428
2429 info->thread_inheritor = NULL;
2430 info->steal_pri = 0;
2431 info->synch = 0;
2432 info->synch_value = info->head.nthreads;
2433
2434 start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2435 wait_all_thread((struct synch_test_common *)info);
2436 }
2437
2438 static void
test_no_inheritor(struct info_sleep_inheritor_test * info,int prim_type)2439 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2440 {
2441 info->prim_type = prim_type;
2442 info->synch = 0;
2443 info->synch_value = info->head.nthreads;
2444
2445 info->thread_inheritor = NULL;
2446 info->value = info->head.nthreads;
2447
2448 start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2449 wait_all_thread((struct synch_test_common *)info);
2450 }
2451
2452 static void
test_rw_lock(struct info_sleep_inheritor_test * info)2453 test_rw_lock(struct info_sleep_inheritor_test *info)
2454 {
2455 info->thread_inheritor = NULL;
2456 info->value = info->head.nthreads;
2457 info->synch = 0;
2458 info->synch_value = info->head.nthreads;
2459
2460 start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2461 wait_all_thread((struct synch_test_common *)info);
2462 }
2463
2464 static void
test_mtx_lock(struct info_sleep_inheritor_test * info)2465 test_mtx_lock(struct info_sleep_inheritor_test *info)
2466 {
2467 info->thread_inheritor = NULL;
2468 info->value = info->head.nthreads;
2469 info->synch = 0;
2470 info->synch_value = info->head.nthreads;
2471
2472 start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2473 wait_all_thread((struct synch_test_common *)info);
2474 }
2475
2476 kern_return_t
ts_kernel_sleep_inheritor_test(void)2477 ts_kernel_sleep_inheritor_test(void)
2478 {
2479 struct info_sleep_inheritor_test info = {};
2480
2481 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2482
2483 lck_attr_t* lck_attr = lck_attr_alloc_init();
2484 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2485 lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
2486
2487 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2488 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2489
2490 /*
2491 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2492 */
2493 T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
2494 test_sleep_with_wake_all(&info, MTX_LOCK);
2495
2496 /*
2497 * Testing rw_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2498 */
2499 T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
2500 test_sleep_with_wake_all(&info, RW_LOCK);
2501
2502 /*
2503 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
2504 */
2505 T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
2506 test_sleep_with_wake_one(&info, MTX_LOCK);
2507
2508 /*
2509 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
2510 */
2511 T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
2512 test_sleep_with_wake_one(&info, RW_LOCK);
2513
2514 /*
2515 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2516 * and change_sleep_inheritor
2517 */
2518 T_LOG("Testing change_sleep_inheritor with mxt sleep");
2519 test_change_sleep_inheritor(&info, MTX_LOCK);
2520
2521 /*
2522 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2523 * and change_sleep_inheritor
2524 */
2525 T_LOG("Testing change_sleep_inheritor with rw sleep");
2526 test_change_sleep_inheritor(&info, RW_LOCK);
2527
2528 /*
2529 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2530 * with inheritor NULL
2531 */
2532 T_LOG("Testing inheritor NULL");
2533 test_no_inheritor(&info, MTX_LOCK);
2534
2535 /*
2536 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2537 * with inheritor NULL
2538 */
2539 T_LOG("Testing inheritor NULL");
2540 test_no_inheritor(&info, RW_LOCK);
2541
2542 /*
2543 * Testing mtx locking combinations
2544 */
2545 T_LOG("Testing mtx locking combinations");
2546 test_mtx_lock(&info);
2547
2548 /*
2549 * Testing rw locking combinations
2550 */
2551 T_LOG("Testing rw locking combinations");
2552 test_rw_lock(&info);
2553
2554 /*
2555 * Testing reflock / cond_sleep_with_inheritor
2556 */
2557 T_LOG("Test cache reflock + cond_sleep_with_inheritor");
2558 test_cache_reflock(&info);
2559 T_LOG("Test force reflock + cond_sleep_with_inheritor");
2560 test_force_reflock(&info);
2561 T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
2562 test_refcount_reflock(&info);
2563 T_LOG("Test lock reflock + cond_sleep_with_inheritor");
2564 test_lock_reflock(&info);
2565
2566 destroy_synch_test_common((struct synch_test_common *)&info);
2567
2568 lck_attr_free(lck_attr);
2569 lck_grp_attr_free(lck_grp_attr);
2570 lck_rw_destroy(&info.rw_lock, lck_grp);
2571 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2572 lck_grp_free(lck_grp);
2573
2574 return KERN_SUCCESS;
2575 }
2576
/*
 * Worker for test_gate_steal(): the first thread closes the gate, one
 * other thread steals it (primitive_gate_steal), and the remaining
 * threads record their priority in steal_pri and wait on the gate.
 * The stealer asserts that, as gate holder, it was pushed up to the
 * highest waiter priority.
 */
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		/* first thread: close the gate and become its keeper */
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		/* wait for everybody except ourselves and the stealer */
		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			/* the gate was not stolen from us: open it ourselves */
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			/* second thread: steal the gate from the closer */
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			/* as holder we must have inherited the max waiter priority */
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			/* remaining threads: publish our priority and wait on the gate */
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	/* any priority push must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2636
/*
 * Worker for test_gate_alloc_free(): the thread that wins
 * primitive_gate_try_close() waits for all the others to block on the
 * gate, then opens and frees it; the losers must wake with
 * GATE_OPENED.
 */
static void
thread_gate_free(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);

	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		primitive_gate_assert(info, GATE_ASSERT_HELD);
		primitive_unlock(info);

		/* wait for all the other threads to block on the gate */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *) info);

		primitive_lock(info);
		primitive_gate_open(info);
		/* free the gate right after opening it */
		primitive_gate_free(info);
	} else {
		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
		wake_threads(&info->synch);
		gate_wait_result_t ret = primitive_gate_wait(info);
		/* we must have been woken by the open, not a handoff */
		T_ASSERT(ret == GATE_OPENED, "open gate");
	}

	primitive_unlock(info);

	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2672
/*
 * Worker for test_gate_handoff(): uses the gate as a mutex around
 * info->value, passing ownership to the next waiter with
 * primitive_gate_handoff(). Exactly one thread (the last one) finds
 * no waiter and records the single expected handoff failure.
 */
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		/* gate already held: wait for it to be handed to us */
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* critical section: we own the gate */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		/* only the last thread may find no waiter to hand off to */
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
2720
/*
 * Worker for test_gate_push(): only the thread that closes the gate
 * performs the "work"; the others block on the gate (pushing their
 * priority onto the holder) and re-check work_to_do when woken. The
 * holder asserts it was pushed to the highest waiter priority.
 */
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			/* wait for all the other threads to block on the gate */
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			/* as holder we must have inherited the max waiter priority */
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			/* woken by the holder: re-evaluate work_to_do */
			goto check_again;
		}
	}
	primitive_unlock(info);

	/* any priority push must be gone by now */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
2762
2763 static void
test_gate_push(struct info_sleep_inheritor_test * info,int prim_type)2764 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2765 {
2766 info->prim_type = prim_type;
2767 info->use_alloc_gate = false;
2768
2769 primitive_gate_init(info);
2770 info->work_to_do = TRUE;
2771 info->synch = 0;
2772 info->synch_value = NUM_THREADS;
2773
2774 start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2775 wait_all_thread((struct synch_test_common *)info);
2776
2777 primitive_gate_destroy(info);
2778 }
2779
2780 static void
test_gate_handoff(struct info_sleep_inheritor_test * info,int prim_type)2781 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2782 {
2783 info->prim_type = prim_type;
2784 info->use_alloc_gate = false;
2785
2786 primitive_gate_init(info);
2787
2788 info->synch = 0;
2789 info->synch_value = NUM_THREADS;
2790 info->value = 0;
2791 info->handoff_failure = 0;
2792
2793 start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2794 wait_all_thread((struct synch_test_common *)info);
2795
2796 T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2797 T_ASSERT(info->handoff_failure == 1, "handoff failures");
2798
2799 primitive_gate_destroy(info);
2800 }
2801
2802 static void
test_gate_steal(struct info_sleep_inheritor_test * info,int prim_type)2803 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2804 {
2805 info->prim_type = prim_type;
2806 info->use_alloc_gate = false;
2807
2808 primitive_gate_init(info);
2809
2810 info->synch = 0;
2811 info->synch_value = NUM_THREADS;
2812 info->thread_inheritor = NULL;
2813 info->steal_pri = 0;
2814
2815 start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2816 wait_all_thread((struct synch_test_common *)info);
2817
2818 primitive_gate_destroy(info);
2819 }
2820
2821 static void
test_gate_alloc_free(struct info_sleep_inheritor_test * info,int prim_type)2822 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2823 {
2824 (void)info;
2825 (void) prim_type;
2826 info->prim_type = prim_type;
2827 info->use_alloc_gate = true;
2828
2829 primitive_gate_alloc(info);
2830
2831 info->synch = 0;
2832 info->synch_value = NUM_THREADS;
2833
2834 start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2835 wait_all_thread((struct synch_test_common *)info);
2836
2837 T_ASSERT(info->alloc_gate == NULL, "gate free");
2838 info->use_alloc_gate = false;
2839 }
2840
2841 kern_return_t
ts_kernel_gate_test(void)2842 ts_kernel_gate_test(void)
2843 {
2844 struct info_sleep_inheritor_test info = {};
2845
2846 T_LOG("Testing gate primitive");
2847
2848 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2849
2850 lck_attr_t* lck_attr = lck_attr_alloc_init();
2851 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2852 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2853
2854 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2855 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2856
2857 /*
2858 * Testing the priority inherited by the keeper
2859 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2860 */
2861 T_LOG("Testing gate push, mtx");
2862 test_gate_push(&info, MTX_LOCK);
2863
2864 T_LOG("Testing gate push, rw");
2865 test_gate_push(&info, RW_LOCK);
2866
2867 /*
2868 * Testing the handoff
2869 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2870 */
2871 T_LOG("Testing gate handoff, mtx");
2872 test_gate_handoff(&info, MTX_LOCK);
2873
2874 T_LOG("Testing gate handoff, rw");
2875 test_gate_handoff(&info, RW_LOCK);
2876
2877 /*
2878 * Testing the steal
2879 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2880 */
2881 T_LOG("Testing gate steal, mtx");
2882 test_gate_steal(&info, MTX_LOCK);
2883
2884 T_LOG("Testing gate steal, rw");
2885 test_gate_steal(&info, RW_LOCK);
2886
2887 /*
2888 * Testing the alloc/free
2889 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2890 */
2891 T_LOG("Testing gate alloc/free, mtx");
2892 test_gate_alloc_free(&info, MTX_LOCK);
2893
2894 T_LOG("Testing gate alloc/free, rw");
2895 test_gate_alloc_free(&info, RW_LOCK);
2896
2897 destroy_synch_test_common((struct synch_test_common *)&info);
2898
2899 lck_attr_free(lck_attr);
2900 lck_grp_attr_free(lck_grp_attr);
2901 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2902 lck_grp_free(lck_grp);
2903
2904 return KERN_SUCCESS;
2905 }
2906
2907 #define NUM_THREAD_CHAIN 6
2908
/*
 * Shared state for the turnstile chain tests: a mutex protecting one
 * gate per thread, plus counters used to rendezvous the threads.
 * Accessed through casts to struct synch_test_common by the common
 * helpers, so `head` must stay the first member.
 */
struct turnstile_chain_test {
	struct synch_test_common head;  /* common thread bookkeeping; must be first */
	lck_mtx_t mtx_lock;             /* lock the gates are associated with */
	int synch_value;                /* number of threads expected at each rendezvous */
	int synch;                      /* rendezvous counter for the wait phase */
	int synch2;                     /* rendezvous counter for the gate-close phase */
	gate_t gates[NUM_THREAD_CHAIN]; /* one gate per participating thread */
};
2917
/*
 * Worker for the mixed sleep/gate chain test: even-indexed threads
 * close gate[i] and later sleep-with-inheritor on their predecessor;
 * odd-indexed threads wait on gate[i-1]. The result is an alternating
 * chain of gate waits and sleep waits, so the priority push from the
 * highest-priority thread must propagate all the way to thread 0.
 */
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */

	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Each even-indexed thread closes the gate matching its index. */
	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	/* Rendezvous: every even thread must have closed its gate first. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/*
		 * Head of the chain: wait until the others are blocked and
		 * pushing, verify the full push arrived, then open gate 0
		 * to unwind the chain.
		 */
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Find my index: I push on thread i-1 and wake thread i+1. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			/* Odd thread: block on the gate the previous thread closed. */
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				/* Only the last thread in the chain has no waiter to wake. */
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			/* Even thread: sleep pushing on thread i-1, then open my gate. */
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	/* All priority pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3013
/*
 * Worker for the pure gate chain test: every thread closes gate[i]
 * (where i is its index), then each thread except the head waits on
 * gate[i-1], forming a chain of gate waits whose priority push must
 * reach thread 0.
 */
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	T_LOG("Started thread pri %d %p", my_pri, self);


	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	/* Every thread closes the gate matching its index. */
	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	/* Rendezvous: all gates must be closed before anyone waits. */
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/*
		 * Head of the chain: wait for the rest to block, verify the
		 * chained push reached us, then open gate 0 to unwind.
		 */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		/* Wait on the previous thread's gate, then open my own. */
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	/* Any priority push must be gone by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3076
/*
 * Worker for the sleep-with-inheritor chain test: each thread except
 * the head sleeps on its predecessor's event, declaring the predecessor
 * as inheritor, building a chain of sleep waits whose priority push
 * must reach thread 0.
 */
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		/*
		 * Head of the chain: wait for the others to block and push,
		 * verify the full push arrived, then wake the next thread
		 * without transferring the push.
		 */
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		/* Find my index: I sleep on thread i-1's event and wake thread i+1. */
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			/* Only the last thread in the chain has no waiter to wake. */
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	/* All priority pushes must have been dropped by now. */
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
3154
3155 static void
test_sleep_chain(struct turnstile_chain_test * info)3156 test_sleep_chain(struct turnstile_chain_test *info)
3157 {
3158 info->synch = 0;
3159 info->synch_value = info->head.nthreads;
3160
3161 start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
3162 wait_all_thread((struct synch_test_common *)info);
3163 }
3164
3165 static void
test_gate_chain(struct turnstile_chain_test * info)3166 test_gate_chain(struct turnstile_chain_test *info)
3167 {
3168 info->synch = 0;
3169 info->synch2 = 0;
3170 info->synch_value = info->head.nthreads;
3171
3172 start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
3173 wait_all_thread((struct synch_test_common *)info);
3174 }
3175
3176 static void
test_sleep_gate_chain(struct turnstile_chain_test * info)3177 test_sleep_gate_chain(struct turnstile_chain_test *info)
3178 {
3179 info->synch = 0;
3180 info->synch2 = 0;
3181 info->synch_value = info->head.nthreads;
3182
3183 start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
3184 wait_all_thread((struct synch_test_common *)info);
3185 }
3186
3187 kern_return_t
ts_kernel_turnstile_chain_test(void)3188 ts_kernel_turnstile_chain_test(void)
3189 {
3190 struct turnstile_chain_test info = {};
3191 int i;
3192
3193 init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
3194 lck_attr_t* lck_attr = lck_attr_alloc_init();
3195 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
3196 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
3197
3198 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
3199 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3200 lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
3201 }
3202
3203 T_LOG("Testing sleep chain, lck");
3204 test_sleep_chain(&info);
3205
3206 T_LOG("Testing gate chain, lck");
3207 test_gate_chain(&info);
3208
3209 T_LOG("Testing sleep and gate chain, lck");
3210 test_sleep_gate_chain(&info);
3211
3212 destroy_synch_test_common((struct synch_test_common *)&info);
3213 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3214 lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
3215 }
3216 lck_attr_free(lck_attr);
3217 lck_grp_attr_free(lck_grp_attr);
3218 lck_mtx_destroy(&info.mtx_lock, lck_grp);
3219 lck_grp_free(lck_grp);
3220
3221 return KERN_SUCCESS;
3222 }
3223
3224 kern_return_t
ts_kernel_timingsafe_bcmp_test(void)3225 ts_kernel_timingsafe_bcmp_test(void)
3226 {
3227 int i, buf_size;
3228 char *buf = NULL;
3229
3230 // empty
3231 T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3232 T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3233 T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3234
3235 // equal
3236 T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3237
3238 // unequal
3239 T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3240 T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3241 T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3242 T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3243
3244 // all possible bitwise differences
3245 for (i = 1; i < 256; i += 1) {
3246 unsigned char a = 0;
3247 unsigned char b = (unsigned char)i;
3248
3249 T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3250 }
3251
3252 // large
3253 buf_size = 1024 * 16;
3254 buf = kalloc_data(buf_size, Z_WAITOK);
3255 T_EXPECT_NOTNULL(buf, "kalloc of buf");
3256
3257 read_random(buf, buf_size);
3258 T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3259 T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3260 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3261
3262 memcpy(buf + 128, buf, 128);
3263 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3264
3265 kfree_data(buf, buf_size);
3266
3267 return KERN_SUCCESS;
3268 }
3269
/*
 * POST check that the kernel printf correctly parses the %hx and %hhx
 * length modifiers (and a trailing %llx) without misinterpreting the
 * format string.
 */
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	T_PASS("kprintf_hhx_test passed");
	return KERN_SUCCESS;
}
3282
/* Four keys covering both initial states and both flip directions. */
static STATIC_IF_KEY_DEFINE_TRUE(key_true);
static STATIC_IF_KEY_DEFINE_TRUE(key_true_to_false);
static STATIC_IF_KEY_DEFINE_FALSE(key_false);
static STATIC_IF_KEY_DEFINE_FALSE(key_false_to_true);

/*
 * Flip the *_to_* keys before static_if_tests() runs, so the test can
 * verify patching in both directions.
 */
__static_if_init_func
static void
static_if_tests_setup(const char *args __unused)
{
	static_if_key_disable(key_true_to_false);
	static_if_key_enable(key_false_to_true);
}
STATIC_IF_INIT(static_if_tests_setup);
3296
3297 static void
static_if_tests(void)3298 static_if_tests(void)
3299 {
3300 int n = 0;
3301
3302 if (static_if(key_true)) {
3303 n++;
3304 }
3305 if (probable_static_if(key_true)) {
3306 n++;
3307 }
3308 if (improbable_static_if(key_true)) {
3309 n++;
3310 }
3311 if (n != 3) {
3312 panic("should still be enabled [n == %d, expected %d]", n, 3);
3313 }
3314
3315 if (static_if(key_true_to_false)) {
3316 n++;
3317 }
3318 if (probable_static_if(key_true_to_false)) {
3319 n++;
3320 }
3321 if (improbable_static_if(key_true_to_false)) {
3322 n++;
3323 }
3324 if (n != 3) {
3325 panic("should now be disabled [n == %d, expected %d]", n, 3);
3326 }
3327
3328 if (static_if(key_false)) {
3329 n++;
3330 }
3331 if (probable_static_if(key_false)) {
3332 n++;
3333 }
3334 if (improbable_static_if(key_false)) {
3335 n++;
3336 }
3337 if (n != 3) {
3338 panic("should still be disabled [n == %d, expected %d]", n, 3);
3339 }
3340
3341 if (static_if(key_false_to_true)) {
3342 n++;
3343 }
3344 if (probable_static_if(key_false_to_true)) {
3345 n++;
3346 }
3347 if (improbable_static_if(key_false_to_true)) {
3348 n++;
3349 }
3350 if (n != 6) {
3351 panic("should now be disabled [n == %d, expected %d]", n, 3);
3352 }
3353 }
3354 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, static_if_tests);
3355