1 /*
2 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
33 * Mellon University All Rights Reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright notice
37 * and this permission notice appear in all copies of the software,
38 * derivative works or modified versions, and any portions thereof, and that
39 * both notices appear in supporting documentation.
40 *
41 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * Carnegie Mellon requests users of this software to return to
46 *
47 * Software Distribution Coordinator or [email protected]
48 * School of Computer Science Carnegie Mellon University Pittsburgh PA
49 * 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon the
52 * rights to redistribute these changes.
53 */
54
55 #include <mach_ldebug.h>
56
57 #define LOCK_PRIVATE 1
58
59 #include <vm/pmap.h>
60 #include <vm/vm_map.h>
61 #include <kern/kalloc.h>
62 #include <kern/cpu_number.h>
63 #include <kern/locks.h>
64 #include <kern/misc_protos.h>
65 #include <kern/thread.h>
66 #include <kern/processor.h>
67 #include <kern/sched_prim.h>
68 #include <kern/debug.h>
69 #include <string.h>
70 #include <tests/xnupost.h>
71
72 #if MACH_KDB
73 #include <ddb/db_command.h>
74 #include <ddb/db_output.h>
75 #include <ddb/db_sym.h>
76 #include <ddb/db_print.h>
77 #endif /* MACH_KDB */
78
79 #include <san/kasan.h>
80 #include <sys/kdebug.h>
81 #include <sys/munge.h>
82 #include <machine/cpu_capabilities.h>
83 #include <arm/cpu_data_internal.h>
84 #include <arm/pmap.h>
85
86 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
87 #include <arm64/amcc_rorgn.h>
88 #endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
89
90 #include <arm64/machine_machdep.h>
91
/* Forward declarations for the arm64 power-on self test (POST) entry points. */
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
kern_return_t arm64_ropjop_test(void);
#endif
#if defined(KERNEL_INTEGRITY_CTRR)
kern_return_t ctrr_test(void);
kern_return_t ctrr_test_cpu(void);
#endif

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;   /* sentinel pattern for the PAN test's read-only data */
vm_offset_t pan_test_addr = 0;                      /* address the PAN test deliberately faults on (see comment above) */
vm_offset_t pan_ro_addr = 0;                        /* NOTE(review): presumably a read-only mapping of pan_ro_value — confirm in arm64_pan_test */
volatile int pan_exception_level = 0;               /* updated by the exception handler while the test runs — volatile, shared with trap code */
volatile char pan_fault_value = 0;                  /* value the handler observed at the faulting address */
#endif

#if CONFIG_SPTM
kern_return_t arm64_panic_lockdown_test(void);
#endif /* CONFIG_SPTM */
117
118 #include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50                         /* iterations each lt_thread runs its test function */
static hw_lock_data_t lt_hw_lock;                       /* low-level hw spinlock under test */
static lck_spin_t lt_lck_spin_t;                        /* lck_spin lock under test */
static lck_mtx_t lt_mtx;                                /* mutex under test */
static lck_rw_t lt_rwlock;                              /* reader/writer lock under test */
static volatile uint32_t lt_counter = 0;                /* total successful acquisitions across all test threads */
static volatile int lt_spinvolatile;                    /* dummy target for busy-wait loops; volatile so the loops are not elided */
static volatile uint32_t lt_max_holders = 0;            /* high-water mark of simultaneous blocking-lock holders */
static volatile uint32_t lt_upgrade_holders = 0;        /* threads currently holding the rw lock post-upgrade */
static volatile uint32_t lt_max_upgrade_holders = 0;    /* high-water mark of lt_upgrade_holders */
static volatile uint32_t lt_num_holders = 0;            /* current count of blocking-lock holders */
static volatile uint32_t lt_done_threads;               /* number of test threads that have finished */
static volatile uint32_t lt_target_done_threads;        /* number of threads a test waits on before checking results */
static volatile uint32_t lt_cpu_bind_id = 0;            /* next cpu id handed out to a bound stress thread */
133
134 static void
lt_note_another_blocking_lock_holder()135 lt_note_another_blocking_lock_holder()
136 {
137 hw_lock_lock(<_hw_lock, LCK_GRP_NULL);
138 lt_num_holders++;
139 lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
140 hw_lock_unlock(<_hw_lock);
141 }
142
143 static void
lt_note_blocking_lock_release()144 lt_note_blocking_lock_release()
145 {
146 hw_lock_lock(<_hw_lock, LCK_GRP_NULL);
147 lt_num_holders--;
148 hw_lock_unlock(<_hw_lock);
149 }
150
151 static void
lt_spin_a_little_bit()152 lt_spin_a_little_bit()
153 {
154 uint32_t i;
155
156 for (i = 0; i < 10000; i++) {
157 lt_spinvolatile++;
158 }
159 }
160
/* Delay briefly so other test threads get a chance to contend. */
static void
lt_sleep_a_little_bit()
{
	delay(100);
}
166
167 static void
lt_grab_mutex()168 lt_grab_mutex()
169 {
170 lck_mtx_lock(<_mtx);
171 lt_note_another_blocking_lock_holder();
172 lt_sleep_a_little_bit();
173 lt_counter++;
174 lt_note_blocking_lock_release();
175 lck_mtx_unlock(<_mtx);
176 }
177
178 static void
lt_grab_mutex_with_try()179 lt_grab_mutex_with_try()
180 {
181 while (0 == lck_mtx_try_lock(<_mtx)) {
182 ;
183 }
184 lt_note_another_blocking_lock_holder();
185 lt_sleep_a_little_bit();
186 lt_counter++;
187 lt_note_blocking_lock_release();
188 lck_mtx_unlock(<_mtx);
189 }
190
191 static void
lt_grab_rw_exclusive()192 lt_grab_rw_exclusive()
193 {
194 lck_rw_lock_exclusive(<_rwlock);
195 lt_note_another_blocking_lock_holder();
196 lt_sleep_a_little_bit();
197 lt_counter++;
198 lt_note_blocking_lock_release();
199 lck_rw_done(<_rwlock);
200 }
201
202 static void
lt_grab_rw_exclusive_with_try()203 lt_grab_rw_exclusive_with_try()
204 {
205 while (0 == lck_rw_try_lock_exclusive(<_rwlock)) {
206 lt_sleep_a_little_bit();
207 }
208
209 lt_note_another_blocking_lock_holder();
210 lt_sleep_a_little_bit();
211 lt_counter++;
212 lt_note_blocking_lock_release();
213 lck_rw_done(<_rwlock);
214 }
215
216 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
217 * static void
218 * lt_grab_rw_shared()
219 * {
220 * lck_rw_lock_shared(<_rwlock);
221 * lt_counter++;
222 *
223 * lt_note_another_blocking_lock_holder();
224 * lt_sleep_a_little_bit();
225 * lt_note_blocking_lock_release();
226 *
227 * lck_rw_done(<_rwlock);
228 * }
229 */
230
231 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
232 * static void
233 * lt_grab_rw_shared_with_try()
234 * {
235 * while(0 == lck_rw_try_lock_shared(<_rwlock));
236 * lt_counter++;
237 *
238 * lt_note_another_blocking_lock_holder();
239 * lt_sleep_a_little_bit();
240 * lt_note_blocking_lock_release();
241 *
242 * lck_rw_done(<_rwlock);
243 * }
244 */
245
/*
 * Exercise the rw lock's shared -> exclusive upgrade and exclusive ->
 * shared downgrade paths, tracking how many threads ever hold the
 * upgraded (exclusive) side at once in lt_max_upgrade_holders.
 */
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	/* Acquire shared: prefer the try path, fall back to blocking. */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	/*
	 * Attempt the shared -> exclusive upgrade.  On failure the shared
	 * hold is gone (as evidenced by the from-scratch reacquisition
	 * below), so grab exclusive again: try first, then block.
	 */
	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	/* Record the high-water mark of concurrent upgraded holders. */
	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	/* Downgrade back to shared, linger briefly, then release. */
	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}
284
285 #if __AMP__
const int limit = 1000000;                      /* total value the stress test drives lt_counter to */
static int lt_stress_local_counters[MAX_CPUS];  /* per-cpu increment counts, indexed by cpu_number() */

lck_ticket_t lt_ticket_lock;
lck_grp_t lt_ticket_grp;

/*
 * Ticket-lock stress test body, run concurrently on every CPU.  Each
 * thread hammers lt_ticket_lock, driving the shared lt_counter up to
 * 'limit' and recording its own contribution so the caller can verify
 * both consistency (sum of locals == lt_counter) and fairness (no CPU
 * starved — see the checks in lt_test_locks).
 */
static void
lt_stress_ticket_lock()
{
	int local_counter = 0;  /* increments performed by this thread */

	uint cpuid = cpu_number();

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	/* Announce this thread by bumping the counter once under the lock... */
	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
	lt_counter++;
	local_counter++;
	lck_ticket_unlock(&lt_ticket_lock);

	/* ...then busy-wait until every participant has done the same. */
	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	/* Race the other CPUs to push the shared counter to 'limit'. */
	while (lt_counter < limit) {
		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
		if (lt_counter < limit) {       /* re-check under the lock */
			lt_counter++;
			local_counter++;
		}
		lck_ticket_unlock(&lt_ticket_lock);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
325 #endif
326
327 static void
lt_grab_hw_lock()328 lt_grab_hw_lock()
329 {
330 hw_lock_lock(<_hw_lock, LCK_GRP_NULL);
331 lt_counter++;
332 lt_spin_a_little_bit();
333 hw_lock_unlock(<_hw_lock);
334 }
335
336 static void
lt_grab_hw_lock_with_try()337 lt_grab_hw_lock_with_try()
338 {
339 while (0 == hw_lock_try(<_hw_lock, LCK_GRP_NULL)) {
340 ;
341 }
342 lt_counter++;
343 lt_spin_a_little_bit();
344 hw_lock_unlock(<_hw_lock);
345 }
346
347 static void
lt_grab_hw_lock_with_to()348 lt_grab_hw_lock_with_to()
349 {
350 (void)hw_lock_to(<_hw_lock, &hw_lock_spin_policy, LCK_GRP_NULL);
351 lt_counter++;
352 lt_spin_a_little_bit();
353 hw_lock_unlock(<_hw_lock);
354 }
355
356 static void
lt_grab_spin_lock()357 lt_grab_spin_lock()
358 {
359 lck_spin_lock(<_lck_spin_t);
360 lt_counter++;
361 lt_spin_a_little_bit();
362 lck_spin_unlock(<_lck_spin_t);
363 }
364
365 static void
lt_grab_spin_lock_with_try()366 lt_grab_spin_lock_with_try()
367 {
368 while (0 == lck_spin_try_lock(<_lck_spin_t)) {
369 ;
370 }
371 lt_counter++;
372 lt_spin_a_little_bit();
373 lck_spin_unlock(<_lck_spin_t);
374 }
375
/* Handshake flags between lt_test_trylocks() and its helper thread. */
static volatile boolean_t lt_thread_lock_grabbed;       /* set once the main thread holds the contended lock */
static volatile boolean_t lt_thread_lock_success;       /* the helper's try/timeout acquisition result */

/*
 * Reset all shared lock-test counters before the next test scenario,
 * then publish the stores with a memory barrier.
 */
static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;

	OSMemoryBarrier();
}
393
/*
 * Helper-thread body: wait until the main thread signals (via
 * lt_thread_lock_grabbed) that it holds lt_hw_lock, then attempt a
 * timed acquisition with the give-up policy and publish the outcome in
 * lt_thread_lock_success for the main thread to check.
 */
static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock,
	    &hw_lock_test_give_up_policy, LCK_GRP_NULL);
	OSMemoryBarrier();
	/*
	 * NOTE(review): this appears to balance a preemption-disable left in
	 * place by the hw_lock_to path even when it gives up — confirm
	 * against the hw_lock_to implementation before changing.
	 */
	mp_enable_preemption();
}
407
408 static void
lt_trylock_spin_try_lock()409 lt_trylock_spin_try_lock()
410 {
411 OSMemoryBarrier();
412 while (!lt_thread_lock_grabbed) {
413 lt_sleep_a_little_bit();
414 OSMemoryBarrier();
415 }
416 lt_thread_lock_success = lck_spin_try_lock(<_lck_spin_t);
417 OSMemoryBarrier();
418 }
419
420 static void
lt_trylock_thread(void * arg,wait_result_t wres __unused)421 lt_trylock_thread(void *arg, wait_result_t wres __unused)
422 {
423 void (*func)(void) = (void (*)(void))arg;
424
425 func();
426
427 OSIncrementAtomic((volatile SInt32*) <_done_threads);
428 }
429
430 static void
lt_start_trylock_thread(thread_continue_t func)431 lt_start_trylock_thread(thread_continue_t func)
432 {
433 thread_t thread;
434 kern_return_t kr;
435
436 kr = kernel_thread_start(lt_trylock_thread, func, &thread);
437 assert(kr == KERN_SUCCESS);
438
439 thread_deallocate(thread);
440 }
441
/*
 * Poll (with short sleeps) until every started test thread has bumped
 * lt_done_threads up to lt_target_done_threads.
 */
static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}
453
/*
 * Exercise the try-lock variants of mutexes, rw locks, hw spinlocks and
 * lck_spin locks: a try-acquire on an unheld lock must succeed, and one
 * on a held (or incompatibly held) lock must fail.  The timed cases
 * hand the contended lock to a helper thread and verify that the
 * helper's acquisition fails while this thread holds the lock.  Always
 * returns KERN_SUCCESS; failures are reported through the T_ASSERT
 * macros.
 */
static kern_return_t
lt_test_trylocks()
{
	boolean_t success;
	extern unsigned int real_ncpus;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	/*
	 * hw_lock_to: this thread wins the timed acquisition, then signals
	 * the helper, whose own timed attempt must give up and fail.
	 */
	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, &hw_lock_test_give_up_policy, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	/* Same scenario, but this thread takes the lock with a plain hw_lock_lock. */
	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	/* lck_spin: the helper's try-acquire must fail while we hold the lock. */
	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}
600
601 static void
lt_thread(void * arg,wait_result_t wres __unused)602 lt_thread(void *arg, wait_result_t wres __unused)
603 {
604 void (*func)(void) = (void (*)(void))arg;
605 uint32_t i;
606
607 for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
608 func();
609 }
610
611 OSIncrementAtomic((volatile SInt32*) <_done_threads);
612 }
613
614 static void
lt_start_lock_thread(thread_continue_t func)615 lt_start_lock_thread(thread_continue_t func)
616 {
617 thread_t thread;
618 kern_return_t kr;
619
620 kr = kernel_thread_start(lt_thread, func, &thread);
621 assert(kr == KERN_SUCCESS);
622
623 thread_deallocate(thread);
624 }
625
626 #if __AMP__
627 static void
lt_bound_thread(void * arg,wait_result_t wres __unused)628 lt_bound_thread(void *arg, wait_result_t wres __unused)
629 {
630 void (*func)(void) = (void (*)(void))arg;
631
632 int cpuid = OSIncrementAtomic((volatile SInt32 *)<_cpu_bind_id);
633
634 processor_t processor = processor_list;
635 while ((processor != NULL) && (processor->cpu_id != cpuid)) {
636 processor = processor->processor_list;
637 }
638
639 if (processor != NULL) {
640 thread_bind(processor);
641 }
642
643 thread_block(THREAD_CONTINUE_NULL);
644
645 func();
646
647 OSIncrementAtomic((volatile SInt32*) <_done_threads);
648 }
649
650 static void
lt_e_thread(void * arg,wait_result_t wres __unused)651 lt_e_thread(void *arg, wait_result_t wres __unused)
652 {
653 void (*func)(void) = (void (*)(void))arg;
654
655 thread_t thread = current_thread();
656
657 thread_bind_cluster_type(thread, 'e', false);
658
659 func();
660
661 OSIncrementAtomic((volatile SInt32*) <_done_threads);
662 }
663
664 static void
lt_p_thread(void * arg,wait_result_t wres __unused)665 lt_p_thread(void *arg, wait_result_t wres __unused)
666 {
667 void (*func)(void) = (void (*)(void))arg;
668
669 thread_t thread = current_thread();
670
671 thread_bind_cluster_type(thread, 'p', false);
672
673 func();
674
675 OSIncrementAtomic((volatile SInt32*) <_done_threads);
676 }
677
678 static void
lt_start_lock_thread_e(thread_continue_t func)679 lt_start_lock_thread_e(thread_continue_t func)
680 {
681 thread_t thread;
682 kern_return_t kr;
683
684 kr = kernel_thread_start(lt_e_thread, func, &thread);
685 assert(kr == KERN_SUCCESS);
686
687 thread_deallocate(thread);
688 }
689
690 static void
lt_start_lock_thread_p(thread_continue_t func)691 lt_start_lock_thread_p(thread_continue_t func)
692 {
693 thread_t thread;
694 kern_return_t kr;
695
696 kr = kernel_thread_start(lt_p_thread, func, &thread);
697 assert(kr == KERN_SUCCESS);
698
699 thread_deallocate(thread);
700 }
701
702 static void
lt_start_lock_thread_bound(thread_continue_t func)703 lt_start_lock_thread_bound(thread_continue_t func)
704 {
705 thread_t thread;
706 kern_return_t kr;
707
708 kr = kernel_thread_start(lt_bound_thread, func, &thread);
709 assert(kr == KERN_SUCCESS);
710
711 thread_deallocate(thread);
712 }
713 #endif
714
/*
 * Top-level lock test driver: initializes each lock type, runs the
 * try-lock suite, then a series of uncontended and contended scenarios
 * for mutexes, rw locks, hw spinlocks and lck_spin locks, checking the
 * shared counters after each one.  Always returns KERN_SUCCESS;
 * individual failures are reported through the T_* macros.
 */
static kern_return_t
lt_test_locks()
{
#if SCHED_HYGIENE_DEBUG
	/*
	 * When testing, the preemption disable threshold may be hit (for
	 * example when testing a lock timeout). To avoid this, the preemption
	 * disable measurement is temporarily disabled during lock testing.
	 */
	int old_mode = sched_preemption_disable_debug_mode;
	if (old_mode == SCHED_HYGIENE_MODE_PANIC) {
		sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
	}
#endif /* SCHED_HYGIENE_DEBUG */

	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks*/
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running uncontended shared rwlock test.");
	 * lt_reset();
	 * lt_target_done_threads = 1;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
	 * lt_reset();
	 * lt_target_done_threads = 3;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_exclusive);
	 * lt_start_lock_thread(lt_grab_rw_exclusive);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running test with four shared holders.");
	 * lt_reset();
	 * lt_target_done_threads = 4;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	 */

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 1;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with threads doing shared rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 3;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	 */

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 4;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	 */

	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

#if __AMP__
	/*
	 * Ticket locks stress test: one thread bound to each CPU hammers the
	 * ticket lock (see lt_stress_ticket_lock); afterwards verify that the
	 * per-CPU contributions sum to the shared counter and no CPU starved.
	 */
	T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
	extern unsigned int real_ncpus;
	lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
	lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_ticket_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("Ticket locks stress test with lck_ticket_lock()");
	}

	/* AMP ticket locks stress test: bind each thread by its processor's cluster type. */
	T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		processor_set_t pset = processor->processor_set;
		if (pset->pset_cluster_type == PSET_AMP_P) {
			lt_start_lock_thread_p(lt_stress_ticket_lock);
		} else if (pset->pset_cluster_type == PSET_AMP_E) {
			lt_start_lock_thread_e(lt_stress_ticket_lock);
		} else {
			lt_start_lock_thread(lt_stress_ticket_lock);
		}
	}
	lt_wait_for_lock_test_threads();
#endif

	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

#if SCHED_HYGIENE_DEBUG
	/* Restore the preemption-disable measurement mode saved on entry. */
	sched_preemption_disable_debug_mode = old_mode;
#endif /* SCHED_HYGIENE_DEBUG */

	return KERN_SUCCESS;
}
1006
1007 #define MT_MAX_ARGS 8
1008 #define MT_INITIAL_VALUE 0xfeedbeef
1009 #define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
1010 #define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
1011 #define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
1012
1013 typedef void (*sy_munge_t)(void*);
1014
1015 #define MT_FUNC(x) #x, x
1016 struct munger_test {
1017 const char *mt_name;
1018 sy_munge_t mt_func;
1019 uint32_t mt_in_words;
1020 uint32_t mt_nout;
1021 uint64_t mt_expected[MT_MAX_ARGS];
1022 } munger_tests[] = {
1023 {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
1024 {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
1025 {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1026 {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1027 {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1028 {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1029 {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1030 {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1031 {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
1032 {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1033 {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1034 {MT_FUNC(munge_wwlllll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1035 {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1036 {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1037 {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1038 {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1039 {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1040 {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1041 {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1042 {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1043 {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1044 {MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1045 {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1046 {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1047 {MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1048 {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1049 {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1050 {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1051 {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1052 {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1053 {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1054 {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1055 {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1056 {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1057 {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
1058 {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1059 {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1060 {MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1061 {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
1062 {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
1063 {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1064 {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1065 {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1066 {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
1067 };
1068
1069 #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
1070
1071 static void
mt_reset(uint32_t in_words,size_t total_size,uint32_t * data)1072 mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
1073 {
1074 uint32_t i;
1075
1076 for (i = 0; i < in_words; i++) {
1077 data[i] = MT_INITIAL_VALUE;
1078 }
1079
1080 if (in_words * sizeof(uint32_t) < total_size) {
1081 bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
1082 }
1083 }
1084
1085 static void
mt_test_mungers()1086 mt_test_mungers()
1087 {
1088 uint64_t data[MT_MAX_ARGS];
1089 uint32_t i, j;
1090
1091 for (i = 0; i < MT_TEST_COUNT; i++) {
1092 struct munger_test *test = &munger_tests[i];
1093 int pass = 1;
1094
1095 T_LOG("Testing %s", test->mt_name);
1096
1097 mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
1098 test->mt_func(data);
1099
1100 for (j = 0; j < test->mt_nout; j++) {
1101 if (data[j] != test->mt_expected[j]) {
1102 T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
1103 pass = 0;
1104 }
1105 }
1106 if (pass) {
1107 T_PASS(test->mt_name);
1108 }
1109 }
1110 }
1111
1112 #if defined(HAS_APPLE_PAC)
1113
1114
/*
 * Sanity-check the ROP/JOP (ARMv8.3 pointer authentication) configuration:
 * verify the IA/IB key registers are programmed with non-zero keys, and that
 * signing, authenticating, and corrupting an IB-key-signed pointer behave as
 * expected.
 */
kern_return_t
arm64_ropjop_test()
{
	T_LOG("Testing ROP/JOP");

	/* how is ROP/JOP configured */
	boolean_t config_rop_enabled = TRUE;
	boolean_t config_jop_enabled = TRUE;


	if (config_jop_enabled) {
		/* jop key: both halves of the APIA key must be non-zero */
		uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
		uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");

		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
	}

	if (config_rop_enabled) {
		/* rop key: both halves of the APIB key must be non-zero */
		uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
		uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");

		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

		/* sign a KVA (the address of a stack local) with the IB key */
		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

		/* assert it was signed (changed) */
		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

		/* authenticate the newly signed KVA */
		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

		/* assert the authed KVA is the original KVA */
		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

		/* corrupt a signed ptr, auth it, ensure auth failed */
		uint64_t kva_corrupted = kva_signed ^ 1;

		/* authenticate the corrupted pointer */
		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

		/*
		 * when AuthIB fails, bits 63:62 will be set to 2'b10
		 * NOTE(review): the shifts below select bits 62:61 (<< 61), not
		 * 63:62 as the comment above says — confirm which bits the
		 * poisoning actually targets on this configuration.
		 */
		uint64_t auth_fail_mask = 3ULL << 61;
		uint64_t authib_fail = 2ULL << 61;

		/* assert the failed authIB of corrupted pointer is tagged */
		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
	}

	return KERN_SUCCESS;
}
1168 #endif /* defined(HAS_APPLE_PAC) */
1169
1170 #if __ARM_PAN_AVAILABLE__
1171
/* Handshake state shared between arm64_late_pan_test() and its helper thread. */
struct pan_test_thread_args {
	volatile bool join; /* set by the spawner once parked in assert_wait(); tells the helper to wake it and exit */
};
1175
/*
 * Helper thread for arm64_late_pan_test(): asserts PAN is already enabled on
 * entry, then binds itself to each CPU in turn and runs arm64_pan_test()
 * there so every core's PAN enforcement is exercised.  Finally spins until
 * the spawner signals it is parked, then wakes it.
 */
static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
	/* A freshly started kernel thread must already run with PAN set. */
	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	struct pan_test_thread_args *args = arg;

	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
		/* The bind takes effect on the next thread_block(). */
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
		arm64_pan_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	/* Wait for the spawner to be parked in assert_wait() before waking it. */
	while (!args->join) {
		;
	}

	thread_wakeup(args);
}
1200
/*
 * Run the PAN test on every CPU by spawning arm64_pan_test_thread() and
 * handshaking with it through stack-allocated args.  The wait is asserted
 * *before* args.join is set so the helper cannot issue thread_wakeup()
 * until this thread is guaranteed to be parked — do not reorder.
 */
kern_return_t
arm64_late_pan_test()
{
	thread_t thread;
	kern_return_t kr;

	struct pan_test_thread_args args;
	args.join = false;

	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
	assert(kr == KERN_SUCCESS);

	/* Drop our ref; the helper holds its own until it returns. */
	thread_deallocate(thread);

	/* Park first, then release the helper; it wakes us via thread_wakeup(&args). */
	assert_wait(&args, THREAD_UNINT);
	args.join = true;
	thread_block(THREAD_CONTINUE_NULL);
	return KERN_SUCCESS;
}
1220
1221 // Disable KASAN checking for PAN tests as the fixed commpage address doesn't have a shadow mapping
1222
/*
 * Expected-fault handler for faults taken while PAN is enabled.  Claims the
 * EL1 data abort raised when the test touches the user-mapped page, verifies
 * it really is a PAN-induced permission fault, then (once, at nesting level
 * 1) re-reads the faulting address from inside the handler to prove PAN is
 * also enforced in exception context, and finally clears PAN in the saved
 * state so the faulting instruction can be re-run successfully.
 *
 * Returns true iff the fault was recognized and consumed.
 */
static NOKASAN bool
arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	/* NOTE(review): uses the IA accessor on a data abort — IA/DA FSC appear to share the field encoding; confirm */
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);
	uint64_t far = get_saved_state_far(state);

	/*
	 * Claim only an EL1 data abort that is an L3 permission fault, taken
	 * with PAN set, on a fault address that actually translates
	 * (write-preflight check for stores, plain translation for loads).
	 */
	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    (cpsr & PSR64_PAN) &&
	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
		++pan_exception_level;
		// read the user-accessible value to make sure
		// pan is enabled and produces a 2nd fault from
		// the exception handler
		if (pan_exception_level == 1) {
			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
			pan_fault_value = *(volatile char *)far;
			ml_expect_fault_end();
			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		}
		// this fault address is used for PAN test
		// disable PAN and rerun
		mask_saved_state_cpsr(state, 0, PSR64_PAN);

		retval = true;
	}

	return retval;
}
1255
/*
 * Expected-fault handler for the fault provoked while PAN is *disabled*
 * (the write to the read-only pan_ro_addr in arm64_pan_test()).  Verifies
 * the exception was taken with PAN clear in the saved state, confirms PAN
 * was re-enabled for the handler by re-reading the user test address
 * (expecting a nested PAN fault), then skips the faulting store.
 *
 * Returns true iff the fault was recognized and consumed.
 */
static NOKASAN bool
arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	/* NOTE(review): uses the IA accessor on a data abort — IA/DA FSC appear to share the field encoding; confirm */
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);

	/* Claim only an EL1 data abort / L3 permission fault taken with PAN clear. */
	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    !(cpsr & PSR64_PAN)) {
		++pan_exception_level;
		// On an exception taken from a PAN-disabled context, verify
		// that PAN is re-enabled for the exception handler and that
		// accessing the test address produces a PAN fault.
		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
		pan_fault_value = *(volatile char *)pan_test_addr;
		ml_expect_fault_end();
		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		add_saved_state_pc(state, 4); // skip the faulting (4-byte) store instruction

		retval = true;
	}

	return retval;
}
1282
/*
 * Verify PAN (Privileged Access Never) enforcement on the current CPU.
 *
 * Two scenarios are exercised:
 *  1. With PAN enabled, a kernel read of a user-accessible mapping must
 *     fault; the expected-fault handler re-reads it (nested fault), then
 *     clears PAN so the access can be re-run and compared against the value
 *     seeded through the physical aperture.
 *  2. With PAN still disabled from (1), a store to a read-only kernel datum
 *     forces a fault from a PAN-clear context so the handler can verify PAN
 *     is re-enabled on exception entry.
 *
 * The pmap switch / preemption-disable / fault-window sequencing below is
 * order-critical; do not reorder statements.
 */
NOKASAN kern_return_t
arm64_pan_test()
{
	bool values_match = false;
	vm_offset_t priv_addr = 0;

	T_LOG("Testing PAN.");


	/* SPAN must be clear so PSTATE.PAN is set automatically on exception entry */
	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;

	// Create an empty pmap, so we can map a user-accessible page
	pmap_t pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
	T_ASSERT(pmap != NULL, NULL);

	// Get a physical page to back the mapping
	vm_page_t vm_page = vm_page_grab();
	T_ASSERT(vm_page != VM_PAGE_NULL, NULL);
	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(vm_page);
	pmap_paddr_t pa = ptoa(pn);

	// Write to the underlying physical page through the physical aperture
	// so we can test against a known value
	priv_addr = phystokv((pmap_paddr_t)pa);
	*(volatile char *)priv_addr = 0xAB;

	// Map the page in the user address space at some, non-zero address
	pan_test_addr = PAGE_SIZE;
	pmap_enter(pmap, pan_test_addr, pn, VM_PROT_READ, VM_PROT_READ, 0, true, PMAP_MAPPING_TYPE_INFER);

	// Context-switch with PAN disabled is prohibited; prevent test logging from
	// triggering a voluntary context switch.
	mp_disable_preemption();

	// Insert the user's pmap root table pointer in TTBR0
	pmap_t old_pmap = vm_map_pmap(current_thread()->map);
	pmap_switch(pmap);

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
	ml_expect_fault_end();
	T_ASSERT(values_match, NULL);

	// Two faults total: the initial PAN fault plus the nested one taken inside the handler
	T_ASSERT(pan_exception_level == 2, NULL);

	// The handler cleared PAN in the saved state so the access could be re-run
	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	// The nested read in the handler must have observed the seeded value
	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
	ml_expect_fault_end();

	// Again two faults: the RO-store fault plus the nested PAN fault from the handler
	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	// Restore the original address space before re-enabling preemption
	pmap_switch(old_pmap);

	pan_ro_addr = 0;

	// Re-enable PAN for this context before allowing a context switch
	__builtin_arm_wsr("pan", 1);

	mp_enable_preemption();

	// Tear down: unmap the test page, free the backing page, destroy the pmap
	pmap_remove(pmap, pan_test_addr, pan_test_addr + PAGE_SIZE);
	pan_test_addr = 0;

	vm_page_lock_queues();
	vm_page_free(vm_page);
	vm_page_unlock_queues();
	pmap_destroy(pmap);

	return KERN_SUCCESS;
}
1375 #endif /* __ARM_PAN_AVAILABLE__ */
1376
1377
/* XNUPOST entry point: run the lock tests and propagate their result. */
kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}
1383
/*
 * XNUPOST entry point: run the syscall-argument munger tests.  Failures are
 * reported through T_FAIL inside mt_test_mungers(); always returns 0.
 */
kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return 0;
}
1390
1391 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
/* Datum placed in the CTRR read-only region; stores to it must fault. */
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
/* Executable encoding outside the CTRR region; calls into it must fault. */
uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
/* Syndrome captured by the CTRR fault handlers for the test to inspect. */
volatile uint64_t ctrr_exception_esr;
/* VA currently expected to fault (0 when no fault is expected). */
vm_offset_t ctrr_test_va;
/* Scratch virtual page reserved during bootstrap for CTRR test mappings. */
vm_offset_t ctrr_test_page;
1397
1398 kern_return_t
ctrr_test(void)1399 ctrr_test(void)
1400 {
1401 processor_t p;
1402 boolean_t ctrr_disable = FALSE;
1403
1404 PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
1405
1406 #if CONFIG_CSR_FROM_DT
1407 if (csr_unsafe_kernel_text) {
1408 ctrr_disable = TRUE;
1409 }
1410 #endif /* CONFIG_CSR_FROM_DT */
1411
1412 if (ctrr_disable) {
1413 T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
1414 return KERN_SUCCESS;
1415 }
1416
1417 T_LOG("Running CTRR test.");
1418
1419 for (p = processor_list; p != NULL; p = p->processor_list) {
1420 thread_bind(p);
1421 thread_block(THREAD_CONTINUE_NULL);
1422 T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
1423 ctrr_test_cpu();
1424 }
1425
1426 /* unbind thread from specific cpu */
1427 thread_bind(PROCESSOR_NULL);
1428 thread_block(THREAD_CONTINUE_NULL);
1429
1430 return KERN_SUCCESS;
1431 }
1432
1433 static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)1434 ctrr_test_ro_fault_handler(arm_saved_state_t * state)
1435 {
1436 bool retval = false;
1437 uint32_t esr = get_saved_state_esr(state);
1438 esr_exception_class_t class = ESR_EC(esr);
1439 fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));
1440
1441 if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1442 ctrr_exception_esr = esr;
1443 add_saved_state_pc(state, 4);
1444 retval = true;
1445 }
1446
1447 return retval;
1448 }
1449
1450 static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)1451 ctrr_test_nx_fault_handler(arm_saved_state_t * state)
1452 {
1453 bool retval = false;
1454 uint32_t esr = get_saved_state_esr(state);
1455 esr_exception_class_t class = ESR_EC(esr);
1456 fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
1457
1458 if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1459 ctrr_exception_esr = esr;
1460 /* return to the instruction immediately after the call to NX page */
1461 set_saved_state_pc(state, get_saved_state_lr(state));
1462 retval = true;
1463 }
1464
1465 return retval;
1466 }
1467
1468 // Disable KASAN checking for CTRR tests as the test VA doesn't have a shadow mapping
1469
1470 /* test CTRR on a cpu, caller to bind thread to desired cpu */
1471 /* ctrr_test_page was reserved during bootstrap process */
/*
 * Per-CPU CTRR check.  Reads the hardware CTRR bound registers, then:
 *  1. maps the RO test datum RW through a scratch page and verifies a store
 *     takes an EL1 write permission fault,
 *  2. maps the NX test datum RX and verifies calling into it takes an EL1
 *     instruction abort,
 *  3. reads the whole CTRR region to confirm reads do not fault.
 */
NOKASAN kern_return_t
ctrr_test_cpu(void)
{
	ppnum_t ro_pn, nx_pn;
	uint64_t *ctrr_ro_test_ptr;
	void (*ctrr_nx_test_ptr)(void);
	kern_return_t kr;
	uint64_t prot = 0;
	extern vm_offset_t virtual_space_start;

	/* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */

#if (KERNEL_CTRR_VERSION == 3)
	/* CTRR v3 publishes the region bounds in different system registers */
	const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_0_C11_C0_2");
	const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_0_C11_C0_3");
#else /* (KERNEL_CTRR_VERSION == 3) */
	const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_4_C15_C2_3");
	const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_4_C15_C2_4");
#endif /* (KERNEL_CTRR_VERSION == 3) */
	/* NOTE(review): +0x1000 assumes the upper register names the start of the region's last 4K page — confirm */
	vm_offset_t rorgn_begin_va = phystokv(rorgn_lwr);
	vm_offset_t rorgn_end_va = phystokv(rorgn_upr) + 0x1000;
	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;

	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");

	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");

	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);

	/* The scratch page must start out unmapped */
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT RO
	// fetch effective block level protections from table/block entries
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");

	/* Alias the RO datum through the scratch page at the same page offset */
	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
	ctrr_ro_test_ptr = (void *)ctrr_test_va;

	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

	// should cause data abort
	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
	*ctrr_ro_test_ptr = 1;
	ml_expect_fault_end();

	// ensure write permission fault at expected level
	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault

	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");

	/* Reset state and tear down the RO mapping before the NX test */
	ctrr_test_va = 0;
	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);

	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT XN
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");

	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
#if __has_feature(ptrauth_calls)
	/* Under ptrauth, function pointers are IA-signed; sign the forged pointer so the call authenticates */
	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
	ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

	// should cause prefetch abort
	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
	ctrr_nx_test_ptr();
	ml_expect_fault_end();

	// TODO: ensure execute permission fault at expected level
	/* NOTE(review): ISS_DA_FSC used on an instruction abort — IA/DA FSC appear to share the field encoding; confirm */
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;

	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
	for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
		volatile uint64_t x = *(uint64_t *)addr;
		(void) x; /* read for side effect only */
	}

	return KERN_SUCCESS;
}
1581 #endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
1582
1583
1584
1585 #if CONFIG_SPTM
/* Reset before each test run; expected to be set elsewhere when panic lockdown fires. */
volatile uint8_t xnu_post_panic_lockdown_did_fire = false;
/* Signature shared by the assembly test stubs: take a raw 64-bit argument, return 64 bits. */
typedef uint64_t (panic_lockdown_helper_fcn_t)(uint64_t raw);
/* Assembly stubs, each provoking one class of exception (defined elsewhere). */
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_load;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_gdbtrap;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c470;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c471;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c472;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c473;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_telemetry_brk_ff00;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_br_auth_fail;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_ldr_auth_fail;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_fpac;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_copyio;

/* Nonzero when the CPU implements FEAT_FPACCOMBINE (auth failures trap directly). */
extern int gARM_FEAT_FPACCOMBINE;

/* One panic-lockdown scenario: which stub to run and what outcome to expect. */
typedef struct arm64_panic_lockdown_test_case {
	const char *func_str;                      /* printable name of the stub, for logging */
	panic_lockdown_helper_fcn_t *func;         /* stub to invoke */
	uint64_t arg;                              /* argument passed to the stub */
	esr_exception_class_t expected_ec;         /* exception class the stub should raise */
	bool expect_lockdown_exceptions_masked;    /* should lockdown fire when run with interrupts masked? */
	bool expect_lockdown_exceptions_unmasked;  /* should lockdown fire when run with interrupts unmasked? */
	bool override_expected_fault_pc_valid;     /* true when the fault PC is not the stub's entry point */
	uint64_t override_expected_fault_pc;       /* the overriding expected fault PC */
} arm64_panic_lockdown_test_case_s;

/* Scenario currently armed; consulted by the expected-exception handler. */
static arm64_panic_lockdown_test_case_s *arm64_panic_lockdown_active_test;
/* Set by the handler when the expected exception class was observed. */
static volatile bool arm64_panic_lockdown_caught_exception;
1615
1616 static bool
arm64_panic_lockdown_test_exception_handler(arm_saved_state_t * state)1617 arm64_panic_lockdown_test_exception_handler(arm_saved_state_t * state)
1618 {
1619 uint32_t esr = get_saved_state_esr(state);
1620 esr_exception_class_t class = ESR_EC(esr);
1621
1622 if (!arm64_panic_lockdown_active_test ||
1623 class != arm64_panic_lockdown_active_test->expected_ec) {
1624 return false;
1625 }
1626
1627 /* We got the expected exception, recover by forging an early return */
1628 set_saved_state_pc(state, get_saved_state_lr(state));
1629 arm64_panic_lockdown_caught_exception = true;
1630 return true;
1631 }
1632
/*
 * Execute one panic-lockdown scenario: arm the expected-fault handler at the
 * PC the stub should fault on, invoke the stub (optionally with interrupts
 * masked), and report pass/fail based on whether lockdown fired as expected
 * AND the expected exception class was caught.
 *
 * treatment       - label for logging (distinguishes masked/unmasked runs)
 * test            - the scenario to execute
 * expect_lockdown - whether panic lockdown should fire for this run
 * mask_interrupts - run the stub with interrupts disabled
 */
static void
panic_lockdown_expect_test(const char *treatment,
    arm64_panic_lockdown_test_case_s *test,
    bool expect_lockdown,
    bool mask_interrupts)
{
	int ints = 0;

	/* Reset per-run state before arming the fault expectation. */
	arm64_panic_lockdown_active_test = test;
	xnu_post_panic_lockdown_did_fire = false;
	arm64_panic_lockdown_caught_exception = false;

	uintptr_t fault_pc;
	if (test->override_expected_fault_pc_valid) {
		fault_pc = (uintptr_t)test->override_expected_fault_pc;
	} else {
		/* By default the stub faults on its own entry instruction. */
		fault_pc = (uintptr_t)test->func;
	}
	ml_expect_fault_pc_begin(
		arm64_panic_lockdown_test_exception_handler,
		fault_pc);

	if (mask_interrupts) {
		ints = ml_set_interrupts_enabled(FALSE);
	}

	(void)test->func(test->arg);

	if (mask_interrupts) {
		(void)ml_set_interrupts_enabled(ints);
	}

	ml_expect_fault_end();

	/* Pass requires both: lockdown behavior matched AND the exception was caught. */
	if (expect_lockdown == xnu_post_panic_lockdown_did_fire &&
	    arm64_panic_lockdown_caught_exception) {
		T_PASS("%s + %s OK\n", test->func_str, treatment);
	} else {
		T_FAIL(
			"%s + %s FAIL (expected lockdown: %d, did lockdown: %d, caught exception: %d)\n",
			test->func_str, treatment,
			expect_lockdown, xnu_post_panic_lockdown_did_fire,
			arm64_panic_lockdown_caught_exception);
	}
}
1678
1679 /**
1680 * Returns a pointer which is guranteed to be invalid under IA with the zero
1681 * discriminator.
1682 *
1683 * This is somewhat over complicating it since it's exceedingly likely that a
1684 * any given pointer will have a zero PAC (and thus break the test), but it's
1685 * easy enough to avoid the problem.
1686 */
1687 static uint64_t
panic_lockdown_pacia_get_invalid_ptr()1688 panic_lockdown_pacia_get_invalid_ptr()
1689 {
1690 char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1691 char *signed_ptr = NULL;
1692 do {
1693 unsigned_ptr += 4 /* avoid alignment exceptions */;
1694 signed_ptr = ptrauth_sign_unauthenticated(
1695 unsigned_ptr,
1696 ptrauth_key_asia,
1697 0);
1698 } while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1699
1700 return (uint64_t)unsigned_ptr;
1701 }
1702
1703 /**
1704 * Returns a pointer which is guranteed to be invalid under DA with the zero
1705 * discriminator.
1706 */
1707 static uint64_t
panic_lockdown_pacda_get_invalid_ptr(void)1708 panic_lockdown_pacda_get_invalid_ptr(void)
1709 {
1710 char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1711 char *signed_ptr = NULL;
1712 do {
1713 unsigned_ptr += 8 /* avoid alignment exceptions */;
1714 signed_ptr = ptrauth_sign_unauthenticated(
1715 unsigned_ptr,
1716 ptrauth_key_asda,
1717 0);
1718 } while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1719
1720 return (uint64_t)unsigned_ptr;
1721 }
1722
1723 kern_return_t
arm64_panic_lockdown_test(void)1724 arm64_panic_lockdown_test(void)
1725 {
1726 #if __has_feature(ptrauth_calls)
1727 uint64_t ia_invalid = panic_lockdown_pacia_get_invalid_ptr();
1728 #endif /* ptrauth_calls */
1729 arm64_panic_lockdown_test_case_s tests[] = {
1730 {
1731 .func_str = "arm64_panic_lockdown_test_load",
1732 .func = &arm64_panic_lockdown_test_load,
1733 /* Trigger a null deref */
1734 .arg = (uint64_t)NULL,
1735 .expected_ec = ESR_EC_DABORT_EL1,
1736 .expect_lockdown_exceptions_masked = true,
1737 .expect_lockdown_exceptions_unmasked = false,
1738 },
1739 {
1740 .func_str = "arm64_panic_lockdown_test_gdbtrap",
1741 .func = &arm64_panic_lockdown_test_gdbtrap,
1742 .arg = 0,
1743 .expected_ec = ESR_EC_UNCATEGORIZED,
1744 /* GDBTRAP instructions should be allowed everywhere */
1745 .expect_lockdown_exceptions_masked = false,
		.expect_lockdown_exceptions_unmasked = false,
	},
#if __has_feature(ptrauth_calls)
	/*
	 * NOTE(review): BRK immediates 0xC470-0xC473 look like the
	 * compiler-emitted PAC authentication-failure traps (one per key) --
	 * confirm against the toolchain's ptrauth trap encoding. Each must
	 * trigger lockdown regardless of whether exceptions are masked.
	 */
	{
		.func_str = "arm64_panic_lockdown_test_pac_brk_c470",
		.func = &arm64_panic_lockdown_test_pac_brk_c470,
		.arg = 0,
		.expected_ec = ESR_EC_BRK_AARCH64,
		.expect_lockdown_exceptions_masked = true,
		.expect_lockdown_exceptions_unmasked = true,
	},
	{
		.func_str = "arm64_panic_lockdown_test_pac_brk_c471",
		.func = &arm64_panic_lockdown_test_pac_brk_c471,
		.arg = 0,
		.expected_ec = ESR_EC_BRK_AARCH64,
		.expect_lockdown_exceptions_masked = true,
		.expect_lockdown_exceptions_unmasked = true,
	},
	{
		.func_str = "arm64_panic_lockdown_test_pac_brk_c472",
		.func = &arm64_panic_lockdown_test_pac_brk_c472,
		.arg = 0,
		.expected_ec = ESR_EC_BRK_AARCH64,
		.expect_lockdown_exceptions_masked = true,
		.expect_lockdown_exceptions_unmasked = true,
	},
	{
		.func_str = "arm64_panic_lockdown_test_pac_brk_c473",
		.func = &arm64_panic_lockdown_test_pac_brk_c473,
		.arg = 0,
		.expected_ec = ESR_EC_BRK_AARCH64,
		.expect_lockdown_exceptions_masked = true,
		.expect_lockdown_exceptions_unmasked = true,
	},
	{
		.func_str = "arm64_panic_lockdown_test_telemetry_brk_ff00",
		.func = &arm64_panic_lockdown_test_telemetry_brk_ff00,
		.arg = 0,
		.expected_ec = ESR_EC_BRK_AARCH64,
		/*
		 * PAC breakpoints are not the only breakpoints, ensure that other
		 * BRKs (like those used for telemetry) do not trigger lockdowns.
		 * This is necessary to avoid conflicts with features like UBSan
		 * telemetry (which could fire at any time in C code).
		 */
		.expect_lockdown_exceptions_masked = false,
		.expect_lockdown_exceptions_unmasked = false,
	},
	{
		/*
		 * Branch through a pointer that fails IA authentication. The
		 * reported exception class depends on whether the CPU implements
		 * FEAT_FPACCOMBINE (faults on the branch) or not (branches to a
		 * poisoned address and takes an instruction abort there).
		 */
		.func_str = "arm64_panic_lockdown_test_br_auth_fail",
		.func = &arm64_panic_lockdown_test_br_auth_fail,
		.arg = ia_invalid,
		.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_IABORT_EL1,
		.expect_lockdown_exceptions_masked = true,
		.expect_lockdown_exceptions_unmasked = true,
		/*
		 * Pre-FEAT_FPACCOMBINED, BRAx branches to a poisoned PC so we
		 * expect to fault on the branch target rather than the branch
		 * itself. The exact ELR will likely be different from ia_invalid,
		 * but since the expect logic in sleh only matches on low bits (i.e.
		 * not bits which will be poisoned), this is fine.
		 * On FEAT_FPACCOMBINED devices, we will fault on the branch itself.
		 */
		.override_expected_fault_pc_valid = !gARM_FEAT_FPACCOMBINE,
		.override_expected_fault_pc = ia_invalid
	},
	{
		/*
		 * Load through a pointer that fails DA authentication; without
		 * FEAT_FPACCOMBINE the failed auth surfaces as a data abort on
		 * the poisoned address rather than a PAC-fail exception.
		 */
		.func_str = "arm64_panic_lockdown_test_ldr_auth_fail",
		.func = &arm64_panic_lockdown_test_ldr_auth_fail,
		.arg = panic_lockdown_pacda_get_invalid_ptr(),
		.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_DABORT_EL1,
		.expect_lockdown_exceptions_masked = true,
		.expect_lockdown_exceptions_unmasked = true,
	},
	{
		/*
		 * A copyio access to a PAC-poisoned-looking address must NOT
		 * trigger lockdown: copyio faults are expected/recoverable.
		 */
		.func_str = "arm64_panic_lockdown_test_copyio_poison",
		.func = arm64_panic_lockdown_test_copyio,
		/* fake a poisoned kernel pointer by flipping the bottom PAC bit */
		.arg = ((uint64_t)-1) ^ (1LLU << (64 - T1SZ_BOOT)),
		.expected_ec = ESR_EC_DABORT_EL1,
		.expect_lockdown_exceptions_masked = false,
		.expect_lockdown_exceptions_unmasked = false,
	},
#if __ARM_ARCH_8_6__
	{
		/*
		 * On FEAT_FPAC hardware, a failed authenticate instruction traps
		 * directly with a PAC-fail exception class and must lock down.
		 */
		.func_str = "arm64_panic_lockdown_test_fpac",
		.func = &arm64_panic_lockdown_test_fpac,
		.arg = ia_invalid,
		.expected_ec = ESR_EC_PAC_FAIL,
		.expect_lockdown_exceptions_masked = true,
		.expect_lockdown_exceptions_unmasked = true,
	},
#endif /* __ARM_ARCH_8_6__ */
#endif /* ptrauth_calls */
	{
		/*
		 * Baseline copyio fault (no PAC involvement): an ordinary
		 * recoverable user-copy abort must not trigger lockdown.
		 */
		.func_str = "arm64_panic_lockdown_test_copyio",
		.func = arm64_panic_lockdown_test_copyio,
		.arg = 0x0 /* load from NULL */,
		.expected_ec = ESR_EC_DABORT_EL1,
		.expect_lockdown_exceptions_masked = false,
		.expect_lockdown_exceptions_unmasked = false,
	},
	};

	/*
	 * Run every table entry twice: once with interrupts unmasked and once
	 * with them masked, checking in each case whether the exception is
	 * expected to trip panic lockdown.
	 */
	size_t test_count = sizeof(tests) / sizeof(*tests);
	for (size_t i = 0; i < test_count; i++) {
		panic_lockdown_expect_test(
			"Exceptions unmasked",
			&tests[i],
			tests[i].expect_lockdown_exceptions_unmasked,
			/* mask_interrupts */ false);

		panic_lockdown_expect_test(
			"Exceptions masked",
			&tests[i],
			tests[i].expect_lockdown_exceptions_masked,
			/* mask_interrupts */ true);
	}
	return KERN_SUCCESS;
}
1867 #endif /* CONFIG_SPTM */
1868