/*
 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science Carnegie Mellon University Pittsburgh PA
 *  15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <vm/pmap.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_kern_xnu.h>
#include <mach/vm_map.h>
#include <kern/backtrace.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <stdatomic.h>
#include <string.h>
#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <san/kasan.h>
#include <sys/errno.h>
#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <machine/machine_routines.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>
#include <arm/pmap/pmap_pt_geometry.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
#include <arm64/amcc_rorgn.h>
#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)

#include <arm64/machine_machdep.h>

kern_return_t arm64_backtrace_test(void);
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
kern_return_t arm64_ropjop_test(void);
#endif
#if defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
kern_return_t ctrr_test(void);
kern_return_t ctrr_test_cpu(void);
#endif
#if BTI_ENFORCED
kern_return_t arm64_bti_test(void);
#endif /* BTI_ENFORCED */
#if HAS_SPECRES
extern kern_return_t specres_test(void);
#endif

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif

#if CONFIG_SPTM
kern_return_t arm64_panic_lockdown_test(void);
#endif /* CONFIG_SPTM */


#include <arm64/speculation.h>
kern_return_t arm64_speculation_guard_test(void);


#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
#define LOCK_TEST_SETUP_TIMEOUT_SEC 15
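
/*
 * Shared state for the lock tests: lt_counter counts successful
 * critical-section entries, lt_max_holders records the most simultaneous
 * holders ever observed (any value above the expected maximum indicates a
 * mutual-exclusion failure), and lt_done_threads/lt_target_done_threads
 * form a simple join barrier for the spawned test threads.
 */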
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;
static uint64_t lt_setup_timeout = 0;

static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit()
{
	uint32_t i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}

static void
lt_sleep_a_little_bit()
{
	delay(100);
}

static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
	while (0 == lck_mtx_try_lock(&lt_mtx)) {
		;
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_rw_exclusive()
{
	lck_rw_lock_exclusive(&lt_rwlock);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
		lt_sleep_a_little_bit();
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared()
 * {
 *	lck_rw_lock_shared(&lt_rwlock);
 *	lt_counter++;
 *
 *	lt_note_another_blocking_lock_holder();
 *	lt_sleep_a_little_bit();
 *	lt_note_blocking_lock_release();
 *
 *	lck_rw_done(&lt_rwlock);
 * }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared_with_try()
 * {
 *	while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *	lt_counter++;
 *
 *	lt_note_another_blocking_lock_holder();
 *	lt_sleep_a_little_bit();
 *	lt_note_blocking_lock_release();
 *
 *	lck_rw_done(&lt_rwlock);
 * }
 */

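/*
 * Take the rwlock shared, try to upgrade to exclusive (falling back to a
 * fresh exclusive acquisition if the upgrade is refused), then downgrade
 * back to shared before releasing.  lt_max_upgrade_holders must stay at 1:
 * only one thread at a time may hold the lock in the upgraded state.
 */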
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}

#if __AMP__
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

lck_ticket_t lt_ticket_lock;
lck_grp_t lt_ticket_grp;

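/*
 * Ticket-lock stress: every bound thread increments the shared lt_counter
 * under the ticket lock until it reaches 'limit', keeping a per-CPU local
 * count.  At the end the sum of the local counts must equal lt_counter
 * (no lost updates) and every CPU must have made progress, exercising the
 * FIFO fairness that ticket locks are expected to provide.
 */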
static void
lt_stress_ticket_lock()
{
	uint local_counter = 0;

	uint cpuid = cpu_number();

	kprintf("%s>cpu %u starting\n", __FUNCTION__, cpuid);

	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
	lt_counter++;
	local_counter++;
	lck_ticket_unlock(&lt_ticket_lock);

	/* Wait until all test threads have finished any binding */
	while (lt_counter < lt_target_done_threads) {
		if (mach_absolute_time() > lt_setup_timeout) {
			kprintf("%s>cpu %u noticed that we exceeded setup timeout of %d seconds during initial setup phase (only %u out of %u threads checked in)",
			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter, lt_target_done_threads);
			return;
		}
		/* Yield to keep the CPUs available for the threads to bind */
		thread_yield_internal(1);
	}

	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
	lt_counter++;
	local_counter++;
	lck_ticket_unlock(&lt_ticket_lock);

	/*
	 * Now that the test threads have finished any binding, wait
	 * until they are all actively spinning on-core (done yielding)
	 * so we get a fairly timed start.
	 */
	while (lt_counter < 2 * lt_target_done_threads) {
		if (mach_absolute_time() > lt_setup_timeout) {
			kprintf("%s>cpu %u noticed that we exceeded setup timeout of %d seconds during secondary setup phase (only %u out of %u threads checked in)",
			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter - lt_target_done_threads, lt_target_done_threads);
			return;
		}
	}

	kprintf("%s>cpu %u started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		lck_ticket_unlock(&lt_ticket_lock);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %u cpu %u incremented the counter %u times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
#endif

static void
lt_grab_hw_lock()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
	while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to()
{
	(void)hw_lock_to(&lt_hw_lock, &hw_lock_spin_policy, LCK_GRP_NULL);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
	lck_spin_lock(&lt_lck_spin_t);
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;
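/*
 * The two flags above implement the handshake for the cross-thread trylock
 * tests: the helper thread spins until the main thread sets
 * lt_thread_lock_grabbed (meaning it already holds the lock under test),
 * then records the result of its own trylock attempt in
 * lt_thread_lock_success.
 */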

static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;
	/* Reset the timeout deadline relative to the current time */
	nanoseconds_to_absolutetime(LOCK_TEST_SETUP_TIMEOUT_SEC * NSEC_PER_SEC, &lt_setup_timeout);
	lt_setup_timeout += mach_absolute_time();

	OSMemoryBarrier();
}

static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock,
	    &hw_lock_test_give_up_policy, LCK_GRP_NULL);
	OSMemoryBarrier();
	mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	OSMemoryBarrier();
}

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}

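/*
 * Single-threaded (plus one helper thread) checks that the try-lock
 * variants succeed on unheld locks and fail on held ones, across mutexes,
 * rwlocks, hw spin locks with and without timeout, and lck_spin locks.
 */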
static kern_return_t
lt_test_trylocks()
{
	boolean_t success;
	extern unsigned int real_ncpus;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, &hw_lock_test_give_up_policy, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;
	uint32_t i;

	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
		func();
	}

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

#if __AMP__
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_cluster_bound_thread(void *arg, char cluster_type)
{
	void (*func)(void) = (void (*)(void))arg;

	thread_t thread = current_thread();

	kern_return_t kr = thread_soft_bind_cluster_type(thread, cluster_type);
	if (kr != KERN_SUCCESS) {
		kprintf("%s>failed to bind to cluster type %c\n", __FUNCTION__, cluster_type);
	}

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_e_thread(void *arg, wait_result_t wres __unused)
{
	lt_cluster_bound_thread(arg, 'e');
}


static void
lt_p_thread(void *arg, wait_result_t wres __unused)
{
	lt_cluster_bound_thread(arg, 'p');
}

static void
lt_start_lock_thread_with_bind(thread_continue_t bind_type, thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(bind_type, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
#endif /* __AMP__ */

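/*
 * Drive each lock family (mutex, rwlock, hw spin lock, lck_spin, and on
 * AMP systems ticket locks) through uncontended and three-way contended
 * runs; every run must reach LOCK_TEST_ITERATIONS * nthreads counter
 * increments with no more than the expected number of simultaneous holders.
 */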
static kern_return_t
lt_test_locks()
{
#if SCHED_HYGIENE_DEBUG
	/*
	 * When testing, the preemption disable threshold may be hit (for
	 * example when testing a lock timeout). To avoid this, the preemption
	 * disable measurement is temporarily disabled during lock testing.
	 */
	int old_mode = sched_preemption_disable_debug_mode;
	if (old_mode == SCHED_HYGIENE_MODE_PANIC) {
		sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
	}
#endif /* SCHED_HYGIENE_DEBUG */

	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks*/
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running uncontended shared rwlock test.");
	 * lt_reset();
	 * lt_target_done_threads = 1;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
	 * lt_reset();
	 * lt_target_done_threads = 3;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_exclusive);
	 * lt_start_lock_thread(lt_grab_rw_exclusive);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running test with four shared holders.");
	 * lt_reset();
	 * lt_target_done_threads = 4;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	 */

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 1;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with threads doing shared rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 3;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	 */

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 4;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	 */

	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

#if __AMP__
	/* Ticket locks stress test */
	T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
	extern unsigned int real_ncpus;
	lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
	lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
	lt_reset();
	lt_target_done_threads = real_ncpus;
	uint thread_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_with_bind(lt_bound_thread, lt_stress_ticket_lock);
		thread_count++;
	}
	T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (mach_absolute_time() > lt_setup_timeout) {
		T_FAIL("Stress test setup timed out after %d seconds", LOCK_TEST_SETUP_TIMEOUT_SEC);
	} else if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("Ticket locks stress test with lck_ticket_lock() (%u total acquires)", total_local_count);
	}

	/* AMP ticket locks stress test */
	T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
	lt_reset();
	lt_target_done_threads = real_ncpus;
	thread_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		processor_set_t pset = processor->processor_set;
		switch (pset->pset_cluster_type) {
		case PSET_AMP_P:
			lt_start_lock_thread_with_bind(lt_p_thread, lt_stress_ticket_lock);
			break;
		case PSET_AMP_E:
			lt_start_lock_thread_with_bind(lt_e_thread, lt_stress_ticket_lock);
			break;
		default:
			lt_start_lock_thread(lt_stress_ticket_lock);
			break;
		}
		thread_count++;
	}
	T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
	lt_wait_for_lock_test_threads();
#endif /* __AMP__ */

	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

#if SCHED_HYGIENE_DEBUG
	sched_preemption_disable_debug_mode = old_mode;
#endif /* SCHED_HYGIENE_DEBUG */

	return KERN_SUCCESS;
}

#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
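
/*
 * For illustration only: a munger rewrites a packed array of 32-bit user
 * arguments into 64-bit kernel argument slots, in place, filling slots from
 * the last one backwards so no input word is clobbered before it is read
 * ('w' zero-extends one word, 's' sign-extends one word, 'l' moves a 64-bit
 * value stored as two words).  A minimal sketch of the 'wl' case, assuming
 * a little-endian layout; illustrative_munge_wl is a hypothetical name, and
 * the shipping mungers live in bsd/dev/munge.c.
 */
#if 0 /* illustrative sketch, not compiled */
static void
illustrative_munge_wl(void *args)
{
	volatile uint64_t *out_args = (volatile uint64_t *)args;
	uint32_t *in_args = (uint32_t *)args;
	uint64_t l_val;

	/* 'l': assemble the 64-bit argument from input words 1 and 2 first ... */
	memcpy(&l_val, &in_args[1], sizeof(l_val));
	out_args[1] = l_val;
	/* ... then 'w': zero-extend input word 0 into the first 64-bit slot */
	out_args[0] = in_args[0];
}
#endif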

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
	const char *mt_name;
	sy_munge_t mt_func;
	uint32_t mt_in_words;
	uint32_t mt_nout;
	uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlllll), 12, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwllllll), 14, 8, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lww), 4, 3, {MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}

static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}

#if defined(HAS_APPLE_PAC)

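/*
 * Sanity-check pointer authentication: the key registers must be nonzero,
 * signing must change a kernel VA, authenticating the signed value must
 * recover the original, and authenticating a corrupted value must yield a
 * poisoned (non-canonical) address rather than the original pointer.
 */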
kern_return_t
arm64_ropjop_test()
{
	T_LOG("Testing ROP/JOP");

	/* how is ROP/JOP configured */
	boolean_t config_rop_enabled = TRUE;
	boolean_t config_jop_enabled = TRUE;


	if (config_jop_enabled) {
		/* jop key */
		uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
		uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");

		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
	}

	if (config_rop_enabled) {
		/* rop key */
		uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
		uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");

		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

		/* sign a kernel VA (the address of a local variable) */
		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

		/* assert it was signed (changed) */
		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

		/* authenticate the newly signed KVA */
		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

		/* assert the authed KVA is the original KVA */
		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

		/* corrupt a signed ptr, auth it, ensure auth failed */
		uint64_t kva_corrupted = kva_signed ^ 1;

		/* authenticate the corrupted pointer */
		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

		/* when AuthIB fails, bits [62:61] will be set to 2'b10 */
		uint64_t auth_fail_mask = 3ULL << 61;
		uint64_t authib_fail = 2ULL << 61;

		/* assert the failed authIB of corrupted pointer is tagged */
		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
	}

	return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */

#if __ARM_PAN_AVAILABLE__

struct pan_test_thread_args {
	volatile bool join;
};

static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	struct pan_test_thread_args *args = arg;

	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
		arm64_pan_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	while (!args->join) {
		;
	}

	thread_wakeup(args);
}

kern_return_t
arm64_late_pan_test()
{
	thread_t thread;
	kern_return_t kr;

	struct pan_test_thread_args args;
	args.join = false;

	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);

	assert_wait(&args, THREAD_UNINT);
	args.join = true;
	thread_block(THREAD_CONTINUE_NULL);
	return KERN_SUCCESS;
}

// Disable KASAN checking for PAN tests as the fixed commpage address doesn't have a shadow mapping

static NOKASAN bool
arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint64_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);
	uint64_t far = get_saved_state_far(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    (cpsr & PSR64_PAN) &&
	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
		++pan_exception_level;
		// read the user-accessible value to make sure
		// pan is enabled and produces a 2nd fault from
		// the exception handler
		if (pan_exception_level == 1) {
			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
			pan_fault_value = *(volatile char *)far;
			ml_expect_fault_end();
			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		}
		// this fault address is used for PAN test
		// disable PAN and rerun
		mask_saved_state_cpsr(state, 0, PSR64_PAN);

		retval = true;
	}

	return retval;
}

static NOKASAN bool
arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint64_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    !(cpsr & PSR64_PAN)) {
		++pan_exception_level;
		// On an exception taken from a PAN-disabled context, verify
		// that PAN is re-enabled for the exception handler and that
		// accessing the test address produces a PAN fault.
		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
		pan_fault_value = *(volatile char *)pan_test_addr;
		ml_expect_fault_end();
		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		add_saved_state_pc(state, 4);

		retval = true;
	}

	return retval;
}

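/*
 * PAN (Privileged Access Never, ARMv8.1) makes privileged loads and stores
 * to user-accessible mappings fault while PSTATE.PAN is set.  The test maps
 * a page EL0-readable, touches it from the kernel to provoke the fault, and
 * relies on the handlers above to verify both the initial PAN fault and
 * that the exception vector re-arms PAN on entry.
 */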
NOKASAN kern_return_t
arm64_pan_test()
{
	bool values_match = false;
	vm_offset_t priv_addr = 0;

	T_LOG("Testing PAN.");


	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;

	// Create an empty pmap, so we can map a user-accessible page
	pmap_t pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
	T_ASSERT(pmap != NULL, NULL);

	// Get a physical page to back the mapping
	vm_page_t vm_page = vm_page_grab();
	T_ASSERT(vm_page != VM_PAGE_NULL, NULL);
	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(vm_page);
	pmap_paddr_t pa = ptoa(pn);

	// Write to the underlying physical page through the physical aperture
	// so we can test against a known value
	priv_addr = phystokv((pmap_paddr_t)pa);
	*(volatile char *)priv_addr = 0xAB;

	// Map the page in the user address space at some non-zero address
	pan_test_addr = PAGE_SIZE;
	pmap_enter(pmap, pan_test_addr, pn, VM_PROT_READ, VM_PROT_READ, 0, true, PMAP_MAPPING_TYPE_INFER);

	// Context-switch with PAN disabled is prohibited; prevent test logging from
	// triggering a voluntary context switch.
	mp_disable_preemption();

	// Insert the user's pmap root table pointer in TTBR0
	thread_t thread = current_thread();
	pmap_t old_pmap = vm_map_pmap(thread->map);
	pmap_switch(pmap, thread);

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
	ml_expect_fault_end();
	T_ASSERT(values_match, NULL);

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
	ml_expect_fault_end();

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pmap_switch(old_pmap, thread);

	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);

	mp_enable_preemption();

	pmap_remove(pmap, pan_test_addr, pan_test_addr + PAGE_SIZE);
	pan_test_addr = 0;

	vm_page_lock_queues();
	vm_page_free(vm_page);
	vm_page_unlock_queues();
	pmap_destroy(pmap);

	return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */


kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return 0;
}

#if (defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)) && defined(CONFIG_XNUPOST)
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
volatile uint64_t ctrr_exception_esr;
vm_offset_t ctrr_test_va;
vm_offset_t ctrr_test_page;
atomic_bool ctrr_test_in_progress;
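
/*
 * CTRR (Configurable Text Read-only Region) locks the physical range
 * [rorgn_begin, rorgn_end] against kernel writes regardless of the MMU
 * permissions.  The tests below remap a covered page RW and an uncovered
 * page RX, then provoke a data abort on write and an instruction abort on
 * execute to confirm the hardware protection overrides the page tables.
 */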
1441
1442 kern_return_t
ctrr_test(void)1443 ctrr_test(void)
1444 {
1445 processor_t p;
1446
1447 /*
1448 * The test uses some globals and also a specific reserved VA region, so it
1449 * can't run concurrently. This might otherwise happen via the sysctl
1450 * interface.
1451 */
1452 bool expected = false;
1453 if (!atomic_compare_exchange_strong_explicit(&ctrr_test_in_progress,
1454 &expected, true,
1455 memory_order_acq_rel, memory_order_relaxed)) {
1456 T_FAIL("Can't run multiple CTRR tests at once");
1457 return KERN_SUCCESS;
1458 }
1459
1460
1461 T_LOG("Running CTRR test.");
1462
1463 for (p = processor_list; p != NULL; p = p->processor_list) {
1464 thread_bind(p);
1465 thread_block(THREAD_CONTINUE_NULL);
1466 T_LOG("Running CTRR test on CPU %d\n", p->cpu_id);
1467 ctrr_test_cpu();
1468 }
1469
1470 /* unbind thread from specific cpu */
1471 thread_bind(PROCESSOR_NULL);
1472 thread_block(THREAD_CONTINUE_NULL);
1473
1474 T_PASS("Done running CTRR test on all CPUs");
1475 atomic_store_explicit(&ctrr_test_in_progress, false, memory_order_release);
1476
1477 return KERN_SUCCESS;
1478 }
1479
1480 static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)1481 ctrr_test_ro_fault_handler(arm_saved_state_t * state)
1482 {
1483 bool retval = false;
1484 uint64_t esr = get_saved_state_esr(state);
1485 esr_exception_class_t class = ESR_EC(esr);
1486 fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));
1487
1488 if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1489 ctrr_exception_esr = esr;
1490 add_saved_state_pc(state, 4);
1491 retval = true;
1492 }
1493
1494 return retval;
1495 }
1496
1497 static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)1498 ctrr_test_nx_fault_handler(arm_saved_state_t * state)
1499 {
1500 bool retval = false;
1501 uint64_t esr = get_saved_state_esr(state);
1502 esr_exception_class_t class = ESR_EC(esr);
1503 fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
1504
1505 if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1506 ctrr_exception_esr = esr;
1507 /* return to the instruction immediately after the call to NX page */
1508 set_saved_state_pc(state, get_saved_state_lr(state));
1509 #if BTI_ENFORCED
1510 /* Clear BTYPE to prevent taking another exception on ERET */
1511 uint32_t spsr = get_saved_state_cpsr(state);
1512 spsr &= ~PSR_BTYPE_MASK;
1513 set_saved_state_cpsr(state, spsr);
1514 #endif /* BTI_ENFORCED */
1515 retval = true;
1516 }
1517
1518 return retval;
1519 }
1520
1521 // Disable KASAN checking for CTRR tests as the test VA doesn't have a shadow mapping
1522
1523 /* test CTRR on a cpu, caller to bind thread to desired cpu */
1524 /* ctrr_test_page was reserved during bootstrap process if no SPTM */
1525 NOKASAN kern_return_t
ctrr_test_cpu(void)1526 ctrr_test_cpu(void)
1527 {
1528 ppnum_t ro_pn, nx_pn;
1529 uint64_t *ctrr_ro_test_ptr;
1530 void (*ctrr_nx_test_ptr)(void);
1531 kern_return_t kr;
1532 uint64_t prot = 0;
1533 extern vm_offset_t virtual_space_start;
1534 extern vm_offset_t rorgn_begin;
1535 extern vm_offset_t rorgn_end;
1536
1537 vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
1538 vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
1539 bool ctrr_enabled = !ml_unsafe_kernel_text();
1540
1541 #if CONFIG_SPTM
1542 if (/* DISABLES CODE */ (1)) {
1543 T_SKIP("Skipping CTRR test because testing under SPTM is not supported yet");
1544 return KERN_SUCCESS;
1545 }
1546 #endif
1547
1548 #if defined(KERNEL_INTEGRITY_PV_CTRR)
1549 if (rorgn_begin == 0 && rorgn_end == 0) {
1550 // Under paravirtualized CTRR, it's possible that we want CTRR to be
1551 // enabled but we're running under an older host that doesn't support
1552 // it.
1553 ctrr_enabled = false;
1554 T_LOG("Treating paravirtualized CTRR as disabled due to lack of support");
1555 }
1556 #endif
1557
1558 // The CTRR read-only region is the physical address range [rorgn_begin, rorgn_end].
1559 // rorgn_end will be one byte short of a page boundary.
1560 if (ctrr_enabled) {
1561 T_EXPECT(rorgn_begin != 0, "Expect rorgn_begin to be set when CTRR enabled");
1562 T_EXPECT_GE_ULONG(rorgn_end, rorgn_begin, "Expect rorgn_end to be >= rorgn_begin when CTRR enabled");
1563
1564 pmap_paddr_t ro_test_pa = kvtophys_nofail(ro_test_va);
1565 pmap_paddr_t nx_test_pa = kvtophys_nofail(nx_test_va);
1566
1567 T_EXPECT(rorgn_begin <= ro_test_pa && ro_test_pa <= rorgn_end, "Expect ro_test_pa to be inside the CTRR region");
1568 T_EXPECT((nx_test_pa < rorgn_begin) ^ (nx_test_pa > rorgn_end), "Expect nx_test_pa to be outside the CTRR region");
1569 } else {
1570 T_EXPECT_EQ_ULONG(rorgn_begin, 0, "Expect rorgn_begin to be unset when CTRR disabled");
1571 T_EXPECT_EQ_ULONG(rorgn_end, 0, "Expect rorgn_end to be unset when CTRR disabled");
1572 T_LOG("Skipping region check because CTRR is disabled");
1573 }
1574
1575 if (ctrr_enabled) {
1576 T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
1577 for (pmap_paddr_t page_pa = rorgn_begin; page_pa <= rorgn_end; page_pa += PAGE_SIZE) {
1578 vm_offset_t page_va = phystokv(page_pa);
1579 for (vm_offset_t va = page_va; va < page_va + PAGE_SIZE; va += 8) {
1580 volatile uint64_t x = *(uint64_t *)va;
1581 (void) x; /* read for side effect only */
1582 }
1583 }
1584 } else {
1585 T_LOG("Skipping read test because CTRR is disabled");
1586 }

	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non-zero");

	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
	T_ASSERT(ctrr_test_page != 0, "Expect ctrr_test_page to be initialized");

	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", (void *)ctrr_test_page, ro_pn);
	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT RO
	// fetch effective block level protections from table/block entries
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");

	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
	ctrr_ro_test_ptr = (void *)ctrr_test_va;

	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

	// should cause data abort
	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
	*ctrr_ro_test_ptr = 1;
	ml_expect_fault_end();

	// ensure write permission fault at expected level
	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault

	if (ctrr_enabled) {
		T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
		T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
		T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");
	} else {
		T_EXPECT(ctrr_exception_esr == 0, "No fault expected with CTRR disabled");
	}
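
	/*
	 * A minimal sketch (not compiled) of the ESR_ELx fields the
	 * expectations above decode, per the ARM architecture: EC lives in
	 * ESR[31:26] and the ISS in ESR[24:0]; for data aborts the fault
	 * status code (FSC) is ISS[5:0] and the WnR (write-not-read) bit is
	 * ISS[6]. The local names are illustrative only.
	 */
#if 0
	uint64_t esr = ctrr_exception_esr;
	uint32_t ec  = (uint32_t)((esr >> 26) & 0x3F); /* exception class */
	uint32_t iss = (uint32_t)(esr & 0x1FFFFFF);    /* instruction-specific syndrome */
	uint32_t fsc = iss & 0x3F;                     /* data-abort fault status code */
	bool     wnr = (iss >> 6) & 1;                 /* true for a faulting write */
#endif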

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", (void *)ctrr_test_page, nx_pn);

	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);

	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT XN
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX (prot=0x%lx)", prot);

	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
#if __has_feature(ptrauth_calls)
	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
	ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

	// should cause prefetch abort
	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
	ctrr_nx_test_ptr();
	ml_expect_fault_end();

	if (ctrr_enabled) {
		// FIXME: rdar://143430725 (xnu support for paravirtualized CTXR)
		// Without FEAT_XNX support on the host side, we cannot test kernel execution outside CTXR regions.
#if !defined(KERNEL_INTEGRITY_PV_CTRR)
		// TODO: ensure execute permission fault at expected level
		T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 expected");
		T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
#endif /* !defined(KERNEL_INTEGRITY_PV_CTRR) */
	} else {
		T_EXPECT(ctrr_exception_esr == 0, "No fault expected with CTRR disabled");
	}

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;

	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	return KERN_SUCCESS;
}
#endif /* (defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)) && defined(CONFIG_XNUPOST) */


/**
 * Explicitly assert that xnu is still uniprocessor before running a POST test.
 *
 * In practice, tests in this module can safely manipulate CPU state without
 * fear of getting preempted. There's no way for cpu_boot_thread() to bring up
 * the secondary CPUs until StartIOKitMatching() completes, and arm64 orders
 * kern_post_test() before StartIOKitMatching().
 *
 * But this is also an implementation detail. Tests that rely on this ordering
 * should call assert_uniprocessor(), so that we can figure out a workaround
 * on the off-chance this ordering ever changes.
 */
__unused static void
assert_uniprocessor(void)
{
	extern unsigned int real_ncpus;
	unsigned int ncpus = os_atomic_load(&real_ncpus, relaxed);
	T_QUIET; T_ASSERT_EQ_UINT(1, ncpus, "arm64 kernel POST tests should run before any secondary CPUs are brought up");
}
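
/*
 * Hypothetical usage sketch (not compiled): a POST test that toggles per-CPU
 * state would document its reliance on the boot-time ordering like so. The
 * test name below is invented for illustration.
 */
#if 0
kern_return_t
arm64_example_post_test(void)
{
	/* Fail loudly if secondary CPUs ever come up before POST runs. */
	assert_uniprocessor();
	/* ... manipulate CPU state without fear of preemption ... */
	return KERN_SUCCESS;
}
#endif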


#if CONFIG_SPTM
volatile uint8_t xnu_post_panic_lockdown_did_fire = false;
typedef uint64_t (panic_lockdown_helper_fcn_t)(uint64_t raw);
typedef bool (panic_lockdown_precondition_fcn_t)(void);
typedef bool (panic_lockdown_recovery_fcn_t)(arm_saved_state_t *);

/* SP0 vector tests */
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_load;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_gdbtrap;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c470;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c471;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c472;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c473;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_telemetry_brk_ff00;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_br_auth_fail;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_ldr_auth_fail;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_fpac;
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_copyio;
extern uint8_t arm64_panic_lockdown_test_copyio_fault_pc;

extern int gARM_FEAT_FPACCOMBINE;

/* SP1 vector tests */
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_invalid_stack;
extern bool arm64_panic_lockdown_test_sp1_invalid_stack_handler(arm_saved_state_t *);
extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_exception_in_vector;
extern panic_lockdown_helper_fcn_t el1_sp1_synchronous_raise_exception_in_vector;
extern bool arm64_panic_lockdown_test_sp1_exception_in_vector_handler(arm_saved_state_t *);

#if DEVELOPMENT || DEBUG
extern struct panic_lockdown_initiator_state debug_panic_lockdown_initiator_state;
#endif /* DEVELOPMENT || DEBUG */

typedef struct arm64_panic_lockdown_test_case {
	const char *name;
	panic_lockdown_helper_fcn_t *func;
	uint64_t arg;
	panic_lockdown_precondition_fcn_t *precondition;
	esr_exception_class_t expected_ec;
	bool check_fs;
	fault_status_t expected_fs;
	bool expect_lockdown_exceptions_masked;
	bool expect_lockdown_exceptions_unmasked;
	bool override_expected_fault_pc_valid;
	uint64_t override_expected_fault_pc;
} arm64_panic_lockdown_test_case_s;

static arm64_panic_lockdown_test_case_s *arm64_panic_lockdown_active_test;
static volatile bool arm64_panic_lockdown_caught_exception;

static bool
arm64_panic_lockdown_test_exception_handler(arm_saved_state_t *state)
{
	uint64_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fs = ISS_DA_FSC(ESR_ISS(esr));

	if (!arm64_panic_lockdown_active_test ||
	    class != arm64_panic_lockdown_active_test->expected_ec ||
	    (arm64_panic_lockdown_active_test->check_fs &&
	    fs != arm64_panic_lockdown_active_test->expected_fs)) {
		return false;
	}

#if BTI_ENFORCED
	/* Clear BTYPE to prevent taking another exception on ERET */
	uint32_t spsr = get_saved_state_cpsr(state);
	spsr &= ~PSR_BTYPE_MASK;
	set_saved_state_cpsr(state, spsr);
#endif /* BTI_ENFORCED */

	/* We got the expected exception, recover by forging an early return */
	set_saved_state_pc(state, get_saved_state_lr(state));
	arm64_panic_lockdown_caught_exception = true;

	return true;
}

static void
panic_lockdown_expect_test(const char *treatment,
    arm64_panic_lockdown_test_case_s *test,
    bool expect_lockdown,
    bool mask_interrupts)
{
	int ints = 0;

	arm64_panic_lockdown_active_test = test;
	xnu_post_panic_lockdown_did_fire = false;
	arm64_panic_lockdown_caught_exception = false;

	uintptr_t fault_pc;
	if (test->override_expected_fault_pc_valid) {
		fault_pc = (uintptr_t)test->override_expected_fault_pc;
	} else {
		fault_pc = (uintptr_t)test->func;
#ifdef BTI_ENFORCED
		/* When BTI is enabled, we expect the fault to occur after the landing pad */
		fault_pc += 4;
#endif /* BTI_ENFORCED */
	}
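
	/*
	 * Illustration of the +4 adjustment above: under BTI, an
	 * indirectly-entered assembly helper begins with a 4-byte BTI landing
	 * pad, so the first instruction that can fault sits one instruction
	 * past the symbol. A hypothetical helper might look like:
	 *
	 *     arm64_panic_lockdown_test_example:
	 *         bti  c          // +0: landing pad, never faults
	 *         ldr  x0, [x0]   // +4: first instruction that can fault
	 *         ret
	 */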

	ml_expect_fault_pc_begin(
	    arm64_panic_lockdown_test_exception_handler,
	    fault_pc);

	if (mask_interrupts) {
		ints = ml_set_interrupts_enabled(FALSE);
	}

	(void)test->func(test->arg);

	if (mask_interrupts) {
		(void)ml_set_interrupts_enabled(ints);
	}

	ml_expect_fault_end();

	if (expect_lockdown == xnu_post_panic_lockdown_did_fire &&
	    arm64_panic_lockdown_caught_exception) {
		T_PASS("%s + %s OK\n", test->name, treatment);
	} else {
		T_FAIL(
			"%s + %s FAIL (expected lockdown: %d, did lockdown: %d, caught exception: %d)\n",
			test->name, treatment,
			expect_lockdown, xnu_post_panic_lockdown_did_fire,
			arm64_panic_lockdown_caught_exception);
	}

#if DEVELOPMENT || DEBUG
	/* Check that the debug info is minimally functional */
	if (expect_lockdown) {
		T_EXPECT_NE_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
		    0ULL, "Initiator PC set");
	} else {
		T_EXPECT_EQ_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
		    0ULL, "Initiator PC not set");
	}

	/* Reset the debug data so it can be filled later if needed */
	debug_panic_lockdown_initiator_state.initiator_pc = 0;
#endif /* DEVELOPMENT || DEBUG */
}

static void
panic_lockdown_expect_fault_raw(const char *label,
    panic_lockdown_helper_fcn_t entrypoint,
    panic_lockdown_helper_fcn_t faulting_function,
    expected_fault_handler_t fault_handler)
{
	uint64_t test_success = 0;
	xnu_post_panic_lockdown_did_fire = false;

	uintptr_t fault_pc = (uintptr_t)faulting_function;
#ifdef BTI_ENFORCED
	/* When BTI is enabled, we expect the fault to occur after the landing pad */
	fault_pc += 4;
#endif /* BTI_ENFORCED */

	ml_expect_fault_pc_begin(fault_handler, fault_pc);

	test_success = entrypoint(0);

	ml_expect_fault_end();

	if (test_success && xnu_post_panic_lockdown_did_fire) {
		T_PASS("%s OK\n", label);
	} else {
		T_FAIL("%s FAIL (test returned: %llu, did lockdown: %d)\n",
		    label, (unsigned long long)test_success, xnu_post_panic_lockdown_did_fire);
	}
}

/**
 * Returns a pointer which is guaranteed to be invalid under IA with the zero
 * discriminator.
 *
 * This is somewhat overcomplicated, since it's exceedingly unlikely that any
 * given pointer will have a zero PAC (and thus break the test), but it's easy
 * enough to avoid the problem.
 */
static uint64_t
panic_lockdown_pacia_get_invalid_ptr(void)
{
	char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
	char *signed_ptr = NULL;
	do {
		unsigned_ptr += 4 /* avoid alignment exceptions */;
		signed_ptr = ptrauth_sign_unauthenticated(
			unsigned_ptr,
			ptrauth_key_asia,
			0);
	} while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);

	return (uint64_t)unsigned_ptr;
}

/**
 * Returns a pointer which is guaranteed to be invalid under DA with the zero
 * discriminator.
 */
static uint64_t
panic_lockdown_pacda_get_invalid_ptr(void)
{
	char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
	char *signed_ptr = NULL;
	do {
		unsigned_ptr += 8 /* avoid alignment exceptions */;
		signed_ptr = ptrauth_sign_unauthenticated(
			unsigned_ptr,
			ptrauth_key_asda,
			0);
	} while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);

	return (uint64_t)unsigned_ptr;
}
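
/*
 * A minimal sketch (not compiled) of why the loops above terminate and what
 * the result is used for: the raw pointer is never signed, so it only looks
 * authenticated if the computed PAC for the key and zero discriminator happens
 * to leave the raw bits unchanged; the helpers step forward until the computed
 * signature differs. The usage below is hypothetical.
 */
#if 0
	uint64_t p = panic_lockdown_pacia_get_invalid_ptr();
	/*
	 * Authenticating p under IA/0 is now guaranteed to fail, either
	 * poisoning the pointer (pre-FEAT_FPAC) or raising an exception.
	 */
	void *bad = ptrauth_auth_data((void *)p, ptrauth_key_asia, 0);
#endif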

kern_return_t
arm64_panic_lockdown_test(void)
{
#if __has_feature(ptrauth_calls)
	uint64_t ia_invalid = panic_lockdown_pacia_get_invalid_ptr();
#endif /* ptrauth_calls */

	arm64_panic_lockdown_test_case_s tests[] = {
		{
			.name = "arm64_panic_lockdown_test_load",
			.func = &arm64_panic_lockdown_test_load,
			/* Trigger a null deref */
			.arg = (uint64_t)NULL,
			.expected_ec = ESR_EC_DABORT_EL1,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = false,
		},
		{
			.name = "arm64_panic_lockdown_test_gdbtrap",
			.func = &arm64_panic_lockdown_test_gdbtrap,
			.arg = 0,
			.expected_ec = ESR_EC_UNCATEGORIZED,
			/* GDBTRAP instructions should be allowed everywhere */
			.expect_lockdown_exceptions_masked = false,
			.expect_lockdown_exceptions_unmasked = false,
		},
#if __has_feature(ptrauth_calls)
		{
			.name = "arm64_panic_lockdown_test_pac_brk_c470",
			.func = &arm64_panic_lockdown_test_pac_brk_c470,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.name = "arm64_panic_lockdown_test_pac_brk_c471",
			.func = &arm64_panic_lockdown_test_pac_brk_c471,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.name = "arm64_panic_lockdown_test_pac_brk_c472",
			.func = &arm64_panic_lockdown_test_pac_brk_c472,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.name = "arm64_panic_lockdown_test_pac_brk_c473",
			.func = &arm64_panic_lockdown_test_pac_brk_c473,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.name = "arm64_panic_lockdown_test_telemetry_brk_ff00",
			.func = &arm64_panic_lockdown_test_telemetry_brk_ff00,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			/*
			 * PAC breakpoints are not the only breakpoints, ensure that other
			 * BRKs (like those used for telemetry) do not trigger lockdowns.
			 * This is necessary to avoid conflicts with features like UBSan
			 * telemetry (which could fire at any time in C code).
			 */
			.expect_lockdown_exceptions_masked = false,
			.expect_lockdown_exceptions_unmasked = false,
		},
		{
			.name = "arm64_panic_lockdown_test_br_auth_fail",
			.func = &arm64_panic_lockdown_test_br_auth_fail,
			.arg = ia_invalid,
			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_IABORT_EL1,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
			/*
			 * Pre-FEAT_FPACCOMBINE, BRAx branches to a poisoned PC so we
			 * expect to fault on the branch target rather than the branch
			 * itself. The exact ELR will likely be different from ia_invalid,
			 * but since the expect logic in sleh only matches on low bits (i.e.
			 * not bits which will be poisoned), this is fine.
			 * On FEAT_FPACCOMBINE devices, we will fault on the branch itself.
			 */
			.override_expected_fault_pc_valid = !gARM_FEAT_FPACCOMBINE,
			.override_expected_fault_pc = ia_invalid
		},
		{
			.name = "arm64_panic_lockdown_test_ldr_auth_fail",
			.func = &arm64_panic_lockdown_test_ldr_auth_fail,
			.arg = panic_lockdown_pacda_get_invalid_ptr(),
			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_DABORT_EL1,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.name = "arm64_panic_lockdown_test_copyio_poison",
			.func = &arm64_panic_lockdown_test_copyio,
			/* fake a poisoned kernel pointer by flipping the bottom PAC bit */
			.arg = ((uint64_t)-1) ^ (1LLU << (64 - T1SZ_BOOT)),
			.expected_ec = ESR_EC_DABORT_EL1,
			.expect_lockdown_exceptions_masked = false,
			.expect_lockdown_exceptions_unmasked = false,
			.override_expected_fault_pc_valid = true,
			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
		},
#if __ARM_ARCH_8_6__
		{
			.name = "arm64_panic_lockdown_test_fpac",
			.func = &arm64_panic_lockdown_test_fpac,
			.arg = ia_invalid,
			.expected_ec = ESR_EC_PAC_FAIL,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
#endif /* __ARM_ARCH_8_6__ */
#endif /* ptrauth_calls */
		{
			.name = "arm64_panic_lockdown_test_copyio",
			.func = &arm64_panic_lockdown_test_copyio,
			.arg = 0x0 /* load from NULL */,
			.expected_ec = ESR_EC_DABORT_EL1,
			.expect_lockdown_exceptions_masked = false,
			.expect_lockdown_exceptions_unmasked = false,
			.override_expected_fault_pc_valid = true,
			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
		},
	};

	size_t test_count = sizeof(tests) / sizeof(*tests);
	for (size_t i = 0; i < test_count; i++) {
		if (tests[i].precondition &&
		    !tests[i].precondition()) {
			T_LOG("%s skipped due to precondition check", tests[i].name);
			continue;
		}

		panic_lockdown_expect_test(
			"Exceptions unmasked",
			&tests[i],
			tests[i].expect_lockdown_exceptions_unmasked,
			/* mask_interrupts */ false);

		panic_lockdown_expect_test(
			"Exceptions masked",
			&tests[i],
			tests[i].expect_lockdown_exceptions_masked,
			/* mask_interrupts */ true);
	}

	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_invalid_stack",
	    arm64_panic_lockdown_test_sp1_invalid_stack,
	    arm64_panic_lockdown_test_pac_brk_c470,
	    arm64_panic_lockdown_test_sp1_invalid_stack_handler);

	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_exception_in_vector",
	    arm64_panic_lockdown_test_sp1_exception_in_vector,
	    el1_sp1_synchronous_raise_exception_in_vector,
	    arm64_panic_lockdown_test_sp1_exception_in_vector_handler);
	return KERN_SUCCESS;
}
#endif /* CONFIG_SPTM */


#if HAS_SPECRES

/*** CPS RCTX ***/


/*** SPECRES ***/

#if HAS_SPECRES2
/*
 * Execute a COSP RCTX instruction.
 */
static void
_cosprctx_exec(uint64_t raw)
{
	__asm__ volatile ("ISB SY");
	__asm__ volatile ("COSP RCTX, %0" :: "r" (raw));
	__asm__ volatile ("DSB SY");
	__asm__ volatile ("ISB SY");
}
#endif

/*
 * Execute a CFP RCTX instruction.
 */
static void
_cfprctx_exec(uint64_t raw)
{
	__asm__ volatile ("ISB SY");
	__asm__ volatile ("CFP RCTX, %0" :: "r" (raw));
	__asm__ volatile ("DSB SY");
	__asm__ volatile ("ISB SY");
}

/*
 * Execute a CPP RCTX instruction.
 */
static void
_cpprctx_exec(uint64_t raw)
{
	__asm__ volatile ("ISB SY");
	__asm__ volatile ("CPP RCTX, %0" :: "r" (raw));
	__asm__ volatile ("DSB SY");
	__asm__ volatile ("ISB SY");
}

/*
 * Execute a DVP RCTX instruction.
 */
static void
_dvprctx_exec(uint64_t raw)
{
	__asm__ volatile ("ISB SY");
	__asm__ volatile ("DVP RCTX, %0" :: "r" (raw));
	__asm__ volatile ("DSB SY");
	__asm__ volatile ("ISB SY");
}
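
/*
 * The four wrappers above share the same barrier sandwich: an ISB so the RCTX
 * operation starts from architecturally committed state, then DSB + ISB so its
 * effects complete and later instructions are refetched. A hypothetical
 * consolidation (not used above, shown only to document the pattern) could be:
 */
#if 0
#define SPECRES_EXEC(mnemonic, raw)                                   \
	do {                                                          \
		__asm__ volatile ("ISB SY");                          \
		__asm__ volatile (mnemonic " RCTX, %0" :: "r" (raw)); \
		__asm__ volatile ("DSB SY");                          \
		__asm__ volatile ("ISB SY");                          \
	} while (0)
#endif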

static void
_specres_do_test_std(void (*impl)(uint64_t raw))
{
	typedef struct {
		union {
			struct {
				uint64_t ASID:16;
				uint64_t GASID:1;
				uint64_t :7;
				uint64_t EL:2;
				uint64_t NS:1;
				uint64_t NSE:1;
				uint64_t :4;
				uint64_t VMID:16;
				uint64_t GVMID:1;
			};
			uint64_t raw;
		};
	} specres_ctx;

	assert(sizeof(specres_ctx) == 8);
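
	/*
	 * Worked example (not compiled) of how the bitfields above pack into
	 * the 64-bit context descriptor, as laid out by this ABI: ASID
	 * occupies bits [15:0], GASID bit 16, EL bits [25:24], NS bit 26,
	 * NSE bit 27, VMID bits [47:32], and GVMID bit 48. The field values
	 * are arbitrary.
	 */
#if 0
	specres_ctx ctx = {0};
	ctx.ASID  = 0x10; /* bits [15:0]  -> 0x0000000000000010 */
	ctx.GASID = 1;    /* bit  16      -> 0x0000000000010000 */
	ctx.EL    = 1;    /* bits [25:24] -> 0x0000000001000000 */
	ctx.NS    = 1;    /* bit  26      -> 0x0000000004000000 */
	ctx.VMID  = 0x22; /* bits [47:32] -> 0x0000002200000000 */
	assert(ctx.raw == 0x0000002205010010ULL);
#endif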

	/*
	 * Test various possible meaningful RCTX context IDs.
	 */

	/* el : EL0 / EL1 / EL2. */
	for (uint8_t el = 0; el < 3; el++) {
		/* Always non-secure. */
		const uint8_t ns = 1;
		const uint8_t nse = 0;

		/* Iterate over some couples of ASIDs / VMIDs. */
		for (uint16_t xxid = 0; xxid < 256; xxid++) {
			const uint16_t asid = (uint16_t) (xxid << 4);
			const uint16_t vmid = (uint16_t) (256 - (xxid << 4));

			/* Test 4 G[AS|VM]ID combinations. */
			for (uint8_t bid = 0; bid < 4; bid++) {
				const uint8_t gasid = bid & 1;
				/* Normalize to 0/1 so the 1-bit GVMID field isn't truncated to 0. */
				const uint8_t gvmid = (bid >> 1) & 1;

				/* Generate the context descriptor. */
				specres_ctx ctx = {0};
				ctx.ASID = asid;
				ctx.GASID = gasid;
				ctx.EL = el;
				ctx.NS = ns;
				ctx.NSE = nse;
				ctx.VMID = vmid;
				ctx.GVMID = gvmid;

				/* Execute the RCTX instruction. */
				(*impl)(ctx.raw);

				/* Insert some operation. */
				volatile uint8_t sum = 0;
				for (volatile uint8_t i = 0; i < 64; i++) {
					sum += i * sum + 3;
				}

				/* If EL0 is not targeted, we only need to do this once. */
				if (el != 0) {
					goto not_el0_skip;
				}
			}
		}

		/* EL0 skip. */
not_el0_skip:	;
	}
}

/*** RCTX ***/

static void
_rctx_do_test(void)
{
	_specres_do_test_std(&_cfprctx_exec);
	_specres_do_test_std(&_cpprctx_exec);
	_specres_do_test_std(&_dvprctx_exec);
#if HAS_SPECRES2
	_specres_do_test_std(&_cosprctx_exec);
#endif
}

kern_return_t
specres_test(void)
{
	/* Basic instructions test. */
	_cfprctx_exec(0);
	_cpprctx_exec(0);
	_dvprctx_exec(0);
#if HAS_SPECRES2
	_cosprctx_exec(0);
#endif

	/* More advanced instructions test. */
	_rctx_do_test();

	return KERN_SUCCESS;
}

#endif /* HAS_SPECRES */
#if BTI_ENFORCED
typedef uint64_t (bti_landing_pad_func_t)(void);
typedef uint64_t (bti_shim_func_t)(bti_landing_pad_func_t *);

extern bti_shim_func_t arm64_bti_test_jump_shim;
extern bti_shim_func_t arm64_bti_test_call_shim;

extern bti_landing_pad_func_t arm64_bti_test_func_with_no_landing_pad;
extern bti_landing_pad_func_t arm64_bti_test_func_with_call_landing_pad;
extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_landing_pad;
extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_call_landing_pad;
#if __has_feature(ptrauth_returns)
extern bti_landing_pad_func_t arm64_bti_test_func_with_pac_landing_pad;
#endif /* __has_feature(ptrauth_returns) */

typedef struct arm64_bti_test_func_case {
	const char *func_str;
	bti_landing_pad_func_t *func;
	uint64_t expect_return_value;
	uint8_t expect_call_ok;
	uint8_t expect_jump_ok;
} arm64_bti_test_func_case_s;

static volatile uintptr_t bti_exception_handler_pc = 0;

static bool
arm64_bti_test_exception_handler(arm_saved_state_t *state)
{
	uint64_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);

	if (class != ESR_EC_BTI_FAIL) {
		return false;
	}

	/* Capture any desired exception metrics */
	bti_exception_handler_pc = get_saved_state_pc(state);

	/* "Cancel" the function call by forging an early return */
	set_saved_state_pc(state, get_saved_state_lr(state));

	/* Clear BTYPE to prevent taking another exception after ERET */
	uint32_t spsr = get_saved_state_cpsr(state);
	spsr &= ~PSR_BTYPE_MASK;
	set_saved_state_cpsr(state, spsr);

	return true;
}

static void
arm64_bti_test_func_with_shim(
	uint8_t expect_ok,
	const char *shim_str,
	bti_shim_func_t *shim,
	arm64_bti_test_func_case_s *test_case)
{
	uint64_t result = -1;

	/* Capture BTI exceptions triggered by our target function */
	uintptr_t raw_func = (uintptr_t)ptrauth_strip(
		(void *)test_case->func,
		ptrauth_key_function_pointer);
	ml_expect_fault_pc_begin(arm64_bti_test_exception_handler, raw_func);
	bti_exception_handler_pc = 0;

	/*
	 * The assembly routines do not support C function type discriminators, so
	 * strip and resign with zero if needed
	 */
	bti_landing_pad_func_t *resigned = ptrauth_auth_and_resign(
		test_case->func,
		ptrauth_key_function_pointer,
		ptrauth_type_discriminator(bti_landing_pad_func_t),
		ptrauth_key_function_pointer, 0);

	result = shim(resigned);

	ml_expect_fault_end();

	if (!expect_ok && !bti_exception_handler_pc) {
		T_FAIL("Failed to hit expected exception!\n");
	} else if (!expect_ok && raw_func != bti_exception_handler_pc) {
		T_FAIL("Expected BTI exception at 0x%llx but got one at 0x%llx instead\n",
		    (unsigned long long)raw_func, (unsigned long long)bti_exception_handler_pc);
	} else if (expect_ok && bti_exception_handler_pc) {
		T_FAIL("Did not expect BTI exception but got one at 0x%llx\n",
		    (unsigned long long)bti_exception_handler_pc);
	} else if (expect_ok && result != test_case->expect_return_value) {
		T_FAIL("Incorrect test function result (expected=%llu, result=%llu)\n",
		    test_case->expect_return_value, result);
	} else {
		T_PASS("%s (shim=%s)\n", test_case->func_str, shim_str);
	}
}
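
/*
 * A minimal sketch (not compiled) of the strip-and-resign idiom used above:
 * ptrauth_auth_and_resign() authenticates under the old (key, discriminator)
 * pair and re-signs under the new one in a single operation, so the pointer is
 * never live in an unauthenticated form. The function type below is
 * hypothetical.
 */
#if 0
typedef void (example_fn_t)(void);

static example_fn_t *
example_resign_for_asm(example_fn_t *fp)
{
	/* C-signed (type discriminator) -> asm-callable (zero discriminator) */
	return ptrauth_auth_and_resign(fp,
	           ptrauth_key_function_pointer,
	           ptrauth_type_discriminator(example_fn_t),
	           ptrauth_key_function_pointer, 0);
}
#endif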

/**
 * Ensure that BTI exceptions are raised where expected, and only where
 * expected, by exhaustively testing every indirect branch flavor against
 * every landing pad option.
 */
kern_return_t
arm64_bti_test(void)
{
	static arm64_bti_test_func_case_s tests[] = {
		{
			.func_str = "arm64_bti_test_func_with_no_landing_pad",
			.func = &arm64_bti_test_func_with_no_landing_pad,
			.expect_return_value = 1,
			.expect_call_ok = 0,
			.expect_jump_ok = 0,
		},
		{
			.func_str = "arm64_bti_test_func_with_call_landing_pad",
			.func = &arm64_bti_test_func_with_call_landing_pad,
			.expect_return_value = 2,
			.expect_call_ok = 1,
			.expect_jump_ok = 0,
		},
		{
			.func_str = "arm64_bti_test_func_with_jump_landing_pad",
			.func = &arm64_bti_test_func_with_jump_landing_pad,
			.expect_return_value = 3,
			.expect_call_ok = 0,
			.expect_jump_ok = 1,
		},
		{
			.func_str = "arm64_bti_test_func_with_jump_call_landing_pad",
			.func = &arm64_bti_test_func_with_jump_call_landing_pad,
			.expect_return_value = 4,
			.expect_call_ok = 1,
			.expect_jump_ok = 1,
		},
#if __has_feature(ptrauth_returns)
		{
			.func_str = "arm64_bti_test_func_with_pac_landing_pad",
			.func = &arm64_bti_test_func_with_pac_landing_pad,
			.expect_return_value = 5,
			.expect_call_ok = 1,
			.expect_jump_ok = 0,
		},
#endif /* __has_feature(ptrauth_returns) */
	};

	size_t test_count = sizeof(tests) / sizeof(*tests);
	for (size_t i = 0; i < test_count; i++) {
		arm64_bti_test_func_case_s *test_case = tests + i;

		arm64_bti_test_func_with_shim(test_case->expect_call_ok,
		    "arm64_bti_test_call_shim",
		    arm64_bti_test_call_shim,
		    test_case);

		arm64_bti_test_func_with_shim(test_case->expect_jump_ok,
		    "arm64_bti_test_jump_shim",
		    arm64_bti_test_jump_shim,
		    test_case);
	}

	return KERN_SUCCESS;
}
#endif /* BTI_ENFORCED */


/**
 * Test the speculation guards.
 * We can't easily ensure that the guards actually behave correctly under
 * speculation, but we can at least ensure that the guards are non-speculatively
 * correct.
 */
kern_return_t
arm64_speculation_guard_test(void)
{
	uint64_t cookie1_64 = 0x5350454354524521ULL; /* SPECTRE! */
	uint64_t cookie2_64 = 0x5941592043505553ULL; /* YAY CPUS */
	uint32_t cookie1_32 = (uint32_t)cookie1_64;
	uint32_t cookie2_32 = (uint32_t)cookie2_64;
	uint64_t result64 = 0;
	uint32_t result32 = 0;
	bool result_valid;

	/*
	 * Test the zeroing guard.
	 * Since failing the guard triggers a panic, we don't actually test that
	 * part as part of the automated tests.
	 */

	result64 = 0;
	SPECULATION_GUARD_ZEROING_XXX(
		/* out */ result64, /* out_valid */ result_valid,
		/* value */ cookie1_64,
		/* cmp_1 */ 0ULL, /* cmp_2 */ 1ULL, /* cc */ "NE");
	T_EXPECT(result_valid, "result valid");
	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 zeroing guard works");

	result64 = 0;
	SPECULATION_GUARD_ZEROING_XWW(
		/* out */ result64, /* out_valid */ result_valid,
		/* value */ cookie1_64,
		/* cmp_1 */ 1U, /* cmp_2 */ 0U, /* cc */ "HI");
	T_EXPECT(result_valid, "result valid");
	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 32 zeroing guard works");

	result32 = 0;
	SPECULATION_GUARD_ZEROING_WXX(
		/* out */ result32, /* out_valid */ result_valid,
		/* value */ cookie1_32,
		/* cmp_1 */ -1LL, /* cmp_2 */ 4LL, /* cc */ "LT");
	T_EXPECT(result_valid, "result valid");
	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 64 zeroing guard works");

	result32 = 0;
	SPECULATION_GUARD_ZEROING_WWW(
		/* out */ result32, /* out_valid */ result_valid,
		/* value */ cookie1_32,
		/* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "GT");
	T_EXPECT(result_valid, "result valid");
	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 32 zeroing guard works");

	result32 = 0x41;
	SPECULATION_GUARD_ZEROING_WWW(
		/* out */ result32, /* out_valid */ result_valid,
		/* value */ cookie1_32,
		/* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "LT");
	T_EXPECT(!result_valid, "result invalid");
	T_EXPECT_EQ_UINT(result32, 0, "zeroing guard works with failing condition");

	/*
	 * Test the selection guard.
	 */

	result64 = 0;
	SPECULATION_GUARD_SELECT_XXX(
		/* out */ result64,
		/* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
		/* cc */ "EQ", /* sel_1 */ cookie1_64,
		/* n_cc */ "NE", /* sel_2 */ cookie2_64);
	T_EXPECT_EQ_ULLONG(result64, cookie2_64, "64, 64 select guard works (1)");

	result64 = 0;
	SPECULATION_GUARD_SELECT_XXX(
		/* out */ result64,
		/* cmp_1 */ 32ULL, /* cmp_2 */ 32ULL,
		/* cc */ "EQ", /* sel_1 */ cookie1_64,
		/* n_cc */ "NE", /* sel_2 */ cookie2_64);
	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 select guard works (2)");

	result32 = 0;
	SPECULATION_GUARD_SELECT_WXX(
		/* out */ result32,
		/* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
		/* cc */ "HI", /* sel_1 */ cookie1_64,
		/* n_cc */ "LS", /* sel_2 */ cookie2_64);
	T_EXPECT_EQ_ULLONG(result32, cookie2_32, "32, 64 select guard works (1)");

	result32 = 0;
	SPECULATION_GUARD_SELECT_WXX(
		/* out */ result32,
		/* cmp_1 */ 16ULL, /* cmp_2 */ 2ULL,
		/* cc */ "HI", /* sel_1 */ cookie1_64,
		/* n_cc */ "LS", /* sel_2 */ cookie2_64);
	T_EXPECT_EQ_ULLONG(result32, cookie1_32, "32, 64 select guard works (2)");

	return KERN_SUCCESS;
}
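
/*
 * A hedged sketch (not the macros' actual implementation) of the classic
 * zeroing-guard pattern the tests above exercise: compare, conditionally
 * select zero, then a CSDB barrier so the selection cannot be speculated
 * around. The helper name is hypothetical.
 */
#if 0
static inline uint64_t
example_speculation_safe_value(uint64_t value, uint64_t cmp_1, uint64_t cmp_2)
{
	uint64_t out;
	__asm__ volatile (
		"cmp  %[c1], %[c2]\n"           /* evaluate the guard condition */
		"csel %[out], %[v], xzr, ne\n"  /* keep value only if the condition holds */
		"csdb\n"                        /* speculation barrier for the CSEL */
		: [out] "=r" (out)
		: [v] "r" (value), [c1] "r" (cmp_1), [c2] "r" (cmp_2)
		: "cc");
	return out;
}
#endif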

extern void arm64_brk_lr_gpr(void);
extern void arm64_brk_lr_fault(void);

static NOKASAN bool
arm64_backtrace_test_fault_handler(arm_saved_state_t *state)
{
	/* Similar setup to backtrace_kernel_sysctl() */
	const unsigned int bt_len = 24;
	const size_t bt_size = sizeof(uint8_t) * bt_len;
	uint8_t *bt = kalloc_data(bt_size, Z_WAITOK | Z_ZERO);
	backtrace_info_t packed_info = BTI_NONE;

	/* Call the backtrace function */
	backtrace_packed(BTP_KERN_OFFSET_32, bt, bt_size, NULL, &packed_info);

	add_saved_state_pc(state, 4);
	return true;
}

/**
 * Make sure EL1 fleh doesn't push a bogus stack frame when LR is being used as
 * a GPR in the caller.
 *
 * This test writes a GPR-like value into LR that is >4GB away from any kernel
 * address and tries to run backtrace_packed() from a sync handler.
 * backtrace_packed() has an invariant that all addresses in the stack frame are
 * within 4GB of the kernel text.
 */
kern_return_t
arm64_backtrace_test(void)
{
	ml_expect_fault_pc_begin(arm64_backtrace_test_fault_handler, (uintptr_t)&arm64_brk_lr_fault);
	arm64_brk_lr_gpr();
	ml_expect_fault_end();

#if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
	/* Reset the debug data so it can be filled later if needed */
	debug_panic_lockdown_initiator_state.initiator_pc = 0;
#endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
	return KERN_SUCCESS;
}
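
/*
 * Sketch (not compiled) of the invariant exercised above: BTP_KERN_OFFSET_32
 * packs each return address as a 32-bit offset from the kernel text base, so
 * a frame address more than 4GB away cannot be represented. Both helpers
 * below are hypothetical, shown only to state the invariant.
 */
#if 0
	uintptr_t text_base = example_kernel_text_base();   /* hypothetical */
	uintptr_t frame_pc = example_frame_return_address(); /* hypothetical */
	/* A bogus LR-as-GPR value would violate this and must not be walked. */
	assert(frame_pc - text_base <= UINT32_MAX);
#endif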