1 /*
2 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
33 * Mellon University All Rights Reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright notice
37 * and this permission notice appear in all copies of the software,
38 * derivative works or modified versions, and any portions thereof, and that
39 * both notices appear in supporting documentation.
40 *
41 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * Carnegie Mellon requests users of this software to return to
46 *
47 * Software Distribution Coordinator or [email protected]
48 * School of Computer Science Carnegie Mellon University Pittsburgh PA
49 * 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon the
52 * rights to redistribute these changes.
53 */
54
55 #include <mach_ldebug.h>
56
57 #define LOCK_PRIVATE 1
58
59 #include <vm/pmap.h>
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_page_internal.h>
62 #include <vm/vm_kern_xnu.h>
63 #include <kern/kalloc.h>
64 #include <kern/cpu_number.h>
65 #include <kern/locks.h>
66 #include <kern/misc_protos.h>
67 #include <kern/thread.h>
68 #include <kern/processor.h>
69 #include <kern/sched_prim.h>
70 #include <kern/debug.h>
71 #include <string.h>
72 #include <tests/xnupost.h>
73
74 #if MACH_KDB
75 #include <ddb/db_command.h>
76 #include <ddb/db_output.h>
77 #include <ddb/db_sym.h>
78 #include <ddb/db_print.h>
79 #endif /* MACH_KDB */
80
81 #include <san/kasan.h>
82 #include <sys/kdebug.h>
83 #include <sys/munge.h>
84 #include <machine/cpu_capabilities.h>
85 #include <arm/cpu_data_internal.h>
86 #include <arm/pmap.h>
87
88 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
89 #include <arm64/amcc_rorgn.h>
90 #endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
91
92 #include <arm64/machine_machdep.h>
93
94 kern_return_t arm64_lock_test(void);
95 kern_return_t arm64_munger_test(void);
96 kern_return_t arm64_pan_test(void);
97 kern_return_t arm64_late_pan_test(void);
98 #if defined(HAS_APPLE_PAC)
99 #include <ptrauth.h>
100 kern_return_t arm64_ropjop_test(void);
101 #endif
102 #if defined(KERNEL_INTEGRITY_CTRR)
103 kern_return_t ctrr_test(void);
104 kern_return_t ctrr_test_cpu(void);
105 #endif
106 #if BTI_ENFORCED
107 kern_return_t arm64_bti_test(void);
108 #endif /* BTI_ENFORCED */
109 #if HAS_SPECRES
110 extern kern_return_t specres_test(void);
111 #endif
112
113 // exception handler ignores this fault address during PAN test
114 #if __ARM_PAN_AVAILABLE__
115 const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
116 vm_offset_t pan_test_addr = 0;
117 vm_offset_t pan_ro_addr = 0;
118 volatile int pan_exception_level = 0;
119 volatile char pan_fault_value = 0;
120 #endif
121
122 #if CONFIG_SPTM
123 kern_return_t arm64_panic_lockdown_test(void);
124 #endif /* CONFIG_SPTM */
125
126 #include <libkern/OSAtomic.h>
127 #define LOCK_TEST_ITERATIONS 50
128 #define LOCK_TEST_SETUP_TIMEOUT_SEC 15
129 static hw_lock_data_t lt_hw_lock;
130 static lck_spin_t lt_lck_spin_t;
131 static lck_mtx_t lt_mtx;
132 static lck_rw_t lt_rwlock;
133 static volatile uint32_t lt_counter = 0;
134 static volatile int lt_spinvolatile;
135 static volatile uint32_t lt_max_holders = 0;
136 static volatile uint32_t lt_upgrade_holders = 0;
137 static volatile uint32_t lt_max_upgrade_holders = 0;
138 static volatile uint32_t lt_num_holders = 0;
139 static volatile uint32_t lt_done_threads;
140 static volatile uint32_t lt_target_done_threads;
141 static volatile uint32_t lt_cpu_bind_id = 0;
142 static uint64_t lt_setup_timeout = 0;
143
144 static void
lt_note_another_blocking_lock_holder()145 lt_note_another_blocking_lock_holder()
146 {
147 hw_lock_lock(<_hw_lock, LCK_GRP_NULL);
148 lt_num_holders++;
149 lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
150 hw_lock_unlock(<_hw_lock);
151 }
152
153 static void
lt_note_blocking_lock_release()154 lt_note_blocking_lock_release()
155 {
156 hw_lock_lock(<_hw_lock, LCK_GRP_NULL);
157 lt_num_holders--;
158 hw_lock_unlock(<_hw_lock);
159 }
160
161 static void
lt_spin_a_little_bit()162 lt_spin_a_little_bit()
163 {
164 uint32_t i;
165
166 for (i = 0; i < 10000; i++) {
167 lt_spinvolatile++;
168 }
169 }
170
/*
 * Pause the calling thread briefly so other test threads get a chance to
 * run. NOTE(review): delay(100) — presumably 100 microseconds; confirm
 * against the kernel delay() contract.
 */
static void
lt_sleep_a_little_bit()
{
	delay(100);
}
176
177 static void
lt_grab_mutex()178 lt_grab_mutex()
179 {
180 lck_mtx_lock(<_mtx);
181 lt_note_another_blocking_lock_holder();
182 lt_sleep_a_little_bit();
183 lt_counter++;
184 lt_note_blocking_lock_release();
185 lck_mtx_unlock(<_mtx);
186 }
187
188 static void
lt_grab_mutex_with_try()189 lt_grab_mutex_with_try()
190 {
191 while (0 == lck_mtx_try_lock(<_mtx)) {
192 ;
193 }
194 lt_note_another_blocking_lock_holder();
195 lt_sleep_a_little_bit();
196 lt_counter++;
197 lt_note_blocking_lock_release();
198 lck_mtx_unlock(<_mtx);
199 }
200
201 static void
lt_grab_rw_exclusive()202 lt_grab_rw_exclusive()
203 {
204 lck_rw_lock_exclusive(<_rwlock);
205 lt_note_another_blocking_lock_holder();
206 lt_sleep_a_little_bit();
207 lt_counter++;
208 lt_note_blocking_lock_release();
209 lck_rw_done(<_rwlock);
210 }
211
212 static void
lt_grab_rw_exclusive_with_try()213 lt_grab_rw_exclusive_with_try()
214 {
215 while (0 == lck_rw_try_lock_exclusive(<_rwlock)) {
216 lt_sleep_a_little_bit();
217 }
218
219 lt_note_another_blocking_lock_holder();
220 lt_sleep_a_little_bit();
221 lt_counter++;
222 lt_note_blocking_lock_release();
223 lck_rw_done(<_rwlock);
224 }
225
226 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
227 * static void
228 * lt_grab_rw_shared()
229 * {
230 * lck_rw_lock_shared(<_rwlock);
231 * lt_counter++;
232 *
233 * lt_note_another_blocking_lock_holder();
234 * lt_sleep_a_little_bit();
235 * lt_note_blocking_lock_release();
236 *
237 * lck_rw_done(<_rwlock);
238 * }
239 */
240
241 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
242 * static void
243 * lt_grab_rw_shared_with_try()
244 * {
245 * while(0 == lck_rw_try_lock_shared(<_rwlock));
246 * lt_counter++;
247 *
248 * lt_note_another_blocking_lock_holder();
249 * lt_sleep_a_little_bit();
250 * lt_note_blocking_lock_release();
251 *
252 * lck_rw_done(<_rwlock);
253 * }
254 */
255
/*
 * Exercise the shared -> exclusive upgrade and exclusive -> shared
 * downgrade paths of the test rwlock, tracking how many threads ever hold
 * the upgraded (exclusive) side at once in lt_max_upgrade_holders.
 */
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	/* Prefer the try path; fall back to a blocking shared acquire. */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		/*
		 * NOTE(review): the reacquire below implies a failed upgrade
		 * leaves the lock dropped entirely — confirm against the
		 * lck_rw_lock_shared_to_exclusive() contract.
		 */
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	/* We now hold the lock exclusively; count concurrent upgraders. */
	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	/* Downgrade back to shared, spin a bit while shared, then release. */
	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}
294
295 #if __AMP__
296 const int limit = 1000000;
297 static int lt_stress_local_counters[MAX_CPUS];
298
299 lck_ticket_t lt_ticket_lock;
300 lck_grp_t lt_ticket_grp;
301
/*
 * Per-CPU body of the ticket-lock stress test (one instance runs bound to
 * each CPU). Uses lt_counter both as a rendezvous barrier during setup and
 * as the shared counter being stressed; records this CPU's increment count
 * in lt_stress_local_counters[cpuid] so the caller can check for fairness
 * and lost updates. Bails out (without recording) if setup exceeds
 * lt_setup_timeout.
 */
static void
lt_stress_ticket_lock()
{
	uint local_counter = 0;

	uint cpuid = cpu_number();

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	/* Phase 1 check-in: each thread bumps lt_counter once. */
	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
	lt_counter++;
	local_counter++;
	lck_ticket_unlock(&lt_ticket_lock);

	/* Wait until all test threads have finished any binding */
	while (lt_counter < lt_target_done_threads) {
		if (mach_absolute_time() > lt_setup_timeout) {
			kprintf("%s>cpu %d noticed that we exceeded setup timeout of %d seconds during initial setup phase (only %d out of %d threads checked in)",
			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter, lt_target_done_threads);
			return;
		}
		/* Yield to keep the CPUs available for the threads to bind */
		thread_yield_internal(1);
	}

	/* Phase 2 check-in: lt_counter climbs toward 2 * target. */
	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
	lt_counter++;
	local_counter++;
	lck_ticket_unlock(&lt_ticket_lock);

	/*
	 * Now that the test threads have finished any binding, wait
	 * until they are all actively spinning on-core (done yielding)
	 * so we get a fairly timed start.
	 */
	while (lt_counter < 2 * lt_target_done_threads) {
		if (mach_absolute_time() > lt_setup_timeout) {
			kprintf("%s>cpu %d noticed that we exceeded setup timeout of %d seconds during secondary setup phase (only %d out of %d threads checked in)",
			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter - lt_target_done_threads, lt_target_done_threads);
			return;
		}
		/* No yield here: stay on-core so all CPUs start together. */
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	/* Main stress loop: race all CPUs to push lt_counter up to limit.
	 * The re-check under the lock keeps the final value exactly at limit. */
	while (lt_counter < limit) {
		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		lck_ticket_unlock(&lt_ticket_lock);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
360 #endif
361
362 static void
lt_grab_hw_lock()363 lt_grab_hw_lock()
364 {
365 hw_lock_lock(<_hw_lock, LCK_GRP_NULL);
366 lt_counter++;
367 lt_spin_a_little_bit();
368 hw_lock_unlock(<_hw_lock);
369 }
370
371 static void
lt_grab_hw_lock_with_try()372 lt_grab_hw_lock_with_try()
373 {
374 while (0 == hw_lock_try(<_hw_lock, LCK_GRP_NULL)) {
375 ;
376 }
377 lt_counter++;
378 lt_spin_a_little_bit();
379 hw_lock_unlock(<_hw_lock);
380 }
381
382 static void
lt_grab_hw_lock_with_to()383 lt_grab_hw_lock_with_to()
384 {
385 (void)hw_lock_to(<_hw_lock, &hw_lock_spin_policy, LCK_GRP_NULL);
386 lt_counter++;
387 lt_spin_a_little_bit();
388 hw_lock_unlock(<_hw_lock);
389 }
390
391 static void
lt_grab_spin_lock()392 lt_grab_spin_lock()
393 {
394 lck_spin_lock(<_lck_spin_t);
395 lt_counter++;
396 lt_spin_a_little_bit();
397 lck_spin_unlock(<_lck_spin_t);
398 }
399
400 static void
lt_grab_spin_lock_with_try()401 lt_grab_spin_lock_with_try()
402 {
403 while (0 == lck_spin_try_lock(<_lck_spin_t)) {
404 ;
405 }
406 lt_counter++;
407 lt_spin_a_little_bit();
408 lck_spin_unlock(<_lck_spin_t);
409 }
410
411 static volatile boolean_t lt_thread_lock_grabbed;
412 static volatile boolean_t lt_thread_lock_success;
413
/*
 * Reset all shared lock-test state before each sub-test, recompute the
 * setup timeout deadline relative to now, and publish the stores with a
 * full memory barrier so freshly spawned test threads see them.
 */
static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;
	/* Reset timeout deadline out from current time */
	nanoseconds_to_absolutetime(LOCK_TEST_SETUP_TIMEOUT_SEC * NSEC_PER_SEC, &lt_setup_timeout);
	lt_setup_timeout += mach_absolute_time();

	/* Make all of the above visible to other CPUs before threads start. */
	OSMemoryBarrier();
}
431
/*
 * Helper thread body: wait until the driver thread signals (via
 * lt_thread_lock_grabbed) that it holds lt_hw_lock, then attempt a timed
 * acquisition with the give-up policy and publish the result in
 * lt_thread_lock_success (expected to fail/time out in the tests).
 */
static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock,
	    &hw_lock_test_give_up_policy, LCK_GRP_NULL);
	OSMemoryBarrier();
	/*
	 * NOTE(review): presumably balances a preemption-disable left by
	 * hw_lock_to() even on the timeout path — confirm against the
	 * hw_lock_to() contract.
	 */
	mp_enable_preemption();
}
445
/*
 * Helper thread body: wait for the driver thread's signal that it holds
 * lt_lck_spin_t, then attempt a spin try-lock and publish the result in
 * lt_thread_lock_success (expected to fail while the driver holds it).
 */
static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	OSMemoryBarrier();
}
457
458 static void
lt_trylock_thread(void * arg,wait_result_t wres __unused)459 lt_trylock_thread(void *arg, wait_result_t wres __unused)
460 {
461 void (*func)(void) = (void (*)(void))arg;
462
463 func();
464
465 OSIncrementAtomic((volatile SInt32*) <_done_threads);
466 }
467
468 static void
lt_start_trylock_thread(thread_continue_t func)469 lt_start_trylock_thread(thread_continue_t func)
470 {
471 thread_t thread;
472 kern_return_t kr;
473
474 kr = kernel_thread_start(lt_trylock_thread, func, &thread);
475 assert(kr == KERN_SUCCESS);
476
477 thread_deallocate(thread);
478 }
479
/*
 * Busy-wait (with short sleeps) until every spawned test thread has
 * checked in on lt_done_threads. Spins rather than using a wait/wakeup
 * primitive so the wait itself does not depend on the locks under test.
 */
static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}
491
/*
 * Single-threaded (plus short-lived helper threads) functional test of the
 * try-lock paths for mutexes, rwlocks, hw spinlocks, and lck_spin locks:
 * a try on an unheld lock must succeed, a try on a held lock must fail.
 * The helper-thread sections use the lt_thread_lock_grabbed /
 * lt_thread_lock_success handshake; on single-CPU systems the caller
 * temporarily re-enables preemption while holding a spinlock so the helper
 * can run and time out. Always returns KERN_SUCCESS; failures are reported
 * through T_ASSERT_*.
 */
static kern_return_t
lt_test_trylocks()
{
	boolean_t success;
	extern unsigned int real_ncpus;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	/*
	 * Timed hw lock: we take the lock with a timeout policy, then a
	 * helper thread's timed attempt must time out and fail.
	 */
	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, &hw_lock_test_give_up_policy, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	/* Same as above, but we take the lock with the plain hw_lock_lock(). */
	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	/* Helper thread's lck_spin try-lock must fail while we hold the lock. */
	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}
638
639 static void
lt_thread(void * arg,wait_result_t wres __unused)640 lt_thread(void *arg, wait_result_t wres __unused)
641 {
642 void (*func)(void) = (void (*)(void))arg;
643 uint32_t i;
644
645 for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
646 func();
647 }
648
649 OSIncrementAtomic((volatile SInt32*) <_done_threads);
650 }
651
652 static void
lt_start_lock_thread(thread_continue_t func)653 lt_start_lock_thread(thread_continue_t func)
654 {
655 thread_t thread;
656 kern_return_t kr;
657
658 kr = kernel_thread_start(lt_thread, func, &thread);
659 assert(kr == KERN_SUCCESS);
660
661 thread_deallocate(thread);
662 }
663
664 #if __AMP__
/*
 * Thread trampoline that first binds itself to a distinct CPU before
 * running the test function. Each thread atomically claims the next CPU id
 * from lt_cpu_bind_id, binds to the matching processor if one exists (ids
 * past the end of the processor list run unbound), blocks once so the bind
 * takes effect, then runs the test and checks in on lt_done_threads.
 */
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	/* Claim a unique CPU id for this thread. */
	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	/* Find the processor with that id, if any. */
	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	/* Block so the scheduler migrates us to the bound processor. */
	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
687
688 static void
lt_e_thread(void * arg,wait_result_t wres __unused)689 lt_e_thread(void *arg, wait_result_t wres __unused)
690 {
691 void (*func)(void) = (void (*)(void))arg;
692
693 thread_t thread = current_thread();
694
695 thread_bind_cluster_type(thread, 'e', false);
696
697 func();
698
699 OSIncrementAtomic((volatile SInt32*) <_done_threads);
700 }
701
702 static void
lt_p_thread(void * arg,wait_result_t wres __unused)703 lt_p_thread(void *arg, wait_result_t wres __unused)
704 {
705 void (*func)(void) = (void (*)(void))arg;
706
707 thread_t thread = current_thread();
708
709 thread_bind_cluster_type(thread, 'p', false);
710
711 func();
712
713 OSIncrementAtomic((volatile SInt32*) <_done_threads);
714 }
715
716 static void
lt_start_lock_thread_with_bind(thread_continue_t bind_type,thread_continue_t func)717 lt_start_lock_thread_with_bind(thread_continue_t bind_type, thread_continue_t func)
718 {
719 thread_t thread;
720 kern_return_t kr;
721
722 kr = kernel_thread_start(bind_type, func, &thread);
723 assert(kr == KERN_SUCCESS);
724
725 thread_deallocate(thread);
726 }
727 #endif /* __AMP__ */
728
729 static kern_return_t
lt_test_locks()730 lt_test_locks()
731 {
732 #if SCHED_HYGIENE_DEBUG
733 /*
734 * When testing, the preemption disable threshold may be hit (for
735 * example when testing a lock timeout). To avoid this, the preemption
736 * disable measurement is temporarily disabled during lock testing.
737 */
738 int old_mode = sched_preemption_disable_debug_mode;
739 if (old_mode == SCHED_HYGIENE_MODE_PANIC) {
740 sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
741 }
742 #endif /* SCHED_HYGIENE_DEBUG */
743
744 kern_return_t kr = KERN_SUCCESS;
745 lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
746 lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);
747
748 lck_mtx_init(<_mtx, lg, LCK_ATTR_NULL);
749 lck_rw_init(<_rwlock, lg, LCK_ATTR_NULL);
750 lck_spin_init(<_lck_spin_t, lg, LCK_ATTR_NULL);
751 hw_lock_init(<_hw_lock);
752
753 T_LOG("Testing locks.");
754
755 /* Try locks (custom) */
756 lt_reset();
757
758 T_LOG("Running try lock test.");
759 kr = lt_test_trylocks();
760 T_EXPECT_NULL(kr, "try lock test failed.");
761
762 /* Uncontended mutex */
763 T_LOG("Running uncontended mutex test.");
764 lt_reset();
765 lt_target_done_threads = 1;
766 lt_start_lock_thread(lt_grab_mutex);
767 lt_wait_for_lock_test_threads();
768 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
769 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
770
771 /* Contended mutex:try locks*/
772 T_LOG("Running contended mutex test.");
773 lt_reset();
774 lt_target_done_threads = 3;
775 lt_start_lock_thread(lt_grab_mutex);
776 lt_start_lock_thread(lt_grab_mutex);
777 lt_start_lock_thread(lt_grab_mutex);
778 lt_wait_for_lock_test_threads();
779 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
780 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
781
782 /* Contended mutex: try locks*/
783 T_LOG("Running contended mutex trylock test.");
784 lt_reset();
785 lt_target_done_threads = 3;
786 lt_start_lock_thread(lt_grab_mutex_with_try);
787 lt_start_lock_thread(lt_grab_mutex_with_try);
788 lt_start_lock_thread(lt_grab_mutex_with_try);
789 lt_wait_for_lock_test_threads();
790 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
791 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
792
793 /* Uncontended exclusive rwlock */
794 T_LOG("Running uncontended exclusive rwlock test.");
795 lt_reset();
796 lt_target_done_threads = 1;
797 lt_start_lock_thread(lt_grab_rw_exclusive);
798 lt_wait_for_lock_test_threads();
799 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
800 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
801
802 /* Uncontended shared rwlock */
803
804 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
805 * T_LOG("Running uncontended shared rwlock test.");
806 * lt_reset();
807 * lt_target_done_threads = 1;
808 * lt_start_lock_thread(lt_grab_rw_shared);
809 * lt_wait_for_lock_test_threads();
810 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
811 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
812 */
813
814 /* Contended exclusive rwlock */
815 T_LOG("Running contended exclusive rwlock test.");
816 lt_reset();
817 lt_target_done_threads = 3;
818 lt_start_lock_thread(lt_grab_rw_exclusive);
819 lt_start_lock_thread(lt_grab_rw_exclusive);
820 lt_start_lock_thread(lt_grab_rw_exclusive);
821 lt_wait_for_lock_test_threads();
822 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
823 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
824
825 /* One shared, two exclusive */
826 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
827 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
828 * lt_reset();
829 * lt_target_done_threads = 3;
830 * lt_start_lock_thread(lt_grab_rw_shared);
831 * lt_start_lock_thread(lt_grab_rw_exclusive);
832 * lt_start_lock_thread(lt_grab_rw_exclusive);
833 * lt_wait_for_lock_test_threads();
834 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
835 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
836 */
837
838 /* Four shared */
839 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
840 * T_LOG("Running test with four shared holders.");
841 * lt_reset();
842 * lt_target_done_threads = 4;
843 * lt_start_lock_thread(lt_grab_rw_shared);
844 * lt_start_lock_thread(lt_grab_rw_shared);
845 * lt_start_lock_thread(lt_grab_rw_shared);
846 * lt_start_lock_thread(lt_grab_rw_shared);
847 * lt_wait_for_lock_test_threads();
848 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
849 */
850
851 /* Three doing upgrades and downgrades */
852 T_LOG("Running test with threads upgrading and downgrading.");
853 lt_reset();
854 lt_target_done_threads = 3;
855 lt_start_lock_thread(lt_upgrade_downgrade_rw);
856 lt_start_lock_thread(lt_upgrade_downgrade_rw);
857 lt_start_lock_thread(lt_upgrade_downgrade_rw);
858 lt_wait_for_lock_test_threads();
859 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
860 T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
861 T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
862
863 /* Uncontended - exclusive trylocks */
864 T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
865 lt_reset();
866 lt_target_done_threads = 1;
867 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
868 lt_wait_for_lock_test_threads();
869 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
870 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
871
872 /* Uncontended - shared trylocks */
873 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
874 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
875 * lt_reset();
876 * lt_target_done_threads = 1;
877 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
878 * lt_wait_for_lock_test_threads();
879 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
880 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
881 */
882
883 /* Three doing exclusive trylocks */
884 T_LOG("Running test with threads doing exclusive rwlock trylocks.");
885 lt_reset();
886 lt_target_done_threads = 3;
887 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
888 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
889 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
890 lt_wait_for_lock_test_threads();
891 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
892 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
893
894 /* Three doing shared trylocks */
895 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
896 * T_LOG("Running test with threads doing shared rwlock trylocks.");
897 * lt_reset();
898 * lt_target_done_threads = 3;
899 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
900 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
901 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
902 * lt_wait_for_lock_test_threads();
903 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
904 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
905 */
906
907 /* Three doing various trylocks */
908 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
909 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
910 * lt_reset();
911 * lt_target_done_threads = 4;
912 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
913 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
914 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
915 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
916 * lt_wait_for_lock_test_threads();
917 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
918 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
919 */
920
921 /* HW locks */
922 T_LOG("Running test with hw_lock_lock()");
923 lt_reset();
924 lt_target_done_threads = 3;
925 lt_start_lock_thread(lt_grab_hw_lock);
926 lt_start_lock_thread(lt_grab_hw_lock);
927 lt_start_lock_thread(lt_grab_hw_lock);
928 lt_wait_for_lock_test_threads();
929 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
930
931 #if __AMP__
932 /* Ticket locks stress test */
933 T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
934 extern unsigned int real_ncpus;
935 lck_grp_init(<_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
936 lck_ticket_init(<_ticket_lock, <_ticket_grp);
937 lt_reset();
938 lt_target_done_threads = real_ncpus;
939 uint thread_count = 0;
940 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
941 lt_start_lock_thread_with_bind(lt_bound_thread, lt_stress_ticket_lock);
942 thread_count++;
943 }
944 T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
945 lt_wait_for_lock_test_threads();
946 bool starvation = false;
947 uint total_local_count = 0;
948 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
949 starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
950 total_local_count += lt_stress_local_counters[processor->cpu_id];
951 }
952 if (mach_absolute_time() > lt_setup_timeout) {
953 T_FAIL("Stress test setup timed out after %d seconds", LOCK_TEST_SETUP_TIMEOUT_SEC);
954 } else if (total_local_count != lt_counter) {
955 T_FAIL("Lock failure\n");
956 } else if (starvation) {
957 T_FAIL("Lock starvation found\n");
958 } else {
959 T_PASS("Ticket locks stress test with lck_ticket_lock() (%u total acquires)", total_local_count);
960 }
961
962 /* AMP ticket locks stress test */
963 T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
964 lt_reset();
965 lt_target_done_threads = real_ncpus;
966 thread_count = 0;
967 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
968 processor_set_t pset = processor->processor_set;
969 switch (pset->pset_cluster_type) {
970 case PSET_AMP_P:
971 lt_start_lock_thread_with_bind(lt_p_thread, lt_stress_ticket_lock);
972 break;
973 case PSET_AMP_E:
974 lt_start_lock_thread_with_bind(lt_e_thread, lt_stress_ticket_lock);
975 break;
976 default:
977 lt_start_lock_thread(lt_stress_ticket_lock);
978 break;
979 }
980 thread_count++;
981 }
982 T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
983 lt_wait_for_lock_test_threads();
984 #endif /* __AMP__ */
985
986 /* HW locks: trylocks */
987 T_LOG("Running test with hw_lock_try()");
988 lt_reset();
989 lt_target_done_threads = 3;
990 lt_start_lock_thread(lt_grab_hw_lock_with_try);
991 lt_start_lock_thread(lt_grab_hw_lock_with_try);
992 lt_start_lock_thread(lt_grab_hw_lock_with_try);
993 lt_wait_for_lock_test_threads();
994 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
995
996 /* HW locks: with timeout */
997 T_LOG("Running test with hw_lock_to()");
998 lt_reset();
999 lt_target_done_threads = 3;
1000 lt_start_lock_thread(lt_grab_hw_lock_with_to);
1001 lt_start_lock_thread(lt_grab_hw_lock_with_to);
1002 lt_start_lock_thread(lt_grab_hw_lock_with_to);
1003 lt_wait_for_lock_test_threads();
1004 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1005
1006 /* Spin locks */
1007 T_LOG("Running test with lck_spin_lock()");
1008 lt_reset();
1009 lt_target_done_threads = 3;
1010 lt_start_lock_thread(lt_grab_spin_lock);
1011 lt_start_lock_thread(lt_grab_spin_lock);
1012 lt_start_lock_thread(lt_grab_spin_lock);
1013 lt_wait_for_lock_test_threads();
1014 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1015
1016 /* Spin locks: trylocks */
1017 T_LOG("Running test with lck_spin_try_lock()");
1018 lt_reset();
1019 lt_target_done_threads = 3;
1020 lt_start_lock_thread(lt_grab_spin_lock_with_try);
1021 lt_start_lock_thread(lt_grab_spin_lock_with_try);
1022 lt_start_lock_thread(lt_grab_spin_lock_with_try);
1023 lt_wait_for_lock_test_threads();
1024 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1025
1026 #if SCHED_HYGIENE_DEBUG
1027 sched_preemption_disable_debug_mode = old_mode;
1028 #endif /* SCHED_HYGIENE_DEBUG */
1029
1030 return KERN_SUCCESS;
1031 }
1032
1033 #define MT_MAX_ARGS 8
1034 #define MT_INITIAL_VALUE 0xfeedbeef
1035 #define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
1036 #define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
1037 #define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
1038
1039 typedef void (*sy_munge_t)(void*);
1040
1041 #define MT_FUNC(x) #x, x
1042 struct munger_test {
1043 const char *mt_name;
1044 sy_munge_t mt_func;
1045 uint32_t mt_in_words;
1046 uint32_t mt_nout;
1047 uint64_t mt_expected[MT_MAX_ARGS];
1048 } munger_tests[] = {
1049 {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
1050 {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
1051 {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1052 {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1053 {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1054 {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1055 {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1056 {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1057 {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
1058 {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1059 {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1060 {MT_FUNC(munge_wwlllll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1061 {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1062 {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1063 {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1064 {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1065 {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1066 {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1067 {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1068 {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1069 {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1070 {MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1071 {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1072 {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1073 {MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1074 {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1075 {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1076 {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1077 {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1078 {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1079 {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1080 {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1081 {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1082 {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1083 {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
1084 {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1085 {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1086 {MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1087 {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
1088 {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
1089 {MT_FUNC(munge_lww), 4, 3, {MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1090 {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1091 {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1092 {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1093 {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
1094 };
1095
1096 #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
1097
1098 static void
mt_reset(uint32_t in_words,size_t total_size,uint32_t * data)1099 mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
1100 {
1101 uint32_t i;
1102
1103 for (i = 0; i < in_words; i++) {
1104 data[i] = MT_INITIAL_VALUE;
1105 }
1106
1107 if (in_words * sizeof(uint32_t) < total_size) {
1108 bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
1109 }
1110 }
1111
1112 static void
mt_test_mungers()1113 mt_test_mungers()
1114 {
1115 uint64_t data[MT_MAX_ARGS];
1116 uint32_t i, j;
1117
1118 for (i = 0; i < MT_TEST_COUNT; i++) {
1119 struct munger_test *test = &munger_tests[i];
1120 int pass = 1;
1121
1122 T_LOG("Testing %s", test->mt_name);
1123
1124 mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
1125 test->mt_func(data);
1126
1127 for (j = 0; j < test->mt_nout; j++) {
1128 if (data[j] != test->mt_expected[j]) {
1129 T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
1130 pass = 0;
1131 }
1132 }
1133 if (pass) {
1134 T_PASS(test->mt_name);
1135 }
1136 }
1137 }
1138
1139 #if defined(HAS_APPLE_PAC)
1140
1141
/*
 * Verify pointer-authentication (ROP/JOP) state: check that the IA and IB
 * key registers are programmed, then exercise a sign/auth round trip with
 * the IB key, including a deliberately corrupted pointer that must fail
 * authentication.
 */
kern_return_t
arm64_ropjop_test()
{
	T_LOG("Testing ROP/JOP");

	/* how is ROP/JOP configured */
	boolean_t config_rop_enabled = TRUE;
	boolean_t config_jop_enabled = TRUE;


	if (config_jop_enabled) {
		/* jop key */
		uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
		uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");

		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
	}

	if (config_rop_enabled) {
		/* rop key */
		uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
		uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");

		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

		/* sign a KVA (here, the address of a local variable) with the IB key */
		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

		/* assert it was signed (changed) */
		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

		/* authenticate the newly signed KVA */
		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

		/* assert the authed KVA is the original KVA */
		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

		/* corrupt a signed ptr, auth it, ensure auth failed */
		uint64_t kva_corrupted = kva_signed ^ 1;

		/* authenticate the corrupted pointer */
		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

		/* when AuthIB fails, bits 62:61 will be set to 2'b10
		 * (the masks below cover bits 62:61) */
		uint64_t auth_fail_mask = 3ULL << 61;
		uint64_t authib_fail = 2ULL << 61;

		/* assert the failed authIB of corrupted pointer is tagged */
		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
	}

	return KERN_SUCCESS;
}
1195 #endif /* defined(HAS_APPLE_PAC) */
1196
1197 #if __ARM_PAN_AVAILABLE__
1198
/* State shared between arm64_late_pan_test() and arm64_pan_test_thread(). */
struct pan_test_thread_args {
	volatile bool join;     /* set by the waiter once it is ready to be woken */
};
1202
/*
 * Helper thread for arm64_late_pan_test(): runs arm64_pan_test() on every
 * CPU by binding to each processor in turn, then spins until the parent
 * has set args->join, and finally wakes the parent.
 */
static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	struct pan_test_thread_args *args = arg;

	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
		/* the bind takes effect at the next context switch (thread_block) */
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
		arm64_pan_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	/* busy-wait until the parent has called assert_wait() and set join,
	 * so the thread_wakeup() below cannot be lost */
	while (!args->join) {
		;
	}

	thread_wakeup(args);
}
1227
/*
 * Late (multi-CPU) PAN test: spawn a kernel thread that runs
 * arm64_pan_test() on each processor, then block until it completes.
 */
kern_return_t
arm64_late_pan_test()
{
	thread_t thread;
	kern_return_t kr;

	struct pan_test_thread_args args;
	args.join = false;

	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);

	/* assert_wait() before publishing join: the helper spins on join and
	 * only calls thread_wakeup() afterwards, so the wakeup is never missed */
	assert_wait(&args, THREAD_UNINT);
	args.join = true;
	thread_block(THREAD_CONTINUE_NULL);
	return KERN_SUCCESS;
}
1247
1248 // Disable KASAN checking for PAN tests as the fixed commpage address doesn't have a shadow mapping
1249
/*
 * Expected-fault handler for accesses taken with PAN enabled.
 *
 * On an EL1 data-abort permission fault against a user-mapped address with
 * PAN set in the saved state, it bumps pan_exception_level and, at the
 * first level only, probes the address again from the handler itself to
 * prove the nested access also faults (PAN re-enabled on exception entry).
 * It then clears PAN in the saved state so the original instruction can be
 * rerun successfully.
 */
static NOKASAN bool
arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint64_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);
	uint64_t far = get_saved_state_far(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    (cpsr & PSR64_PAN) &&
	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
		++pan_exception_level;
		// read the user-accessible value to make sure
		// pan is enabled and produces a 2nd fault from
		// the exception handler
		if (pan_exception_level == 1) {
			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
			pan_fault_value = *(volatile char *)far;
			ml_expect_fault_end();
			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		}
		// this fault address is used for PAN test
		// disable PAN and rerun
		mask_saved_state_cpsr(state, 0, PSR64_PAN);

		retval = true;
	}

	return retval;
}
1282
/*
 * Expected-fault handler for the store issued while PAN is disabled.
 *
 * On an EL1 data-abort permission fault taken from a PAN-disabled context,
 * it verifies PAN was re-enabled on exception entry by provoking a nested
 * PAN fault on pan_test_addr, then skips the faulting store (PC += 4).
 */
static NOKASAN bool
arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint64_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    !(cpsr & PSR64_PAN)) {
		++pan_exception_level;
		// On an exception taken from a PAN-disabled context, verify
		// that PAN is re-enabled for the exception handler and that
		// accessing the test address produces a PAN fault.
		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
		pan_fault_value = *(volatile char *)pan_test_addr;
		ml_expect_fault_end();
		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		add_saved_state_pc(state, 4);

		retval = true;
	}

	return retval;
}
1309
/*
 * PAN (Privileged Access Never) test body.
 *
 * Maps a user-accessible page and verifies that (1) a kernel access to it
 * faults while PAN is set, with the expected two-level fault nesting, and
 * (2) PAN is re-enabled on exception entry from a PAN-disabled context.
 * Preemption is disabled across the PAN-disabled window.
 */
NOKASAN kern_return_t
arm64_pan_test()
{
	bool values_match = false;
	vm_offset_t priv_addr = 0;

	T_LOG("Testing PAN.");


	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;

	// Create an empty pmap, so we can map a user-accessible page
	pmap_t pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
	T_ASSERT(pmap != NULL, NULL);

	// Get a physical page to back the mapping
	vm_page_t vm_page = vm_page_grab();
	T_ASSERT(vm_page != VM_PAGE_NULL, NULL);
	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(vm_page);
	pmap_paddr_t pa = ptoa(pn);

	// Write to the underlying physical page through the physical aperture
	// so we can test against a known value
	priv_addr = phystokv((pmap_paddr_t)pa);
	*(volatile char *)priv_addr = 0xAB;

	// Map the page in the user address space at some, non-zero address
	pan_test_addr = PAGE_SIZE;
	pmap_enter(pmap, pan_test_addr, pn, VM_PROT_READ, VM_PROT_READ, 0, true, PMAP_MAPPING_TYPE_INFER);

	// Context-switch with PAN disabled is prohibited; prevent test logging from
	// triggering a voluntary context switch.
	mp_disable_preemption();

	// Insert the user's pmap root table pointer in TTBR0
	pmap_t old_pmap = vm_map_pmap(current_thread()->map);
	pmap_switch(pmap);

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
	ml_expect_fault_end();
	T_ASSERT(values_match, NULL);

	// Two faults expected: the initial access plus the handler's nested probe
	T_ASSERT(pan_exception_level == 2, NULL);

	// The handler cleared PAN in the saved state so the access could rerun
	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
	ml_expect_fault_end();

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pmap_switch(old_pmap);

	pan_ro_addr = 0;

	// Restore PAN before re-enabling preemption
	__builtin_arm_wsr("pan", 1);

	mp_enable_preemption();

	pmap_remove(pmap, pan_test_addr, pan_test_addr + PAGE_SIZE);
	pan_test_addr = 0;

	vm_page_lock_queues();
	vm_page_free(vm_page);
	vm_page_unlock_queues();
	pmap_destroy(pmap);

	return KERN_SUCCESS;
}
1402 #endif /* __ARM_PAN_AVAILABLE__ */
1403
1404
1405 kern_return_t
arm64_lock_test()1406 arm64_lock_test()
1407 {
1408 return lt_test_locks();
1409 }
1410
1411 kern_return_t
arm64_munger_test()1412 arm64_munger_test()
1413 {
1414 mt_test_mungers();
1415 return 0;
1416 }
1417
1418 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
1419 SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
1420 uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
1421 volatile uint64_t ctrr_exception_esr;
1422 vm_offset_t ctrr_test_va;
1423 vm_offset_t ctrr_test_page;
1424
1425 kern_return_t
ctrr_test(void)1426 ctrr_test(void)
1427 {
1428 processor_t p;
1429 boolean_t ctrr_disable = FALSE;
1430
1431 PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
1432
1433 #if CONFIG_CSR_FROM_DT
1434 if (csr_unsafe_kernel_text) {
1435 ctrr_disable = TRUE;
1436 }
1437 #endif /* CONFIG_CSR_FROM_DT */
1438
1439 if (ctrr_disable) {
1440 T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
1441 return KERN_SUCCESS;
1442 }
1443
1444 T_LOG("Running CTRR test.");
1445
1446 for (p = processor_list; p != NULL; p = p->processor_list) {
1447 thread_bind(p);
1448 thread_block(THREAD_CONTINUE_NULL);
1449 T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
1450 ctrr_test_cpu();
1451 }
1452
1453 /* unbind thread from specific cpu */
1454 thread_bind(PROCESSOR_NULL);
1455 thread_block(THREAD_CONTINUE_NULL);
1456
1457 return KERN_SUCCESS;
1458 }
1459
1460 static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)1461 ctrr_test_ro_fault_handler(arm_saved_state_t * state)
1462 {
1463 bool retval = false;
1464 uint64_t esr = get_saved_state_esr(state);
1465 esr_exception_class_t class = ESR_EC(esr);
1466 fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));
1467
1468 if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1469 ctrr_exception_esr = esr;
1470 add_saved_state_pc(state, 4);
1471 retval = true;
1472 }
1473
1474 return retval;
1475 }
1476
1477 static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)1478 ctrr_test_nx_fault_handler(arm_saved_state_t * state)
1479 {
1480 bool retval = false;
1481 uint64_t esr = get_saved_state_esr(state);
1482 esr_exception_class_t class = ESR_EC(esr);
1483 fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
1484
1485 if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1486 ctrr_exception_esr = esr;
1487 /* return to the instruction immediately after the call to NX page */
1488 set_saved_state_pc(state, get_saved_state_lr(state));
1489 #if BTI_ENFORCED
1490 /* Clear BTYPE to prevent taking another exception on ERET */
1491 uint32_t spsr = get_saved_state_cpsr(state);
1492 spsr &= ~PSR_BTYPE_MASK;
1493 set_saved_state_cpsr(state, spsr);
1494 #endif /* BTI_ENFORCED */
1495 retval = true;
1496 }
1497
1498 return retval;
1499 }
1500
1501 // Disable KASAN checking for CTRR tests as the test VA doesn't have a shadow mapping
1502
/* test CTRR on a cpu, caller to bind thread to desired cpu */
/* ctrr_test_page was reserved during bootstrap process */
NOKASAN kern_return_t
ctrr_test_cpu(void)
{
	ppnum_t ro_pn, nx_pn;
	uint64_t *ctrr_ro_test_ptr;
	void (*ctrr_nx_test_ptr)(void);
	kern_return_t kr;
	uint64_t prot = 0;
	extern vm_offset_t virtual_space_start;

	/* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */

#if (KERNEL_CTRR_VERSION == 3)
	const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_0_C11_C0_2");
	const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_0_C11_C0_3");
#else /* (KERNEL_CTRR_VERSION == 3) */
	const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_4_C15_C2_3");
	const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_4_C15_C2_4");
#endif /* (KERNEL_CTRR_VERSION == 3) */
	vm_offset_t rorgn_begin_va = phystokv(rorgn_lwr);
	/* NOTE(review): assumes the upper-bound register holds the start of the
	 * last protected page, hence +0x1000 for an exclusive end — confirm
	 * against the CTRR register definition */
	vm_offset_t rorgn_end_va = phystokv(rorgn_upr) + 0x1000;
	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;

	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");

	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");

	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);

	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT RO
	// fetch effective block level protections from table/block entries
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");

	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
	ctrr_ro_test_ptr = (void *)ctrr_test_va;

	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

	// should cause data abort
	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
	*ctrr_ro_test_ptr = 1;
	ml_expect_fault_end();

	// ensure write permission fault at expected level
	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault

	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);

	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT XN
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");

	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
#if __has_feature(ptrauth_calls)
	// the function pointer must be IA-signed before the indirect call under ptrauth
	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
	ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

	// should cause prefetch abort
	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
	ctrr_nx_test_ptr();
	ml_expect_fault_end();

	// TODO: ensure execute permission fault at expected level
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
	// NOTE(review): this is an instruction abort, yet the FSC is extracted with
	// the data-abort macro; ISS_IA_FSC would match ctrr_test_nx_fault_handler
	// above — confirm the two extractors are equivalent
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;

	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
	for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
		volatile uint64_t x = *(uint64_t *)addr;
		(void) x; /* read for side effect only */
	}

	return KERN_SUCCESS;
}
1614 #endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
1615
1616
1617 /**
1618 * Explicitly assert that xnu is still uniprocessor before running a POST test.
1619 *
1620 * In practice, tests in this module can safely manipulate CPU state without
1621 * fear of getting preempted. There's no way for cpu_boot_thread() to bring up
1622 * the secondary CPUs until StartIOKitMatching() completes, and arm64 orders
1623 * kern_post_test() before StartIOKitMatching().
1624 *
1625 * But this is also an implementation detail. Tests that rely on this ordering
1626 * should call assert_uniprocessor(), so that we can figure out a workaround
1627 * on the off-chance this ordering ever changes.
1628 */
1629 __unused static void
assert_uniprocessor(void)1630 assert_uniprocessor(void)
1631 {
1632 extern unsigned int real_ncpus;
1633 unsigned int ncpus = os_atomic_load(&real_ncpus, relaxed);
1634 T_QUIET; T_ASSERT_EQ_UINT(1, ncpus, "arm64 kernel POST tests should run before any secondary CPUs are brought up");
1635 }
1636
1637
1638 #if CONFIG_SPTM
1639 volatile uint8_t xnu_post_panic_lockdown_did_fire = false;
1640 typedef uint64_t (panic_lockdown_helper_fcn_t)(uint64_t raw);
1641 typedef bool (panic_lockdown_recovery_fcn_t)(arm_saved_state_t *);
1642
1643 /* SP0 vector tests */
1644 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_load;
1645 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_gdbtrap;
1646 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c470;
1647 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c471;
1648 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c472;
1649 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c473;
1650 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_telemetry_brk_ff00;
1651 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_br_auth_fail;
1652 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_ldr_auth_fail;
1653 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_fpac;
1654 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_copyio;
1655 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_bti_telemetry;
1656
1657 extern int gARM_FEAT_FPACCOMBINE;
1658
1659 /* SP1 vector tests */
1660 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_invalid_stack;
1661 extern bool arm64_panic_lockdown_test_sp1_invalid_stack_handler(arm_saved_state_t *);
1662 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_exception_in_vector;
1663 extern panic_lockdown_helper_fcn_t el1_sp1_synchronous_raise_exception_in_vector;
1664 extern bool arm64_panic_lockdown_test_sp1_exception_in_vector_handler(arm_saved_state_t *);
1665
/* One panic lockdown test case, driven by panic_lockdown_expect_test(). */
typedef struct arm64_panic_lockdown_test_case {
	const char *func_str;               /* stringified name of func, for logging */
	panic_lockdown_helper_fcn_t *func;  /* helper routine that raises the exception */
	uint64_t arg;                       /* raw argument forwarded to func */
	esr_exception_class_t expected_ec;  /* exception class the test handler accepts */
	bool expect_lockdown_exceptions_masked;   /* presumably: expect lockdown when run with interrupts masked — caller not visible here */
	bool expect_lockdown_exceptions_unmasked; /* presumably: expect lockdown when run with interrupts unmasked — caller not visible here */
	bool override_expected_fault_pc_valid; /* when true, arm the fault handler at override_expected_fault_pc instead of func */
	uint64_t override_expected_fault_pc;   /* explicit PC at which the fault is expected */
} arm64_panic_lockdown_test_case_s;
1676
1677 static arm64_panic_lockdown_test_case_s *arm64_panic_lockdown_active_test;
1678 static volatile bool arm64_panic_lockdown_caught_exception;
1679
1680 static bool
arm64_panic_lockdown_test_exception_handler(arm_saved_state_t * state)1681 arm64_panic_lockdown_test_exception_handler(arm_saved_state_t * state)
1682 {
1683 uint64_t esr = get_saved_state_esr(state);
1684 esr_exception_class_t class = ESR_EC(esr);
1685
1686 if (!arm64_panic_lockdown_active_test ||
1687 class != arm64_panic_lockdown_active_test->expected_ec) {
1688 return false;
1689 }
1690
1691 #if BTI_ENFORCED
1692 /* Clear BTYPE to prevent taking another exception on ERET */
1693 uint32_t spsr = get_saved_state_cpsr(state);
1694 spsr &= ~PSR_BTYPE_MASK;
1695 set_saved_state_cpsr(state, spsr);
1696 #endif /* BTI_ENFORCED */
1697
1698 /* We got the expected exception, recover by forging an early return */
1699 set_saved_state_pc(state, get_saved_state_lr(state));
1700 arm64_panic_lockdown_caught_exception = true;
1701
1702 return true;
1703 }
1704
/*
 * Run one panic lockdown test case: arm the expected-fault handler at the
 * helper's (possibly overridden) fault PC, invoke the helper — optionally
 * with interrupts masked — and report pass/fail based on whether lockdown
 * fired as expected AND the exception was caught.
 */
static void
panic_lockdown_expect_test(const char *treatment,
    arm64_panic_lockdown_test_case_s *test,
    bool expect_lockdown,
    bool mask_interrupts)
{
	int ints = 0;

	arm64_panic_lockdown_active_test = test;
	xnu_post_panic_lockdown_did_fire = false;
	arm64_panic_lockdown_caught_exception = false;

	uintptr_t fault_pc;
	if (test->override_expected_fault_pc_valid) {
		fault_pc = (uintptr_t)test->override_expected_fault_pc;
	} else {
		fault_pc = (uintptr_t)test->func;
#ifdef BTI_ENFORCED
		/* When BTI is enabled, we expect the fault to occur after the landing pad */
		fault_pc += 4;
#endif /* BTI_ENFORCED */
	}


	ml_expect_fault_pc_begin(
		arm64_panic_lockdown_test_exception_handler,
		fault_pc);

	if (mask_interrupts) {
		ints = ml_set_interrupts_enabled(FALSE);
	}

	(void)test->func(test->arg);

	/* restore the previous interrupt state before tearing down the
	 * expected-fault window */
	if (mask_interrupts) {
		(void)ml_set_interrupts_enabled(ints);
	}

	ml_expect_fault_end();

	if (expect_lockdown == xnu_post_panic_lockdown_did_fire &&
	    arm64_panic_lockdown_caught_exception) {
		T_PASS("%s + %s OK\n", test->func_str, treatment);
	} else {
		T_FAIL(
			"%s + %s FAIL (expected lockdown: %d, did lockdown: %d, caught exception: %d)\n",
			test->func_str, treatment,
			expect_lockdown, xnu_post_panic_lockdown_did_fire,
			arm64_panic_lockdown_caught_exception);
	}
}
1756
1757 static void
panic_lockdown_expect_fault_raw(const char * label,panic_lockdown_helper_fcn_t entrypoint,panic_lockdown_helper_fcn_t faulting_function,expected_fault_handler_t fault_handler)1758 panic_lockdown_expect_fault_raw(const char *label,
1759 panic_lockdown_helper_fcn_t entrypoint,
1760 panic_lockdown_helper_fcn_t faulting_function,
1761 expected_fault_handler_t fault_handler)
1762 {
1763 uint64_t test_success = 0;
1764 xnu_post_panic_lockdown_did_fire = false;
1765
1766 uintptr_t fault_pc = (uintptr_t)faulting_function;
1767 #ifdef BTI_ENFORCED
1768 /* When BTI is enabled, we expect the fault to occur after the landing pad */
1769 fault_pc += 4;
1770 #endif /* BTI_ENFORCED */
1771
1772 ml_expect_fault_pc_begin(fault_handler, fault_pc);
1773
1774 test_success = entrypoint(0);
1775
1776 ml_expect_fault_end();
1777
1778 if (test_success && xnu_post_panic_lockdown_did_fire) {
1779 T_PASS("%s OK\n", label);
1780 } else {
1781 T_FAIL("%s FAIL (test returned: %d, did lockdown: %d)\n",
1782 label, test_success, xnu_post_panic_lockdown_did_fire);
1783 }
1784 }
1785
1786 /**
1787 * Returns a pointer which is guranteed to be invalid under IA with the zero
1788 * discriminator.
1789 *
1790 * This is somewhat over complicating it since it's exceedingly likely that a
1791 * any given pointer will have a zero PAC (and thus break the test), but it's
1792 * easy enough to avoid the problem.
1793 */
1794 static uint64_t
panic_lockdown_pacia_get_invalid_ptr()1795 panic_lockdown_pacia_get_invalid_ptr()
1796 {
1797 char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1798 char *signed_ptr = NULL;
1799 do {
1800 unsigned_ptr += 4 /* avoid alignment exceptions */;
1801 signed_ptr = ptrauth_sign_unauthenticated(
1802 unsigned_ptr,
1803 ptrauth_key_asia,
1804 0);
1805 } while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1806
1807 return (uint64_t)unsigned_ptr;
1808 }
1809
1810 /**
1811 * Returns a pointer which is guranteed to be invalid under DA with the zero
1812 * discriminator.
1813 */
1814 static uint64_t
panic_lockdown_pacda_get_invalid_ptr(void)1815 panic_lockdown_pacda_get_invalid_ptr(void)
1816 {
1817 char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1818 char *signed_ptr = NULL;
1819 do {
1820 unsigned_ptr += 8 /* avoid alignment exceptions */;
1821 signed_ptr = ptrauth_sign_unauthenticated(
1822 unsigned_ptr,
1823 ptrauth_key_asda,
1824 0);
1825 } while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1826
1827 return (uint64_t)unsigned_ptr;
1828 }
1829
/*
 * Entry point for the panic lockdown test suite.
 *
 * Builds a table of exception-producing test cases, runs each one twice
 * (interrupts unmasked, then masked) via panic_lockdown_expect_test(), and
 * finally runs the two "raw" SP1 cases which manage their own reporting.
 * Always returns KERN_SUCCESS; individual case results are reported via
 * T_PASS/T_FAIL.
 */
kern_return_t
arm64_panic_lockdown_test(void)
{
#if __has_feature(ptrauth_calls)
	/* Shared IA-invalid pointer used by several PAC failure cases below */
	uint64_t ia_invalid = panic_lockdown_pacia_get_invalid_ptr();
#endif /* ptrauth_calls */
	arm64_panic_lockdown_test_case_s tests[] = {
		{
			.func_str = "arm64_panic_lockdown_test_load",
			.func = &arm64_panic_lockdown_test_load,
			/* Trigger a null deref */
			.arg = (uint64_t)NULL,
			.expected_ec = ESR_EC_DABORT_EL1,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = false,
		},
		{
			.func_str = "arm64_panic_lockdown_test_gdbtrap",
			.func = &arm64_panic_lockdown_test_gdbtrap,
			.arg = 0,
			.expected_ec = ESR_EC_UNCATEGORIZED,
			/* GDBTRAP instructions should be allowed everywhere */
			.expect_lockdown_exceptions_masked = false,
			.expect_lockdown_exceptions_unmasked = false,
		},
#if __has_feature(ptrauth_calls)
		{
			.func_str = "arm64_panic_lockdown_test_pac_brk_c470",
			.func = &arm64_panic_lockdown_test_pac_brk_c470,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.func_str = "arm64_panic_lockdown_test_pac_brk_c471",
			.func = &arm64_panic_lockdown_test_pac_brk_c471,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.func_str = "arm64_panic_lockdown_test_pac_brk_c472",
			.func = &arm64_panic_lockdown_test_pac_brk_c472,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.func_str = "arm64_panic_lockdown_test_pac_brk_c473",
			.func = &arm64_panic_lockdown_test_pac_brk_c473,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.func_str = "arm64_panic_lockdown_test_telemetry_brk_ff00",
			.func = &arm64_panic_lockdown_test_telemetry_brk_ff00,
			.arg = 0,
			.expected_ec = ESR_EC_BRK_AARCH64,
			/*
			 * PAC breakpoints are not the only breakpoints, ensure that other
			 * BRKs (like those used for telemetry) do not trigger lockdowns.
			 * This is necessary to avoid conflicts with features like UBSan
			 * telemetry (which could fire at any time in C code).
			 */
			.expect_lockdown_exceptions_masked = false,
			.expect_lockdown_exceptions_unmasked = false,
		},
		{
			.func_str = "arm64_panic_lockdown_test_br_auth_fail",
			.func = &arm64_panic_lockdown_test_br_auth_fail,
			.arg = ia_invalid,
			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_IABORT_EL1,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
			/*
			 * Pre-FEAT_FPACCOMBINED, BRAx branches to a poisoned PC so we
			 * expect to fault on the branch target rather than the branch
			 * itself. The exact ELR will likely be different from ia_invalid,
			 * but since the expect logic in sleh only matches on low bits (i.e.
			 * not bits which will be poisoned), this is fine.
			 * On FEAT_FPACCOMBINED devices, we will fault on the branch itself.
			 */
			.override_expected_fault_pc_valid = !gARM_FEAT_FPACCOMBINE,
			.override_expected_fault_pc = ia_invalid
		},
		{
			.func_str = "arm64_panic_lockdown_test_ldr_auth_fail",
			.func = &arm64_panic_lockdown_test_ldr_auth_fail,
			.arg = panic_lockdown_pacda_get_invalid_ptr(),
			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_DABORT_EL1,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
		{
			.func_str = "arm64_panic_lockdown_test_copyio_poison",
			.func = arm64_panic_lockdown_test_copyio,
			/* fake a poisoned kernel pointer by flipping the bottom PAC bit */
			.arg = ((uint64_t)-1) ^ (1LLU << (64 - T1SZ_BOOT)),
			.expected_ec = ESR_EC_DABORT_EL1,
			.expect_lockdown_exceptions_masked = false,
			.expect_lockdown_exceptions_unmasked = false,
		},
#if __ARM_ARCH_8_6__
		{
			.func_str = "arm64_panic_lockdown_test_fpac",
			.func = &arm64_panic_lockdown_test_fpac,
			.arg = ia_invalid,
			.expected_ec = ESR_EC_PAC_FAIL,
			.expect_lockdown_exceptions_masked = true,
			.expect_lockdown_exceptions_unmasked = true,
		},
#endif /* __ARM_ARCH_8_6__ */
#endif /* ptrauth_calls */
		{
			.func_str = "arm64_panic_lockdown_test_copyio",
			.func = arm64_panic_lockdown_test_copyio,
			.arg = 0x0 /* load from NULL */,
			.expected_ec = ESR_EC_DABORT_EL1,
			/* copyio faults are recoverable and must not lock down */
			.expect_lockdown_exceptions_masked = false,
			.expect_lockdown_exceptions_unmasked = false,
		},
	};

	/* Run every table case both with and without interrupts masked */
	size_t test_count = sizeof(tests) / sizeof(*tests);
	for (size_t i = 0; i < test_count; i++) {
		panic_lockdown_expect_test(
			"Exceptions unmasked",
			&tests[i],
			tests[i].expect_lockdown_exceptions_unmasked,
			/* mask_interrupts */ false);

		panic_lockdown_expect_test(
			"Exceptions masked",
			&tests[i],
			tests[i].expect_lockdown_exceptions_masked,
			/* mask_interrupts */ true);
	}

	/* SP1 cases: these report success via their own return value */
	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_invalid_stack",
	    arm64_panic_lockdown_test_sp1_invalid_stack,
	    arm64_panic_lockdown_test_pac_brk_c470,
	    arm64_panic_lockdown_test_sp1_invalid_stack_handler);

	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_exception_in_vector",
	    arm64_panic_lockdown_test_sp1_exception_in_vector,
	    el1_sp1_synchronous_raise_exception_in_vector,
	    arm64_panic_lockdown_test_sp1_exception_in_vector_handler);
	return KERN_SUCCESS;
}
1984 #endif /* CONFIG_SPTM */
1985
1986
1987
1988 #if HAS_SPECRES
1989
1990 /*** CPS RCTX ***/
1991
1992 #if HAS_CPSRCTX
1993
1994 static inline void
_cpsrctx_exec(uint64_t ctx)1995 _cpsrctx_exec(uint64_t ctx)
1996 {
1997 asm volatile ( "ISB SY");
1998 asm volatile ( "CPS RCTX, %0" :: "r"(ctx));
1999 asm volatile ( "DSB SY");
2000 asm volatile ( "ISB SY");
2001 }
2002
/*
 * Sweep CPS RCTX across a range of context descriptors (EL, ASID/VMID pairs,
 * guard bits, and GM/IS combinations) to smoke-test the instruction.
 */
static void
_cpsrctx_do_test(void)
{
	/* Bit layout of the CPS RCTX context descriptor operand. */
	typedef struct {
		union {
			struct {
				uint64_t ASID:16;
				uint64_t GASID:1;
				uint64_t :7;
				uint64_t EL:2;
				uint64_t NS:1;
				uint64_t NSE:1;
				uint64_t :4;
				uint64_t VMID:16;
				uint64_t GVMID:1;
				uint64_t :7;
				uint64_t GM:1;
				uint64_t :3;
				uint64_t IS:3;
				uint64_t :1;
			};
			uint64_t raw;
		};
	} cpsrctx_ctx;

	assert(sizeof(cpsrctx_ctx) == 8);

	/*
	 * Test various possible meaningful CPS_RCTX context ID.
	 */

	/* el : EL0 / EL1 / EL2. */
	for (uint8_t el = 0; el < 3; el++) {
		/* Always non-secure. */
		const uint8_t ns = 1;
		const uint8_t nse = 0;

		/* Iterate over some couples of ASIDs / VMIDs. */
		for (uint16_t xxid = 0; xxid < 256; xxid++) {
			const uint16_t asid = (uint16_t) (xxid << 4);
			const uint16_t vmid = (uint16_t) (256 - (xxid << 4));

			/* Test 4 G[AS|VM]ID combinations. */
			for (uint8_t bid = 0; bid < 4; bid++) {
				const uint8_t gasid = bid & 1;
				const uint8_t gvmid = bid & 2;

				/* Test all GM / IS combinations. */
				for (uint8_t gid = 0; gid < 0x8; gid++) {
					const uint8_t gm = gid & 1;
					const uint8_t is = gid >> 1;

					/* Generate the context descriptor. */
					cpsrctx_ctx ctx = {0};
					ctx.ASID = asid;
					ctx.GASID = gasid;
					ctx.EL = el;
					ctx.NS = ns;
					ctx.NSE = nse;
					ctx.VMID = vmid;
					ctx.GVMID = gvmid;
					ctx.GM = gm;
					ctx.IS = is;

					/* Execute the CPS instruction. */
					_cpsrctx_exec(ctx.raw);

					/* Insert some operation (busy work between restrictions). */
					volatile uint8_t sum = 0;
					for (volatile uint8_t i = 0; i < 64; i++) {
						sum += i * sum + 3;
					}
				}

				/* If el0 is not targeted, just need to do it once. */
				if (el != 0) {
					goto not_el0_skip;
				}
			}
		}

		/* El0 skip. */
not_el0_skip:       ;
	}
}
2088
2089 #endif /* HAS_CPSRCTX */
2090
2091 /*** SPECRES ***/
2092
2093 #if HAS_SPECRES2
2094 /*
2095 * Execute a COSP RCTX instruction.
2096 */
2097 static void
_cosprctx_exec(uint64_t raw)2098 _cosprctx_exec(uint64_t raw)
2099 {
2100 asm volatile ( "ISB SY");
2101 __asm__ volatile ("COSP RCTX, %0" :: "r" (raw));
2102 asm volatile ( "DSB SY");
2103 asm volatile ( "ISB SY");
2104 }
2105 #endif
2106
2107 /*
2108 * Execute a CFP RCTX instruction.
2109 */
2110 static void
_cfprctx_exec(uint64_t raw)2111 _cfprctx_exec(uint64_t raw)
2112 {
2113 asm volatile ( "ISB SY");
2114 __asm__ volatile ("CFP RCTX, %0" :: "r" (raw));
2115 asm volatile ( "DSB SY");
2116 asm volatile ( "ISB SY");
2117 }
2118
2119 /*
2120 * Execute a CPP RCTX instruction.
2121 */
2122 static void
_cpprctx_exec(uint64_t raw)2123 _cpprctx_exec(uint64_t raw)
2124 {
2125 asm volatile ( "ISB SY");
2126 __asm__ volatile ("CPP RCTX, %0" :: "r" (raw));
2127 asm volatile ( "DSB SY");
2128 asm volatile ( "ISB SY");
2129 }
2130
2131 /*
2132 * Execute a DVP RCTX instruction.
2133 */
2134 static void
_dvprctx_exec(uint64_t raw)2135 _dvprctx_exec(uint64_t raw)
2136 {
2137 asm volatile ( "ISB SY");
2138 __asm__ volatile ("DVP RCTX, %0" :: "r" (raw));
2139 asm volatile ( "DSB SY");
2140 asm volatile ( "ISB SY");
2141 }
2142
/*
 * Sweep the given SPECRES instruction wrapper (CFP/CPP/DVP/COSP RCTX) across
 * a range of context descriptors: ELs, ASID/VMID pairs, and guard bits.
 *
 * @param impl  Wrapper which issues one SPECRES instruction on a raw context.
 */
static void
_specres_do_test_std(void (*impl)(uint64_t raw))
{
	/* Bit layout of the SPECRES RCTX context descriptor operand. */
	typedef struct {
		union {
			struct {
				uint64_t ASID:16;
				uint64_t GASID:1;
				uint64_t :7;
				uint64_t EL:2;
				uint64_t NS:1;
				uint64_t NSE:1;
				uint64_t :4;
				uint64_t VMID:16;
				uint64_t GVMID:1;
			};
			uint64_t raw;
		};
	} specres_ctx;

	assert(sizeof(specres_ctx) == 8);

	/*
	 * Test various possible meaningful RCTX context IDs.
	 */

	/* el : EL0 / EL1 / EL2. */
	for (uint8_t el = 0; el < 3; el++) {
		/* Always non-secure. */
		const uint8_t ns = 1;
		const uint8_t nse = 0;

		/* Iterate over some couples of ASIDs / VMIDs. */
		for (uint16_t xxid = 0; xxid < 256; xxid++) {
			const uint16_t asid = (uint16_t) (xxid << 4);
			const uint16_t vmid = (uint16_t) (256 - (xxid << 4));

			/* Test 4 G[AS|VM]ID combinations. */
			for (uint8_t bid = 0; bid < 4; bid++) {
				const uint8_t gasid = bid & 1;
				const uint8_t gvmid = bid & 2;

				/* Generate the context descriptor. */
				specres_ctx ctx = {0};
				ctx.ASID = asid;
				ctx.GASID = gasid;
				ctx.EL = el;
				ctx.NS = ns;
				ctx.NSE = nse;
				ctx.VMID = vmid;
				ctx.GVMID = gvmid;

				/* Execute the supplied SPECRES instruction wrapper. */
				(*impl)(ctx.raw);

				/* Insert some operation (busy work between restrictions). */
				volatile uint8_t sum = 0;
				for (volatile uint8_t i = 0; i < 64; i++) {
					sum += i * sum + 3;
				}

				/* If el0 is not targeted, just need to do it once. */
				if (el != 0) {
					goto not_el0_skip;
				}
			}
		}

		/* El0 skip. */
not_el0_skip:       ;
	}
}
2215
2216 /*** RCTX ***/
2217
2218 static void
_rctx_do_test(void)2219 _rctx_do_test(void)
2220 {
2221 _specres_do_test_std(&_cfprctx_exec);
2222 _specres_do_test_std(&_cpprctx_exec);
2223 _specres_do_test_std(&_dvprctx_exec);
2224 #if HAS_SPECRES2
2225 _specres_do_test_std(&_cosprctx_exec);
2226 #endif
2227 #if HAS_CPSRCTX
2228 _cpsrctx_do_test();
2229 #endif
2230 }
2231
2232 kern_return_t
specres_test(void)2233 specres_test(void)
2234 {
2235 /* Basic instructions test. */
2236 _cfprctx_exec(0);
2237 _cpprctx_exec(0);
2238 _dvprctx_exec(0);
2239 #if HAS_SPECRES2
2240 _cosprctx_exec(0);
2241 #endif
2242 #if HAS_CPSRCTX
2243 _cpsrctx_exec(0);
2244 #endif
2245
2246 /* More advanced instructions test. */
2247 _rctx_do_test();
2248
2249 return KERN_SUCCESS;
2250 }
2251
2252 #endif /* HAS_SPECRES */
2253 #if BTI_ENFORCED
2254 typedef uint64_t (bti_landing_pad_func_t)(void);
2255 typedef uint64_t (bti_shim_func_t)(bti_landing_pad_func_t *);
2256
2257 extern bti_shim_func_t arm64_bti_test_jump_shim;
2258 extern bti_shim_func_t arm64_bti_test_call_shim;
2259
2260 extern bti_landing_pad_func_t arm64_bti_test_func_with_no_landing_pad;
2261 extern bti_landing_pad_func_t arm64_bti_test_func_with_call_landing_pad;
2262 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_landing_pad;
2263 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_call_landing_pad;
2264 #if __has_feature(ptrauth_returns)
2265 extern bti_landing_pad_func_t arm64_bti_test_func_with_pac_landing_pad;
2266 #endif /* __has_feature(ptrauth_returns) */
2267
/*
 * Describes a single BTI landing pad test target and the expected outcome of
 * reaching it via an indirect call vs. an indirect jump.
 */
typedef struct arm64_bti_test_func_case {
	const char *func_str;           /* human-readable target name for logging */
	bti_landing_pad_func_t *func;   /* target function to branch to */
	uint64_t expect_return_value;   /* value func returns when it runs */
	uint8_t expect_call_ok;         /* indirect call should not BTI-fault */
	uint8_t expect_jump_ok;         /* indirect jump should not BTI-fault */
} arm64_bti_test_func_case_s;
2275
2276 static volatile uintptr_t bti_exception_handler_pc = 0;
2277
2278 static bool
arm64_bti_test_exception_handler(arm_saved_state_t * state)2279 arm64_bti_test_exception_handler(arm_saved_state_t * state)
2280 {
2281 uint64_t esr = get_saved_state_esr(state);
2282 esr_exception_class_t class = ESR_EC(esr);
2283
2284 if (class != ESR_EC_BTI_FAIL) {
2285 return false;
2286 }
2287
2288 /* Capture any desired exception metrics */
2289 bti_exception_handler_pc = get_saved_state_pc(state);
2290
2291 /* "Cancel" the function call by forging an early return */
2292 set_saved_state_pc(state, get_saved_state_lr(state));
2293
2294 /* Clear BTYPE to prevent taking another exception after ERET */
2295 uint32_t spsr = get_saved_state_cpsr(state);
2296 spsr &= ~PSR_BTYPE_MASK;
2297 set_saved_state_cpsr(state, spsr);
2298
2299 return true;
2300 }
2301
2302 static void
arm64_bti_test_func_with_shim(uint8_t expect_ok,const char * shim_str,bti_shim_func_t * shim,arm64_bti_test_func_case_s * test_case)2303 arm64_bti_test_func_with_shim(
2304 uint8_t expect_ok,
2305 const char *shim_str,
2306 bti_shim_func_t *shim,
2307 arm64_bti_test_func_case_s *test_case)
2308 {
2309 uint64_t result = -1;
2310
2311 /* Capture BTI exceptions triggered by our target function */
2312 uintptr_t raw_func = (uintptr_t)ptrauth_strip(
2313 (void *)test_case->func,
2314 ptrauth_key_function_pointer);
2315 ml_expect_fault_pc_begin(arm64_bti_test_exception_handler, raw_func);
2316 bti_exception_handler_pc = 0;
2317
2318 /*
2319 * The assembly routines do not support C function type discriminators, so
2320 * strip and resign with zero if needed
2321 */
2322 bti_landing_pad_func_t *resigned = ptrauth_auth_and_resign(
2323 test_case->func,
2324 ptrauth_key_function_pointer,
2325 ptrauth_type_discriminator(bti_landing_pad_func_t),
2326 ptrauth_key_function_pointer, 0);
2327
2328 result = shim(resigned);
2329
2330 ml_expect_fault_end();
2331
2332 if (!expect_ok && raw_func != bti_exception_handler_pc) {
2333 T_FAIL("Expected BTI exception at 0x%llx but got one at %llx instead\n",
2334 raw_func, bti_exception_handler_pc);
2335 } else if (expect_ok && bti_exception_handler_pc) {
2336 T_FAIL("Did not expect BTI exception but got on at 0x%llx\n",
2337 bti_exception_handler_pc);
2338 } else if (!expect_ok && !bti_exception_handler_pc) {
2339 T_FAIL("Failed to hit expected exception!\n");
2340 } else if (expect_ok && result != test_case->expect_return_value) {
2341 T_FAIL("Incorrect test function result (expected=%llu, result=%llu\n)",
2342 test_case->expect_return_value, result);
2343 } else {
2344 T_PASS("%s (shim=%s)\n", test_case->func_str, shim_str);
2345 }
2346 }
2347
2348 /**
2349 * This test works to ensure that BTI exceptions are raised where expected
2350 * and only where they are expected by exhaustively testing all indirect branch
2351 * combinations with all landing pad options.
2352 */
2353 kern_return_t
arm64_bti_test(void)2354 arm64_bti_test(void)
2355 {
2356 static arm64_bti_test_func_case_s tests[] = {
2357 {
2358 .func_str = "arm64_bti_test_func_with_no_landing_pad",
2359 .func = &arm64_bti_test_func_with_no_landing_pad,
2360 .expect_return_value = 1,
2361 .expect_call_ok = 0,
2362 .expect_jump_ok = 0,
2363 },
2364 {
2365 .func_str = "arm64_bti_test_func_with_call_landing_pad",
2366 .func = &arm64_bti_test_func_with_call_landing_pad,
2367 .expect_return_value = 2,
2368 .expect_call_ok = 1,
2369 .expect_jump_ok = 0,
2370 },
2371 {
2372 .func_str = "arm64_bti_test_func_with_jump_landing_pad",
2373 .func = &arm64_bti_test_func_with_jump_landing_pad,
2374 .expect_return_value = 3,
2375 .expect_call_ok = 0,
2376 .expect_jump_ok = 1,
2377 },
2378 {
2379 .func_str = "arm64_bti_test_func_with_jump_call_landing_pad",
2380 .func = &arm64_bti_test_func_with_jump_call_landing_pad,
2381 .expect_return_value = 4,
2382 .expect_call_ok = 1,
2383 .expect_jump_ok = 1,
2384 },
2385 #if __has_feature(ptrauth_returns)
2386 {
2387 .func_str = "arm64_bti_test_func_with_pac_landing_pad",
2388 .func = &arm64_bti_test_func_with_pac_landing_pad,
2389 .expect_return_value = 5,
2390 .expect_call_ok = 1,
2391 .expect_jump_ok = 0,
2392 },
2393 #endif /* __has_feature(ptrauth_returns) */
2394 };
2395
2396 size_t test_count = sizeof(tests) / sizeof(*tests);
2397 for (size_t i = 0; i < test_count; i++) {
2398 arm64_bti_test_func_case_s *test_case = tests + i;
2399
2400 arm64_bti_test_func_with_shim(test_case->expect_call_ok,
2401 "arm64_bti_test_call_shim",
2402 arm64_bti_test_call_shim,
2403 test_case);
2404
2405
2406 arm64_bti_test_func_with_shim(test_case->expect_jump_ok,
2407 "arm64_bti_test_jump_shim",
2408 arm64_bti_test_jump_shim,
2409 test_case);
2410 }
2411
2412 return KERN_SUCCESS;
2413 }
2414 #endif /* BTI_ENFORCED */
2415
2416