1 /*
2 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
33 * Mellon University All Rights Reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright notice
37 * and this permission notice appear in all copies of the software,
38 * derivative works or modified versions, and any portions thereof, and that
39 * both notices appear in supporting documentation.
40 *
41 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * Carnegie Mellon requests users of this software to return to
46 *
47 * Software Distribution Coordinator or [email protected]
48 * School of Computer Science Carnegie Mellon University Pittsburgh PA
49 * 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon the
52 * rights to redistribute these changes.
53 */
54
55 #include <mach_ldebug.h>
56
57 #define LOCK_PRIVATE 1
58
59 #include <vm/pmap.h>
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_page_internal.h>
62 #include <vm/vm_kern_xnu.h>
63 #include <kern/kalloc.h>
64 #include <kern/cpu_number.h>
65 #include <kern/locks.h>
66 #include <kern/misc_protos.h>
67 #include <kern/thread.h>
68 #include <kern/processor.h>
69 #include <kern/sched_prim.h>
70 #include <kern/debug.h>
71 #include <string.h>
72 #include <tests/xnupost.h>
73
74 #if MACH_KDB
75 #include <ddb/db_command.h>
76 #include <ddb/db_output.h>
77 #include <ddb/db_sym.h>
78 #include <ddb/db_print.h>
79 #endif /* MACH_KDB */
80
81 #include <san/kasan.h>
82 #include <sys/kdebug.h>
83 #include <sys/munge.h>
84 #include <machine/cpu_capabilities.h>
85 #include <arm/cpu_data_internal.h>
86 #include <arm/pmap.h>
87
88 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
89 #include <arm64/amcc_rorgn.h>
90 #endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
91
92 #include <arm64/machine_machdep.h>
93
94 kern_return_t arm64_lock_test(void);
95 kern_return_t arm64_munger_test(void);
96 kern_return_t arm64_pan_test(void);
97 kern_return_t arm64_late_pan_test(void);
98 #if defined(HAS_APPLE_PAC)
99 #include <ptrauth.h>
100 kern_return_t arm64_ropjop_test(void);
101 #endif
102 #if defined(KERNEL_INTEGRITY_CTRR)
103 kern_return_t ctrr_test(void);
104 kern_return_t ctrr_test_cpu(void);
105 #endif
106 #if BTI_ENFORCED
107 kern_return_t arm64_bti_test(void);
108 #endif /* BTI_ENFORCED */
109 #if HAS_SPECRES
110 extern kern_return_t specres_test(void);
111 #endif
112
113 // exception handler ignores this fault address during PAN test
114 #if __ARM_PAN_AVAILABLE__
115 const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
116 vm_offset_t pan_test_addr = 0;
117 vm_offset_t pan_ro_addr = 0;
118 volatile int pan_exception_level = 0;
119 volatile char pan_fault_value = 0;
120 #endif
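/*
 * The PAN tests below drive these globals through the expected-fault
 * machinery. A minimal sketch of the pattern (the exception-handler side
 * lives outside this file and is assumed here; the calls shown are the ones
 * arm64_pan_test() itself uses):
 *
 *	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
 *	pan_fault_value = *(volatile char *)pan_test_addr;	// deliberate PAN fault
 *	ml_expect_fault_end();					// handler cleared PAN and retried
 */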
121
122 #if CONFIG_SPTM
123 kern_return_t arm64_panic_lockdown_test(void);
124 #endif /* CONFIG_SPTM */
125
126 #include <arm64/speculation.h>
127 kern_return_t arm64_speculation_guard_test(void);
128
129 #include <libkern/OSAtomic.h>
130 #define LOCK_TEST_ITERATIONS 50
131 #define LOCK_TEST_SETUP_TIMEOUT_SEC 15
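/*
 * Shared state for the lock tests below. Worker threads bump lt_num_holders
 * while they believe they hold a blocking lock and record the high-water mark
 * in lt_max_holders; for exclusive locks the tests expect that mark to stay
 * at 1. lt_counter counts successful acquisitions, and lt_done_threads is
 * compared against lt_target_done_threads so the spawning thread can wait for
 * every worker to check in before evaluating results.
 */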
132 static hw_lock_data_t lt_hw_lock;
133 static lck_spin_t lt_lck_spin_t;
134 static lck_mtx_t lt_mtx;
135 static lck_rw_t lt_rwlock;
136 static volatile uint32_t lt_counter = 0;
137 static volatile int lt_spinvolatile;
138 static volatile uint32_t lt_max_holders = 0;
139 static volatile uint32_t lt_upgrade_holders = 0;
140 static volatile uint32_t lt_max_upgrade_holders = 0;
141 static volatile uint32_t lt_num_holders = 0;
142 static volatile uint32_t lt_done_threads;
143 static volatile uint32_t lt_target_done_threads;
144 static volatile uint32_t lt_cpu_bind_id = 0;
145 static uint64_t lt_setup_timeout = 0;
146
147 static void
148 lt_note_another_blocking_lock_holder()
149 {
150 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
151 lt_num_holders++;
152 lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
153 hw_lock_unlock(&lt_hw_lock);
154 }
155
156 static void
157 lt_note_blocking_lock_release()
158 {
159 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
160 lt_num_holders--;
161 hw_lock_unlock(&lt_hw_lock);
162 }
163
164 static void
165 lt_spin_a_little_bit()
166 {
167 uint32_t i;
168
169 for (i = 0; i < 10000; i++) {
170 lt_spinvolatile++;
171 }
172 }
173
174 static void
175 lt_sleep_a_little_bit()
176 {
177 delay(100);
178 }
179
180 static void
181 lt_grab_mutex()
182 {
183 lck_mtx_lock(&lt_mtx);
184 lt_note_another_blocking_lock_holder();
185 lt_sleep_a_little_bit();
186 lt_counter++;
187 lt_note_blocking_lock_release();
188 lck_mtx_unlock(&lt_mtx);
189 }
190
191 static void
192 lt_grab_mutex_with_try()
193 {
194 while (0 == lck_mtx_try_lock(&lt_mtx)) {
195 ;
196 }
197 lt_note_another_blocking_lock_holder();
198 lt_sleep_a_little_bit();
199 lt_counter++;
200 lt_note_blocking_lock_release();
201 lck_mtx_unlock(&lt_mtx);
202 }
203
204 static void
205 lt_grab_rw_exclusive()
206 {
207 lck_rw_lock_exclusive(&lt_rwlock);
208 lt_note_another_blocking_lock_holder();
209 lt_sleep_a_little_bit();
210 lt_counter++;
211 lt_note_blocking_lock_release();
212 lck_rw_done(&lt_rwlock);
213 }
214
215 static void
216 lt_grab_rw_exclusive_with_try()
217 {
218 while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
219 lt_sleep_a_little_bit();
220 }
221
222 lt_note_another_blocking_lock_holder();
223 lt_sleep_a_little_bit();
224 lt_counter++;
225 lt_note_blocking_lock_release();
226 lck_rw_done(&lt_rwlock);
227 }
228
229 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
230 * static void
231 * lt_grab_rw_shared()
232 * {
233 * lck_rw_lock_shared(&lt_rwlock);
234 * lt_counter++;
235 *
236 * lt_note_another_blocking_lock_holder();
237 * lt_sleep_a_little_bit();
238 * lt_note_blocking_lock_release();
239 *
240 * lck_rw_done(&lt_rwlock);
241 * }
242 */
243
244 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
245 * static void
246 * lt_grab_rw_shared_with_try()
247 * {
248 * while(0 == lck_rw_try_lock_shared(&lt_rwlock));
249 * lt_counter++;
250 *
251 * lt_note_another_blocking_lock_holder();
252 * lt_sleep_a_little_bit();
253 * lt_note_blocking_lock_release();
254 *
255 * lck_rw_done(&lt_rwlock);
256 * }
257 */
258
259 static void
260 lt_upgrade_downgrade_rw()
261 {
262 boolean_t upgraded, success;
263
264 success = lck_rw_try_lock_shared(&lt_rwlock);
265 if (!success) {
266 lck_rw_lock_shared(&lt_rwlock);
267 }
268
269 lt_note_another_blocking_lock_holder();
270 lt_sleep_a_little_bit();
271 lt_note_blocking_lock_release();
272
273 upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
274 if (!upgraded) {
275 success = lck_rw_try_lock_exclusive(&lt_rwlock);
276
277 if (!success) {
278 lck_rw_lock_exclusive(&lt_rwlock);
279 }
280 }
281
282 lt_upgrade_holders++;
283 if (lt_upgrade_holders > lt_max_upgrade_holders) {
284 lt_max_upgrade_holders = lt_upgrade_holders;
285 }
286
287 lt_counter++;
288 lt_sleep_a_little_bit();
289
290 lt_upgrade_holders--;
291
292 lck_rw_lock_exclusive_to_shared(&lt_rwlock);
293
294 lt_spin_a_little_bit();
295 lck_rw_done(&lt_rwlock);
296 }
297
298 #if __AMP__
299 const int limit = 1000000;
300 static int lt_stress_local_counters[MAX_CPUS];
301
302 lck_ticket_t lt_ticket_lock;
303 lck_grp_t lt_ticket_grp;
304
305 static void
306 lt_stress_ticket_lock()
307 {
308 uint local_counter = 0;
309
310 uint cpuid = cpu_number();
311
312 kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);
313
314 lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
315 lt_counter++;
316 local_counter++;
317 lck_ticket_unlock(&lt_ticket_lock);
318
319 /* Wait until all test threads have finished any binding */
320 while (lt_counter < lt_target_done_threads) {
321 if (mach_absolute_time() > lt_setup_timeout) {
322 kprintf("%s>cpu %d noticed that we exceeded setup timeout of %d seconds during initial setup phase (only %d out of %d threads checked in)",
323 __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter, lt_target_done_threads);
324 return;
325 }
326 /* Yield to keep the CPUs available for the threads to bind */
327 thread_yield_internal(1);
328 }
329
330 lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
331 lt_counter++;
332 local_counter++;
333 lck_ticket_unlock(&lt_ticket_lock);
334
335 /*
336 * Now that the test threads have finished any binding, wait
337 * until they are all actively spinning on-core (done yielding)
338 * so we get a fairly timed start.
339 */
340 while (lt_counter < 2 * lt_target_done_threads) {
341 if (mach_absolute_time() > lt_setup_timeout) {
342 kprintf("%s>cpu %d noticed that we exceeded setup timeout of %d seconds during secondary setup phase (only %d out of %d threads checked in)",
343 __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter - lt_target_done_threads, lt_target_done_threads);
344 return;
345 }
346 }
347
348 kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);
349
350 while (lt_counter < limit) {
351 lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
352 if (lt_counter < limit) {
353 lt_counter++;
354 local_counter++;
355 }
356 lck_ticket_unlock(&lt_ticket_lock);
357 }
358
359 lt_stress_local_counters[cpuid] = local_counter;
360
361 kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
362 }
363 #endif
364
365 static void
366 lt_grab_hw_lock()
367 {
368 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
369 lt_counter++;
370 lt_spin_a_little_bit();
371 hw_lock_unlock(&lt_hw_lock);
372 }
373
374 static void
375 lt_grab_hw_lock_with_try()
376 {
377 while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
378 ;
379 }
380 lt_counter++;
381 lt_spin_a_little_bit();
382 hw_lock_unlock(&lt_hw_lock);
383 }
384
385 static void
386 lt_grab_hw_lock_with_to()
387 {
388 (void)hw_lock_to(&lt_hw_lock, &hw_lock_spin_policy, LCK_GRP_NULL);
389 lt_counter++;
390 lt_spin_a_little_bit();
391 hw_lock_unlock(&lt_hw_lock);
392 }
393
394 static void
395 lt_grab_spin_lock()
396 {
397 lck_spin_lock(&lt_lck_spin_t);
398 lt_counter++;
399 lt_spin_a_little_bit();
400 lck_spin_unlock(&lt_lck_spin_t);
401 }
402
403 static void
404 lt_grab_spin_lock_with_try()
405 {
406 while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
407 ;
408 }
409 lt_counter++;
410 lt_spin_a_little_bit();
411 lck_spin_unlock(&lt_lck_spin_t);
412 }
413
414 static volatile boolean_t lt_thread_lock_grabbed;
415 static volatile boolean_t lt_thread_lock_success;
416
417 static void
418 lt_reset()
419 {
420 lt_counter = 0;
421 lt_max_holders = 0;
422 lt_num_holders = 0;
423 lt_max_upgrade_holders = 0;
424 lt_upgrade_holders = 0;
425 lt_done_threads = 0;
426 lt_target_done_threads = 0;
427 lt_cpu_bind_id = 0;
428 /* Push the setup-timeout deadline out from the current time */
429 nanoseconds_to_absolutetime(LOCK_TEST_SETUP_TIMEOUT_SEC * NSEC_PER_SEC, &lt_setup_timeout);
430 lt_setup_timeout += mach_absolute_time();
431
432 OSMemoryBarrier();
433 }
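/*
 * Typical per-test sequence (a minimal sketch of how the helpers in this file
 * fit together; the real tests in lt_test_locks() below follow this shape):
 *
 *	lt_reset();
 *	lt_target_done_threads = 3;
 *	lt_start_lock_thread(lt_grab_mutex);	// each worker calls the grab
 *	lt_start_lock_thread(lt_grab_mutex);	// function LOCK_TEST_ITERATIONS
 *	lt_start_lock_thread(lt_grab_mutex);	// times (see lt_thread())
 *	lt_wait_for_lock_test_threads();
 *	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
 *	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
 */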
434
435 static void
436 lt_trylock_hw_lock_with_to()
437 {
438 OSMemoryBarrier();
439 while (!lt_thread_lock_grabbed) {
440 lt_sleep_a_little_bit();
441 OSMemoryBarrier();
442 }
443 lt_thread_lock_success = hw_lock_to(&lt_hw_lock,
444 &hw_lock_test_give_up_policy, LCK_GRP_NULL);
445 OSMemoryBarrier();
446 mp_enable_preemption();
447 }
448
449 static void
450 lt_trylock_spin_try_lock()
451 {
452 OSMemoryBarrier();
453 while (!lt_thread_lock_grabbed) {
454 lt_sleep_a_little_bit();
455 OSMemoryBarrier();
456 }
457 lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
458 OSMemoryBarrier();
459 }
460
461 static void
462 lt_trylock_thread(void *arg, wait_result_t wres __unused)
463 {
464 void (*func)(void) = (void (*)(void))arg;
465
466 func();
467
468 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
469 }
470
471 static void
472 lt_start_trylock_thread(thread_continue_t func)
473 {
474 thread_t thread;
475 kern_return_t kr;
476
477 kr = kernel_thread_start(lt_trylock_thread, func, &thread);
478 assert(kr == KERN_SUCCESS);
479
480 thread_deallocate(thread);
481 }
482
483 static void
484 lt_wait_for_lock_test_threads()
485 {
486 OSMemoryBarrier();
487 /* Spin to reduce dependencies */
488 while (lt_done_threads < lt_target_done_threads) {
489 lt_sleep_a_little_bit();
490 OSMemoryBarrier();
491 }
492 OSMemoryBarrier();
493 }
494
495 static kern_return_t
496 lt_test_trylocks()
497 {
498 boolean_t success;
499 extern unsigned int real_ncpus;
500
501 /*
502 * First mtx try lock succeeds, second fails.
503 */
504 success = lck_mtx_try_lock(&lt_mtx);
505 T_ASSERT_NOTNULL(success, "First mtx try lock");
506 success = lck_mtx_try_lock(&lt_mtx);
507 T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
508 lck_mtx_unlock(&lt_mtx);
509
510 /*
511 * After regular grab, can't try lock.
512 */
513 lck_mtx_lock(&lt_mtx);
514 success = lck_mtx_try_lock(&lt_mtx);
515 T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
516 lck_mtx_unlock(&lt_mtx);
517
518 /*
519 * Two shared try locks on a previously unheld rwlock succeed, and a
520 * subsequent exclusive attempt fails.
521 */
522 success = lck_rw_try_lock_shared(&lt_rwlock);
523 T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
524 success = lck_rw_try_lock_shared(&lt_rwlock);
525 T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
526 success = lck_rw_try_lock_exclusive(&lt_rwlock);
527 T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
528 lck_rw_done(&lt_rwlock);
529 lck_rw_done(&lt_rwlock);
530
531 /*
532 * After regular shared grab, can trylock
533 * for shared but not for exclusive.
534 */
535 lck_rw_lock_shared(&lt_rwlock);
536 success = lck_rw_try_lock_shared(&lt_rwlock);
537 T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
538 success = lck_rw_try_lock_exclusive(&lt_rwlock);
539 T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
540 lck_rw_done(&lt_rwlock);
541 lck_rw_done(&lt_rwlock);
542
543 /*
544 * An exclusive try lock succeeds, subsequent shared and exclusive
545 * attempts fail.
546 */
547 success = lck_rw_try_lock_exclusive(&lt_rwlock);
548 T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
549 success = lck_rw_try_lock_shared(&lt_rwlock);
550 T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
551 success = lck_rw_try_lock_exclusive(&lt_rwlock);
552 T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
553 lck_rw_done(&lt_rwlock);
554
555 /*
556 * After regular exclusive grab, neither kind of trylock succeeds.
557 */
558 lck_rw_lock_exclusive(&lt_rwlock);
559 success = lck_rw_try_lock_shared(&lt_rwlock);
560 T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
561 success = lck_rw_try_lock_exclusive(&lt_rwlock);
562 T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
563 lck_rw_done(&lt_rwlock);
564
565 /*
566 * The first spin lock attempt succeeds, the second fails.
567 */
568 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
569 T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
570 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
571 T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
572 hw_lock_unlock(&lt_hw_lock);
573
574 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
575 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
576 T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
577 hw_lock_unlock(&lt_hw_lock);
578
579 lt_reset();
580 lt_thread_lock_grabbed = false;
581 lt_thread_lock_success = true;
582 lt_target_done_threads = 1;
583 OSMemoryBarrier();
584 lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
585 success = hw_lock_to(&lt_hw_lock, &hw_lock_test_give_up_policy, LCK_GRP_NULL);
586 T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
587 if (real_ncpus == 1) {
588 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
589 }
590 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
591 lt_wait_for_lock_test_threads();
592 T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
593 if (real_ncpus == 1) {
594 mp_disable_preemption(); /* don't double-enable when we unlock */
595 }
596 hw_lock_unlock(&lt_hw_lock);
597
598 lt_reset();
599 lt_thread_lock_grabbed = false;
600 lt_thread_lock_success = true;
601 lt_target_done_threads = 1;
602 OSMemoryBarrier();
603 lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
604 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
605 if (real_ncpus == 1) {
606 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
607 }
608 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
609 lt_wait_for_lock_test_threads();
610 T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
611 if (real_ncpus == 1) {
612 mp_disable_preemption(); /* don't double-enable when we unlock */
613 }
614 hw_lock_unlock(&lt_hw_lock);
615
616 success = lck_spin_try_lock(&lt_lck_spin_t);
617 T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
618 success = lck_spin_try_lock(&lt_lck_spin_t);
619 T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
620 lck_spin_unlock(&lt_lck_spin_t);
621
622 lt_reset();
623 lt_thread_lock_grabbed = false;
624 lt_thread_lock_success = true;
625 lt_target_done_threads = 1;
626 lt_start_trylock_thread(lt_trylock_spin_try_lock);
627 lck_spin_lock(&lt_lck_spin_t);
628 if (real_ncpus == 1) {
629 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
630 }
631 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
632 lt_wait_for_lock_test_threads();
633 T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
634 if (real_ncpus == 1) {
635 mp_disable_preemption(); /* don't double-enable when we unlock */
636 }
637 lck_spin_unlock(&lt_lck_spin_t);
638
639 return KERN_SUCCESS;
640 }
641
642 static void
643 lt_thread(void *arg, wait_result_t wres __unused)
644 {
645 void (*func)(void) = (void (*)(void))arg;
646 uint32_t i;
647
648 for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
649 func();
650 }
651
652 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
653 }
654
655 static void
656 lt_start_lock_thread(thread_continue_t func)
657 {
658 thread_t thread;
659 kern_return_t kr;
660
661 kr = kernel_thread_start(lt_thread, func, &thread);
662 assert(kr == KERN_SUCCESS);
663
664 thread_deallocate(thread);
665 }
666
667 #if __AMP__
668 static void
669 lt_bound_thread(void *arg, wait_result_t wres __unused)
670 {
671 void (*func)(void) = (void (*)(void))arg;
672
673 int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);
674
675 processor_t processor = processor_list;
676 while ((processor != NULL) && (processor->cpu_id != cpuid)) {
677 processor = processor->processor_list;
678 }
679
680 if (processor != NULL) {
681 thread_bind(processor);
682 }
683
684 thread_block(THREAD_CONTINUE_NULL);
685
686 func();
687
688 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
689 }
690
691 static void
692 lt_e_thread(void *arg, wait_result_t wres __unused)
693 {
694 void (*func)(void) = (void (*)(void))arg;
695
696 thread_t thread = current_thread();
697
698 thread_bind_cluster_type(thread, 'e', false);
699
700 func();
701
702 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
703 }
704
705 static void
706 lt_p_thread(void *arg, wait_result_t wres __unused)
707 {
708 void (*func)(void) = (void (*)(void))arg;
709
710 thread_t thread = current_thread();
711
712 thread_bind_cluster_type(thread, 'p', false);
713
714 func();
715
716 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
717 }
718
719 static void
720 lt_start_lock_thread_with_bind(thread_continue_t bind_type, thread_continue_t func)
721 {
722 thread_t thread;
723 kern_return_t kr;
724
725 kr = kernel_thread_start(bind_type, func, &thread);
726 assert(kr == KERN_SUCCESS);
727
728 thread_deallocate(thread);
729 }
730 #endif /* __AMP__ */
731
732 static kern_return_t
733 lt_test_locks()
734 {
735 #if SCHED_HYGIENE_DEBUG
736 /*
737 * When testing, the preemption disable threshold may be hit (for
738 * example when testing a lock timeout). To avoid this, the preemption
739 * disable measurement is temporarily disabled during lock testing.
740 */
741 int old_mode = sched_preemption_disable_debug_mode;
742 if (old_mode == SCHED_HYGIENE_MODE_PANIC) {
743 sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
744 }
745 #endif /* SCHED_HYGIENE_DEBUG */
746
747 kern_return_t kr = KERN_SUCCESS;
748 lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
749 lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);
750
751 lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
752 lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
753 lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
754 hw_lock_init(&lt_hw_lock);
755
756 T_LOG("Testing locks.");
757
758 /* Try locks (custom) */
759 lt_reset();
760
761 T_LOG("Running try lock test.");
762 kr = lt_test_trylocks();
763 T_EXPECT_NULL(kr, "try lock test failed.");
764
765 /* Uncontended mutex */
766 T_LOG("Running uncontended mutex test.");
767 lt_reset();
768 lt_target_done_threads = 1;
769 lt_start_lock_thread(lt_grab_mutex);
770 lt_wait_for_lock_test_threads();
771 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
772 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
773
774 /* Contended mutex */
775 T_LOG("Running contended mutex test.");
776 lt_reset();
777 lt_target_done_threads = 3;
778 lt_start_lock_thread(lt_grab_mutex);
779 lt_start_lock_thread(lt_grab_mutex);
780 lt_start_lock_thread(lt_grab_mutex);
781 lt_wait_for_lock_test_threads();
782 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
783 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
784
785 /* Contended mutex: try locks */
786 T_LOG("Running contended mutex trylock test.");
787 lt_reset();
788 lt_target_done_threads = 3;
789 lt_start_lock_thread(lt_grab_mutex_with_try);
790 lt_start_lock_thread(lt_grab_mutex_with_try);
791 lt_start_lock_thread(lt_grab_mutex_with_try);
792 lt_wait_for_lock_test_threads();
793 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
794 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
795
796 /* Uncontended exclusive rwlock */
797 T_LOG("Running uncontended exclusive rwlock test.");
798 lt_reset();
799 lt_target_done_threads = 1;
800 lt_start_lock_thread(lt_grab_rw_exclusive);
801 lt_wait_for_lock_test_threads();
802 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
803 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
804
805 /* Uncontended shared rwlock */
806
807 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
808 * T_LOG("Running uncontended shared rwlock test.");
809 * lt_reset();
810 * lt_target_done_threads = 1;
811 * lt_start_lock_thread(lt_grab_rw_shared);
812 * lt_wait_for_lock_test_threads();
813 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
814 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
815 */
816
817 /* Contended exclusive rwlock */
818 T_LOG("Running contended exclusive rwlock test.");
819 lt_reset();
820 lt_target_done_threads = 3;
821 lt_start_lock_thread(lt_grab_rw_exclusive);
822 lt_start_lock_thread(lt_grab_rw_exclusive);
823 lt_start_lock_thread(lt_grab_rw_exclusive);
824 lt_wait_for_lock_test_threads();
825 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
826 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
827
828 /* One shared, two exclusive */
829 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
830 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
831 * lt_reset();
832 * lt_target_done_threads = 3;
833 * lt_start_lock_thread(lt_grab_rw_shared);
834 * lt_start_lock_thread(lt_grab_rw_exclusive);
835 * lt_start_lock_thread(lt_grab_rw_exclusive);
836 * lt_wait_for_lock_test_threads();
837 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
838 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
839 */
840
841 /* Four shared */
842 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
843 * T_LOG("Running test with four shared holders.");
844 * lt_reset();
845 * lt_target_done_threads = 4;
846 * lt_start_lock_thread(lt_grab_rw_shared);
847 * lt_start_lock_thread(lt_grab_rw_shared);
848 * lt_start_lock_thread(lt_grab_rw_shared);
849 * lt_start_lock_thread(lt_grab_rw_shared);
850 * lt_wait_for_lock_test_threads();
851 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
852 */
853
854 /* Three doing upgrades and downgrades */
855 T_LOG("Running test with threads upgrading and downgrading.");
856 lt_reset();
857 lt_target_done_threads = 3;
858 lt_start_lock_thread(lt_upgrade_downgrade_rw);
859 lt_start_lock_thread(lt_upgrade_downgrade_rw);
860 lt_start_lock_thread(lt_upgrade_downgrade_rw);
861 lt_wait_for_lock_test_threads();
862 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
863 T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
864 T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
865
866 /* Uncontended - exclusive trylocks */
867 T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
868 lt_reset();
869 lt_target_done_threads = 1;
870 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
871 lt_wait_for_lock_test_threads();
872 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
873 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
874
875 /* Uncontended - shared trylocks */
876 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
877 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
878 * lt_reset();
879 * lt_target_done_threads = 1;
880 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
881 * lt_wait_for_lock_test_threads();
882 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
883 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
884 */
885
886 /* Three doing exclusive trylocks */
887 T_LOG("Running test with threads doing exclusive rwlock trylocks.");
888 lt_reset();
889 lt_target_done_threads = 3;
890 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
891 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
892 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
893 lt_wait_for_lock_test_threads();
894 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
895 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
896
897 /* Three doing shared trylocks */
898 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
899 * T_LOG("Running test with threads doing shared rwlock trylocks.");
900 * lt_reset();
901 * lt_target_done_threads = 3;
902 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
903 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
904 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
905 * lt_wait_for_lock_test_threads();
906 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
907 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
908 */
909
910 /* Three doing various trylocks */
911 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
912 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
913 * lt_reset();
914 * lt_target_done_threads = 4;
915 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
916 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
917 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
918 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
919 * lt_wait_for_lock_test_threads();
920 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
921 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
922 */
923
924 /* HW locks */
925 T_LOG("Running test with hw_lock_lock()");
926 lt_reset();
927 lt_target_done_threads = 3;
928 lt_start_lock_thread(lt_grab_hw_lock);
929 lt_start_lock_thread(lt_grab_hw_lock);
930 lt_start_lock_thread(lt_grab_hw_lock);
931 lt_wait_for_lock_test_threads();
932 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
933
934 #if __AMP__
935 /* Ticket locks stress test */
936 T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
937 extern unsigned int real_ncpus;
938 lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
939 lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
940 lt_reset();
941 lt_target_done_threads = real_ncpus;
942 uint thread_count = 0;
943 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
944 lt_start_lock_thread_with_bind(lt_bound_thread, lt_stress_ticket_lock);
945 thread_count++;
946 }
947 T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
948 lt_wait_for_lock_test_threads();
949 bool starvation = false;
950 uint total_local_count = 0;
951 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
952 starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
953 total_local_count += lt_stress_local_counters[processor->cpu_id];
954 }
955 if (mach_absolute_time() > lt_setup_timeout) {
956 T_FAIL("Stress test setup timed out after %d seconds", LOCK_TEST_SETUP_TIMEOUT_SEC);
957 } else if (total_local_count != lt_counter) {
958 T_FAIL("Lock failure\n");
959 } else if (starvation) {
960 T_FAIL("Lock starvation found\n");
961 } else {
962 T_PASS("Ticket locks stress test with lck_ticket_lock() (%u total acquires)", total_local_count);
963 }
964
965 /* AMP ticket locks stress test */
966 T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
967 lt_reset();
968 lt_target_done_threads = real_ncpus;
969 thread_count = 0;
970 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
971 processor_set_t pset = processor->processor_set;
972 switch (pset->pset_cluster_type) {
973 case PSET_AMP_P:
974 lt_start_lock_thread_with_bind(lt_p_thread, lt_stress_ticket_lock);
975 break;
976 case PSET_AMP_E:
977 lt_start_lock_thread_with_bind(lt_e_thread, lt_stress_ticket_lock);
978 break;
979 default:
980 lt_start_lock_thread(lt_stress_ticket_lock);
981 break;
982 }
983 thread_count++;
984 }
985 T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
986 lt_wait_for_lock_test_threads();
987 #endif /* __AMP__ */
988
989 /* HW locks: trylocks */
990 T_LOG("Running test with hw_lock_try()");
991 lt_reset();
992 lt_target_done_threads = 3;
993 lt_start_lock_thread(lt_grab_hw_lock_with_try);
994 lt_start_lock_thread(lt_grab_hw_lock_with_try);
995 lt_start_lock_thread(lt_grab_hw_lock_with_try);
996 lt_wait_for_lock_test_threads();
997 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
998
999 /* HW locks: with timeout */
1000 T_LOG("Running test with hw_lock_to()");
1001 lt_reset();
1002 lt_target_done_threads = 3;
1003 lt_start_lock_thread(lt_grab_hw_lock_with_to);
1004 lt_start_lock_thread(lt_grab_hw_lock_with_to);
1005 lt_start_lock_thread(lt_grab_hw_lock_with_to);
1006 lt_wait_for_lock_test_threads();
1007 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1008
1009 /* Spin locks */
1010 T_LOG("Running test with lck_spin_lock()");
1011 lt_reset();
1012 lt_target_done_threads = 3;
1013 lt_start_lock_thread(lt_grab_spin_lock);
1014 lt_start_lock_thread(lt_grab_spin_lock);
1015 lt_start_lock_thread(lt_grab_spin_lock);
1016 lt_wait_for_lock_test_threads();
1017 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1018
1019 /* Spin locks: trylocks */
1020 T_LOG("Running test with lck_spin_try_lock()");
1021 lt_reset();
1022 lt_target_done_threads = 3;
1023 lt_start_lock_thread(lt_grab_spin_lock_with_try);
1024 lt_start_lock_thread(lt_grab_spin_lock_with_try);
1025 lt_start_lock_thread(lt_grab_spin_lock_with_try);
1026 lt_wait_for_lock_test_threads();
1027 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1028
1029 #if SCHED_HYGIENE_DEBUG
1030 sched_preemption_disable_debug_mode = old_mode;
1031 #endif /* SCHED_HYGIENE_DEBUG */
1032
1033 return KERN_SUCCESS;
1034 }
1035
1036 #define MT_MAX_ARGS 8
1037 #define MT_INITIAL_VALUE 0xfeedbeef
1038 #define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
1039 #define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
1040 #define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
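/*
 * Worked example (a sketch derived from the macros above and the munge_wl
 * entry in the table below): mt_reset() fills the input buffer with 32-bit
 * words of MT_INITIAL_VALUE, and the munger is expected to widen them in
 * place, so for munge_wl:
 *
 *	in:  { 0xfeedbeef, 0xfeedbeef, 0xfeedbeef }	// three 32-bit words
 *	out: { 0x00000000feedbeefULL,			// 'w' drops in zeros (MT_W_VAL)
 *	       0xfeedbeeffeedbeefULL }			// 'l' spans two words (MT_L_VAL)
 *
 * An 's' argument would instead sign-extend, yielding MT_S_VAL.
 */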
1041
1042 typedef void (*sy_munge_t)(void*);
1043
1044 #define MT_FUNC(x) #x, x
1045 struct munger_test {
1046 const char *mt_name;
1047 sy_munge_t mt_func;
1048 uint32_t mt_in_words;
1049 uint32_t mt_nout;
1050 uint64_t mt_expected[MT_MAX_ARGS];
1051 } munger_tests[] = {
1052 {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
1053 {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
1054 {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1055 {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1056 {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1057 {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1058 {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1059 {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1060 {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
1061 {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1062 {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1063 {MT_FUNC(munge_wwlllll), 12, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1064 {MT_FUNC(munge_wwllllll), 14, 8, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1065 {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1066 {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1067 {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1068 {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1069 {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1070 {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1071 {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1072 {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1073 {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1074 {MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1075 {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1076 {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1077 {MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1078 {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1079 {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1080 {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1081 {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1082 {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1083 {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1084 {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1085 {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1086 {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1087 {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
1088 {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1089 {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1090 {MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1091 {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
1092 {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
1093 {MT_FUNC(munge_lww), 4, 3, {MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1094 {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1095 {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1096 {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1097 {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
1098 };
1099
1100 #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
1101
1102 static void
1103 mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
1104 {
1105 uint32_t i;
1106
1107 for (i = 0; i < in_words; i++) {
1108 data[i] = MT_INITIAL_VALUE;
1109 }
1110
1111 if (in_words * sizeof(uint32_t) < total_size) {
1112 bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
1113 }
1114 }
1115
1116 static void
1117 mt_test_mungers()
1118 {
1119 uint64_t data[MT_MAX_ARGS];
1120 uint32_t i, j;
1121
1122 for (i = 0; i < MT_TEST_COUNT; i++) {
1123 struct munger_test *test = &munger_tests[i];
1124 int pass = 1;
1125
1126 T_LOG("Testing %s", test->mt_name);
1127
1128 mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
1129 test->mt_func(data);
1130
1131 for (j = 0; j < test->mt_nout; j++) {
1132 if (data[j] != test->mt_expected[j]) {
1133 T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
1134 pass = 0;
1135 }
1136 }
1137 if (pass) {
1138 T_PASS(test->mt_name);
1139 }
1140 }
1141 }
1142
1143 #if defined(HAS_APPLE_PAC)
1144
1145
1146 kern_return_t
1147 arm64_ropjop_test()
1148 {
1149 T_LOG("Testing ROP/JOP");
1150
1151 /* how is ROP/JOP configured */
1152 boolean_t config_rop_enabled = TRUE;
1153 boolean_t config_jop_enabled = TRUE;
1154
1155
1156 if (config_jop_enabled) {
1157 /* jop key */
1158 uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
1159 uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");
1160
1161 T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
1162 }
1163
1164 if (config_rop_enabled) {
1165 /* rop key */
1166 uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
1167 uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");
1168
1169 T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
1170
1171 /* sign a KVA (the address of a local stack variable) */
1172 uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);
1173
1174 /* assert it was signed (changed) */
1175 T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);
1176
1177 /* authenticate the newly signed KVA */
1178 uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);
1179
1180 /* assert the authed KVA is the original KVA */
1181 T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);
1182
1183 /* corrupt a signed ptr, auth it, ensure auth failed */
1184 uint64_t kva_corrupted = kva_signed ^ 1;
1185
1186 /* authenticate the corrupted pointer */
1187 kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);
1188
1189 /* when AuthIB fails, bits 63:62 will be set to 2'b10 */
1190 uint64_t auth_fail_mask = 3ULL << 61;
1191 uint64_t authib_fail = 2ULL << 61;
1192
1193 /* assert the failed authIB of corrupted pointer is tagged */
1194 T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
1195 }
1196
1197 return KERN_SUCCESS;
1198 }
1199 #endif /* defined(HAS_APPLE_PAC) */
1200
1201 #if __ARM_PAN_AVAILABLE__
1202
1203 struct pan_test_thread_args {
1204 volatile bool join;
1205 };
1206
1207 static void
1208 arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
1209 {
1210 T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1211
1212 struct pan_test_thread_args *args = arg;
1213
1214 for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
1215 thread_bind(p);
1216 thread_block(THREAD_CONTINUE_NULL);
1217 kprintf("Running PAN test on cpu %d\n", p->cpu_id);
1218 arm64_pan_test();
1219 }
1220
1221 /* unbind thread from specific cpu */
1222 thread_bind(PROCESSOR_NULL);
1223 thread_block(THREAD_CONTINUE_NULL);
1224
1225 while (!args->join) {
1226 ;
1227 }
1228
1229 thread_wakeup(args);
1230 }
1231
1232 kern_return_t
1233 arm64_late_pan_test()
1234 {
1235 thread_t thread;
1236 kern_return_t kr;
1237
1238 struct pan_test_thread_args args;
1239 args.join = false;
1240
1241 kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
1242 assert(kr == KERN_SUCCESS);
1243
1244 thread_deallocate(thread);
1245
1246 assert_wait(&args, THREAD_UNINT);
1247 args.join = true;
1248 thread_block(THREAD_CONTINUE_NULL);
1249 return KERN_SUCCESS;
1250 }
1251
1252 // Disable KASAN checking for PAN tests as the fixed commpage address doesn't have a shadow mapping
1253
1254 static NOKASAN bool
1255 arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
1256 {
1257 bool retval = false;
1258 uint64_t esr = get_saved_state_esr(state);
1259 esr_exception_class_t class = ESR_EC(esr);
1260 fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
1261 uint32_t cpsr = get_saved_state_cpsr(state);
1262 uint64_t far = get_saved_state_far(state);
1263
1264 if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
1265 (cpsr & PSR64_PAN) &&
1266 ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
1267 ++pan_exception_level;
1268 // read the user-accessible value to make sure
1269 // pan is enabled and produces a 2nd fault from
1270 // the exception handler
1271 if (pan_exception_level == 1) {
1272 ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
1273 pan_fault_value = *(volatile char *)far;
1274 ml_expect_fault_end();
1275 __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1276 }
1277 // this fault address is used for PAN test
1278 // disable PAN and rerun
1279 mask_saved_state_cpsr(state, 0, PSR64_PAN);
1280
1281 retval = true;
1282 }
1283
1284 return retval;
1285 }
1286
1287 static NOKASAN bool
1288 arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
1289 {
1290 bool retval = false;
1291 uint64_t esr = get_saved_state_esr(state);
1292 esr_exception_class_t class = ESR_EC(esr);
1293 fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
1294 uint32_t cpsr = get_saved_state_cpsr(state);
1295
1296 if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
1297 !(cpsr & PSR64_PAN)) {
1298 ++pan_exception_level;
1299 // On an exception taken from a PAN-disabled context, verify
1300 // that PAN is re-enabled for the exception handler and that
1301 // accessing the test address produces a PAN fault.
1302 ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
1303 pan_fault_value = *(volatile char *)pan_test_addr;
1304 ml_expect_fault_end();
1305 __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1306 add_saved_state_pc(state, 4);
1307
1308 retval = true;
1309 }
1310
1311 return retval;
1312 }
1313
1314 NOKASAN kern_return_t
1315 arm64_pan_test()
1316 {
1317 bool values_match = false;
1318 vm_offset_t priv_addr = 0;
1319
1320 T_LOG("Testing PAN.");
1321
1322
1323 T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");
1324
1325 T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1326
1327 pan_exception_level = 0;
1328 pan_fault_value = 0xDE;
1329
1330 // Create an empty pmap, so we can map a user-accessible page
1331 pmap_t pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
1332 T_ASSERT(pmap != NULL, NULL);
1333
1334 // Get a physical page to back the mapping
1335 vm_page_t vm_page = vm_page_grab();
1336 T_ASSERT(vm_page != VM_PAGE_NULL, NULL);
1337 ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(vm_page);
1338 pmap_paddr_t pa = ptoa(pn);
1339
1340 // Write to the underlying physical page through the physical aperture
1341 // so we can test against a known value
1342 priv_addr = phystokv((pmap_paddr_t)pa);
1343 *(volatile char *)priv_addr = 0xAB;
1344
1345 // Map the page in the user address space at some non-zero address
1346 pan_test_addr = PAGE_SIZE;
1347 pmap_enter(pmap, pan_test_addr, pn, VM_PROT_READ, VM_PROT_READ, 0, true, PMAP_MAPPING_TYPE_INFER);
1348
1349 // Context-switch with PAN disabled is prohibited; prevent test logging from
1350 // triggering a voluntary context switch.
1351 mp_disable_preemption();
1352
1353 // Insert the user's pmap root table pointer in TTBR0
1354 pmap_t old_pmap = vm_map_pmap(current_thread()->map);
1355 pmap_switch(pmap);
1356
1357 // Below should trigger a PAN exception as pan_test_addr is accessible
1358 // in user mode
1359 // The exception handler, upon recognizing the fault address is pan_test_addr,
1360 // will disable PAN and rerun this instruction successfully
1361 ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
1362 values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
1363 ml_expect_fault_end();
1364 T_ASSERT(values_match, NULL);
1365
1366 T_ASSERT(pan_exception_level == 2, NULL);
1367
1368 T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1369
1370 T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1371
1372 pan_exception_level = 0;
1373 pan_fault_value = 0xAD;
1374 pan_ro_addr = (vm_offset_t) &pan_ro_value;
1375
1376 // Force a permission fault while PAN is disabled to make sure PAN is
1377 // re-enabled during the exception handler.
1378 ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
1379 *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
1380 ml_expect_fault_end();
1381
1382 T_ASSERT(pan_exception_level == 2, NULL);
1383
1384 T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1385
1386 T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1387
1388 pmap_switch(old_pmap);
1389
1390 pan_ro_addr = 0;
1391
1392 __builtin_arm_wsr("pan", 1);
1393
1394 mp_enable_preemption();
1395
1396 pmap_remove(pmap, pan_test_addr, pan_test_addr + PAGE_SIZE);
1397 pan_test_addr = 0;
1398
1399 vm_page_lock_queues();
1400 vm_page_free(vm_page);
1401 vm_page_unlock_queues();
1402 pmap_destroy(pmap);
1403
1404 return KERN_SUCCESS;
1405 }
1406 #endif /* __ARM_PAN_AVAILABLE__ */
1407
1408
1409 kern_return_t
1410 arm64_lock_test()
1411 {
1412 return lt_test_locks();
1413 }
1414
1415 kern_return_t
1416 arm64_munger_test()
1417 {
1418 mt_test_mungers();
1419 return 0;
1420 }
1421
1422 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
1423 SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
1424 uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
1425 volatile uint64_t ctrr_exception_esr;
1426 vm_offset_t ctrr_test_va;
1427 vm_offset_t ctrr_test_page;
1428
1429 kern_return_t
1430 ctrr_test(void)
1431 {
1432 processor_t p;
1433 boolean_t ctrr_disable = FALSE;
1434
1435 PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
1436
1437 #if CONFIG_CSR_FROM_DT
1438 if (csr_unsafe_kernel_text) {
1439 ctrr_disable = TRUE;
1440 }
1441 #endif /* CONFIG_CSR_FROM_DT */
1442
1443 if (ctrr_disable) {
1444 T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
1445 return KERN_SUCCESS;
1446 }
1447
1448 T_LOG("Running CTRR test.");
1449
1450 for (p = processor_list; p != NULL; p = p->processor_list) {
1451 thread_bind(p);
1452 thread_block(THREAD_CONTINUE_NULL);
1453 T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
1454 ctrr_test_cpu();
1455 }
1456
1457 /* unbind thread from specific cpu */
1458 thread_bind(PROCESSOR_NULL);
1459 thread_block(THREAD_CONTINUE_NULL);
1460
1461 return KERN_SUCCESS;
1462 }
1463
1464 static bool
1465 ctrr_test_ro_fault_handler(arm_saved_state_t * state)
1466 {
1467 bool retval = false;
1468 uint64_t esr = get_saved_state_esr(state);
1469 esr_exception_class_t class = ESR_EC(esr);
1470 fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));
1471
1472 if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1473 ctrr_exception_esr = esr;
1474 add_saved_state_pc(state, 4);
1475 retval = true;
1476 }
1477
1478 return retval;
1479 }
1480
1481 static bool
1482 ctrr_test_nx_fault_handler(arm_saved_state_t * state)
1483 {
1484 bool retval = false;
1485 uint64_t esr = get_saved_state_esr(state);
1486 esr_exception_class_t class = ESR_EC(esr);
1487 fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
1488
1489 if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1490 ctrr_exception_esr = esr;
1491 /* return to the instruction immediately after the call to NX page */
1492 set_saved_state_pc(state, get_saved_state_lr(state));
1493 #if BTI_ENFORCED
1494 /* Clear BTYPE to prevent taking another exception on ERET */
1495 uint32_t spsr = get_saved_state_cpsr(state);
1496 spsr &= ~PSR_BTYPE_MASK;
1497 set_saved_state_cpsr(state, spsr);
1498 #endif /* BTI_ENFORCED */
1499 retval = true;
1500 }
1501
1502 return retval;
1503 }
1504
1505 // Disable KASAN checking for CTRR tests as the test VA doesn't have a shadow mapping
1506
1507 /* test CTRR on a cpu, caller to bind thread to desired cpu */
1508 /* ctrr_test_page was reserved during bootstrap process */
1509 NOKASAN kern_return_t
1510 ctrr_test_cpu(void)
1511 {
1512 ppnum_t ro_pn, nx_pn;
1513 uint64_t *ctrr_ro_test_ptr;
1514 void (*ctrr_nx_test_ptr)(void);
1515 kern_return_t kr;
1516 uint64_t prot = 0;
1517 extern vm_offset_t virtual_space_start;
1518
1519 /* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */
1520
1521 #if (KERNEL_CTRR_VERSION == 3)
1522 const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_0_C11_C0_2");
1523 const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_0_C11_C0_3");
1524 #else /* (KERNEL_CTRR_VERSION == 3) */
1525 const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_4_C15_C2_3");
1526 const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_4_C15_C2_4");
1527 #endif /* (KERNEL_CTRR_VERSION == 3) */
1528 vm_offset_t rorgn_begin_va = phystokv(rorgn_lwr);
1529 vm_offset_t rorgn_end_va = phystokv(rorgn_upr) + 0x1000;
1530 vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
1531 vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
1532
1533 T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
1534 T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");
1535
1536 ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
1537 nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
1538 T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");
1539
1540 T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
1541 (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
1542
1543 prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1544 T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");
1545
1546 T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
1547 kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
1548 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
1549 T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");
1550
1551 // assert entire mmu prot path (Hierarchical protection model) is NOT RO
1552 // fetch effective block level protections from table/block entries
1553 prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1554 T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");
1555
1556 ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
1557 ctrr_ro_test_ptr = (void *)ctrr_test_va;
1558
1559 T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);
1560
1561 // should cause data abort
1562 ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
1563 *ctrr_ro_test_ptr = 1;
1564 ml_expect_fault_end();
1565
1566 // ensure write permission fault at expected level
1567 // data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault
1568
1569 T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
1570 T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1571 T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");
1572
1573 ctrr_test_va = 0;
1574 ctrr_exception_esr = 0;
1575 pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1576
1577 T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);
1578
1579 kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
1580 VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
1581 T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");
1582
1583 // assert entire mmu prot path (Hierarchical protection model) is NOT XN
1584 prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1585 T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");
1586
1587 ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
1588 #if __has_feature(ptrauth_calls)
1589 ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
1590 #else
1591 ctrr_nx_test_ptr = (void *)ctrr_test_va;
1592 #endif
1593
1594 T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);
1595
1596 // should cause prefetch abort
1597 ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
1598 ctrr_nx_test_ptr();
1599 ml_expect_fault_end();
1600
1601 // TODO: ensure execute permission fault at expected level
1602 T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
1603 T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1604
1605 ctrr_test_va = 0;
1606 ctrr_exception_esr = 0;
1607
1608 pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1609
1610 T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
1611 for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
1612 volatile uint64_t x = *(uint64_t *)addr;
1613 (void) x; /* read for side effect only */
1614 }
1615
1616 return KERN_SUCCESS;
1617 }
1618 #endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
1619
1620
1621 /**
1622 * Explicitly assert that xnu is still uniprocessor before running a POST test.
1623 *
1624 * In practice, tests in this module can safely manipulate CPU state without
1625 * fear of getting preempted. There's no way for cpu_boot_thread() to bring up
1626 * the secondary CPUs until StartIOKitMatching() completes, and arm64 orders
1627 * kern_post_test() before StartIOKitMatching().
1628 *
1629 * But this is also an implementation detail. Tests that rely on this ordering
1630 * should call assert_uniprocessor(), so that we can figure out a workaround
1631 * on the off-chance this ordering ever changes.
1632 */
1633 __unused static void
1634 assert_uniprocessor(void)
1635 {
1636 extern unsigned int real_ncpus;
1637 unsigned int ncpus = os_atomic_load(&real_ncpus, relaxed);
1638 T_QUIET; T_ASSERT_EQ_UINT(1, ncpus, "arm64 kernel POST tests should run before any secondary CPUs are brought up");
1639 }
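/*
 * Usage sketch (editor's note; arm64_example_post_test is hypothetical): a
 * POST test that depends on the ordering described above would open with the
 * assertion so that any future change in bring-up order fails loudly:
 *
 *   kern_return_t
 *   arm64_example_post_test(void)
 *   {
 *           assert_uniprocessor();
 *           // ... exercise per-CPU state without fear of preemption ...
 *           return KERN_SUCCESS;
 *   }
 */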
1640
1641
1642 #if CONFIG_SPTM
1643 volatile uint8_t xnu_post_panic_lockdown_did_fire = false;
1644 typedef uint64_t (panic_lockdown_helper_fcn_t)(uint64_t raw);
1645 typedef bool (panic_lockdown_recovery_fcn_t)(arm_saved_state_t *);
1646
1647 /* SP0 vector tests */
1648 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_load;
1649 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_gdbtrap;
1650 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c470;
1651 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c471;
1652 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c472;
1653 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c473;
1654 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_telemetry_brk_ff00;
1655 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_br_auth_fail;
1656 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_ldr_auth_fail;
1657 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_fpac;
1658 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_copyio;
1659 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_bti_telemetry;
1660
1661 extern int gARM_FEAT_FPACCOMBINE;
1662
1663 /* SP1 vector tests */
1664 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_invalid_stack;
1665 extern bool arm64_panic_lockdown_test_sp1_invalid_stack_handler(arm_saved_state_t *);
1666 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_exception_in_vector;
1667 extern panic_lockdown_helper_fcn_t el1_sp1_synchronous_raise_exception_in_vector;
1668 extern bool arm64_panic_lockdown_test_sp1_exception_in_vector_handler(arm_saved_state_t *);
1669
1670 #if DEVELOPMENT || DEBUG
1671 extern struct panic_lockdown_initiator_state debug_panic_lockdown_initiator_state;
1672 #endif /* DEVELOPMENT || DEBUG */
1673
1674 typedef struct arm64_panic_lockdown_test_case {
1675 const char *func_str;
1676 panic_lockdown_helper_fcn_t *func;
1677 uint64_t arg;
1678 esr_exception_class_t expected_ec;
1679 bool expect_lockdown_exceptions_masked;
1680 bool expect_lockdown_exceptions_unmasked;
1681 bool override_expected_fault_pc_valid;
1682 uint64_t override_expected_fault_pc;
1683 } arm64_panic_lockdown_test_case_s;
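/*
 * Each case is driven twice by arm64_panic_lockdown_test(): once with
 * interrupts unmasked and once with them masked, with independent
 * expectations (the two expect_lockdown_* fields) for whether panic lockdown
 * should fire in each configuration.
 */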
1684
1685 static arm64_panic_lockdown_test_case_s *arm64_panic_lockdown_active_test;
1686 static volatile bool arm64_panic_lockdown_caught_exception;
1687
1688 static bool
1689 arm64_panic_lockdown_test_exception_handler(arm_saved_state_t * state)
1690 {
1691 uint64_t esr = get_saved_state_esr(state);
1692 esr_exception_class_t class = ESR_EC(esr);
1693
1694 if (!arm64_panic_lockdown_active_test ||
1695 class != arm64_panic_lockdown_active_test->expected_ec) {
1696 return false;
1697 }
1698
1699 #if BTI_ENFORCED
1700 /* Clear BTYPE to prevent taking another exception on ERET */
1701 uint32_t spsr = get_saved_state_cpsr(state);
1702 spsr &= ~PSR_BTYPE_MASK;
1703 set_saved_state_cpsr(state, spsr);
1704 #endif /* BTI_ENFORCED */
1705
1706 /* We got the expected exception, recover by forging an early return */
1707 set_saved_state_pc(state, get_saved_state_lr(state));
1708 arm64_panic_lockdown_caught_exception = true;
1709
1710 return true;
1711 }
1712
1713 static void
1714 panic_lockdown_expect_test(const char *treatment,
1715 arm64_panic_lockdown_test_case_s *test,
1716 bool expect_lockdown,
1717 bool mask_interrupts)
1718 {
1719 int ints = 0;
1720
1721 arm64_panic_lockdown_active_test = test;
1722 xnu_post_panic_lockdown_did_fire = false;
1723 arm64_panic_lockdown_caught_exception = false;
1724
1725 uintptr_t fault_pc;
1726 if (test->override_expected_fault_pc_valid) {
1727 fault_pc = (uintptr_t)test->override_expected_fault_pc;
1728 } else {
1729 fault_pc = (uintptr_t)test->func;
1730 #ifdef BTI_ENFORCED
1731 /* When BTI is enabled, we expect the fault to occur after the landing pad */
1732 fault_pc += 4;
1733 #endif /* BTI_ENFORCED */
1734 }
1735
1736
1737 ml_expect_fault_pc_begin(
1738 arm64_panic_lockdown_test_exception_handler,
1739 fault_pc);
1740
1741 if (mask_interrupts) {
1742 ints = ml_set_interrupts_enabled(FALSE);
1743 }
1744
1745 (void)test->func(test->arg);
1746
1747 if (mask_interrupts) {
1748 (void)ml_set_interrupts_enabled(ints);
1749 }
1750
1751 ml_expect_fault_end();
1752
1753 if (expect_lockdown == xnu_post_panic_lockdown_did_fire &&
1754 arm64_panic_lockdown_caught_exception) {
1755 T_PASS("%s + %s OK\n", test->func_str, treatment);
1756 } else {
1757 T_FAIL(
1758 "%s + %s FAIL (expected lockdown: %d, did lockdown: %d, caught exception: %d)\n",
1759 test->func_str, treatment,
1760 expect_lockdown, xnu_post_panic_lockdown_did_fire,
1761 arm64_panic_lockdown_caught_exception);
1762 }
1763
1764 #if DEVELOPMENT || DEBUG
1765 /* Check that the debug info is minimally functional */
1766 if (expect_lockdown) {
1767 T_EXPECT_NE_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
1768 0ULL, "Initiator PC set");
1769 } else {
1770 T_EXPECT_EQ_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
1771 0ULL, "Initiator PC not set");
1772 }
1773
1774 /* Reset the debug data so it can be filled later if needed */
1775 debug_panic_lockdown_initiator_state.initiator_pc = 0;
1776 #endif /* DEVELOPMENT || DEBUG */
1777 }
1778
1779 static void
1780 panic_lockdown_expect_fault_raw(const char *label,
1781 panic_lockdown_helper_fcn_t entrypoint,
1782 panic_lockdown_helper_fcn_t faulting_function,
1783 expected_fault_handler_t fault_handler)
1784 {
1785 uint64_t test_success = 0;
1786 xnu_post_panic_lockdown_did_fire = false;
1787
1788 uintptr_t fault_pc = (uintptr_t)faulting_function;
1789 #ifdef BTI_ENFORCED
1790 /* When BTI is enabled, we expect the fault to occur after the landing pad */
1791 fault_pc += 4;
1792 #endif /* BTI_ENFORCED */
1793
1794 ml_expect_fault_pc_begin(fault_handler, fault_pc);
1795
1796 test_success = entrypoint(0);
1797
1798 ml_expect_fault_end();
1799
1800 if (test_success && xnu_post_panic_lockdown_did_fire) {
1801 T_PASS("%s OK\n", label);
1802 } else {
1803 T_FAIL("%s FAIL (test returned: %d, did lockdown: %d)\n",
1804 label, test_success, xnu_post_panic_lockdown_did_fire);
1805 }
1806 }
1807
1808 /**
1809 * Returns a pointer which is guaranteed to be invalid under IA with the zero
1810 * discriminator.
1811 *
1812 * This is somewhat overcomplicated since it's exceedingly unlikely that any
1813 * given pointer will have a zero PAC (which would make signing a no-op and
1814 * break the test), but it's easy enough to avoid the problem.
1815 */
1816 static uint64_t
1817 panic_lockdown_pacia_get_invalid_ptr(void)
1818 {
1819 char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1820 char *signed_ptr = NULL;
1821 do {
1822 unsigned_ptr += 4 /* avoid alignment exceptions */;
1823 signed_ptr = ptrauth_sign_unauthenticated(
1824 unsigned_ptr,
1825 ptrauth_key_asia,
1826 0);
1827 } while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1828
1829 return (uint64_t)unsigned_ptr;
1830 }
1831
1832 /**
1833 * Returns a pointer which is guaranteed to be invalid under DA with the zero
1834 * discriminator.
1835 */
1836 static uint64_t
1837 panic_lockdown_pacda_get_invalid_ptr(void)
1838 {
1839 char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1840 char *signed_ptr = NULL;
1841 do {
1842 unsigned_ptr += 8 /* avoid alignment exceptions */;
1843 signed_ptr = ptrauth_sign_unauthenticated(
1844 unsigned_ptr,
1845 ptrauth_key_asda,
1846 0);
1847 } while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1848
1849 return (uint64_t)unsigned_ptr;
1850 }
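/*
 * Both helpers above feed the .arg of the PAC lockdown cases below: they
 * return a raw address whose IA/DA signature under the zero discriminator is
 * known to be non-zero, so authenticating that address must fail and exercise
 * the PAC failure paths under test.
 */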
1851
1852 kern_return_t
1853 arm64_panic_lockdown_test(void)
1854 {
1855 #if __has_feature(ptrauth_calls)
1856 uint64_t ia_invalid = panic_lockdown_pacia_get_invalid_ptr();
1857 #endif /* ptrauth_calls */
1858 arm64_panic_lockdown_test_case_s tests[] = {
1859 {
1860 .func_str = "arm64_panic_lockdown_test_load",
1861 .func = &arm64_panic_lockdown_test_load,
1862 /* Trigger a null deref */
1863 .arg = (uint64_t)NULL,
1864 .expected_ec = ESR_EC_DABORT_EL1,
1865 .expect_lockdown_exceptions_masked = true,
1866 .expect_lockdown_exceptions_unmasked = false,
1867 },
1868 {
1869 .func_str = "arm64_panic_lockdown_test_gdbtrap",
1870 .func = &arm64_panic_lockdown_test_gdbtrap,
1871 .arg = 0,
1872 .expected_ec = ESR_EC_UNCATEGORIZED,
1873 /* GDBTRAP instructions should be allowed everywhere */
1874 .expect_lockdown_exceptions_masked = false,
1875 .expect_lockdown_exceptions_unmasked = false,
1876 },
1877 #if __has_feature(ptrauth_calls)
1878 {
1879 .func_str = "arm64_panic_lockdown_test_pac_brk_c470",
1880 .func = &arm64_panic_lockdown_test_pac_brk_c470,
1881 .arg = 0,
1882 .expected_ec = ESR_EC_BRK_AARCH64,
1883 .expect_lockdown_exceptions_masked = true,
1884 .expect_lockdown_exceptions_unmasked = true,
1885 },
1886 {
1887 .func_str = "arm64_panic_lockdown_test_pac_brk_c471",
1888 .func = &arm64_panic_lockdown_test_pac_brk_c471,
1889 .arg = 0,
1890 .expected_ec = ESR_EC_BRK_AARCH64,
1891 .expect_lockdown_exceptions_masked = true,
1892 .expect_lockdown_exceptions_unmasked = true,
1893 },
1894 {
1895 .func_str = "arm64_panic_lockdown_test_pac_brk_c472",
1896 .func = &arm64_panic_lockdown_test_pac_brk_c472,
1897 .arg = 0,
1898 .expected_ec = ESR_EC_BRK_AARCH64,
1899 .expect_lockdown_exceptions_masked = true,
1900 .expect_lockdown_exceptions_unmasked = true,
1901 },
1902 {
1903 .func_str = "arm64_panic_lockdown_test_pac_brk_c473",
1904 .func = &arm64_panic_lockdown_test_pac_brk_c473,
1905 .arg = 0,
1906 .expected_ec = ESR_EC_BRK_AARCH64,
1907 .expect_lockdown_exceptions_masked = true,
1908 .expect_lockdown_exceptions_unmasked = true,
1909 },
1910 {
1911 .func_str = "arm64_panic_lockdown_test_telemetry_brk_ff00",
1912 .func = &arm64_panic_lockdown_test_telemetry_brk_ff00,
1913 .arg = 0,
1914 .expected_ec = ESR_EC_BRK_AARCH64,
1915 /*
1916 * PAC breakpoints are not the only breakpoints; ensure that other
1917 * BRKs (like those used for telemetry) do not trigger lockdowns.
1918 * This is necessary to avoid conflicts with features like UBSan
1919 * telemetry (which could fire at any time in C code).
1920 */
1921 .expect_lockdown_exceptions_masked = false,
1922 .expect_lockdown_exceptions_unmasked = false,
1923 },
1924 {
1925 .func_str = "arm64_panic_lockdown_test_br_auth_fail",
1926 .func = &arm64_panic_lockdown_test_br_auth_fail,
1927 .arg = ia_invalid,
1928 .expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_IABORT_EL1,
1929 .expect_lockdown_exceptions_masked = true,
1930 .expect_lockdown_exceptions_unmasked = true,
1931 /*
1932 * Pre-FEAT_FPACCOMBINE, BRAx branches to a poisoned PC so we
1933 * expect to fault on the branch target rather than the branch
1934 * itself. The exact ELR will likely be different from ia_invalid,
1935 * but since the expect logic in sleh only matches on low bits (i.e.
1936 * not bits which will be poisoned), this is fine.
1937 * On FEAT_FPACCOMBINE devices, we will fault on the branch itself.
1938 */
1939 .override_expected_fault_pc_valid = !gARM_FEAT_FPACCOMBINE,
1940 .override_expected_fault_pc = ia_invalid
1941 },
1942 {
1943 .func_str = "arm64_panic_lockdown_test_ldr_auth_fail",
1944 .func = &arm64_panic_lockdown_test_ldr_auth_fail,
1945 .arg = panic_lockdown_pacda_get_invalid_ptr(),
1946 .expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_DABORT_EL1,
1947 .expect_lockdown_exceptions_masked = true,
1948 .expect_lockdown_exceptions_unmasked = true,
1949 },
1950 {
1951 .func_str = "arm64_panic_lockdown_test_copyio_poison",
1952 .func = arm64_panic_lockdown_test_copyio,
1953 /* fake a poisoned kernel pointer by flipping the bottom PAC bit */
1954 .arg = ((uint64_t)-1) ^ (1LLU << (64 - T1SZ_BOOT)),
1955 .expected_ec = ESR_EC_DABORT_EL1,
1956 .expect_lockdown_exceptions_masked = false,
1957 .expect_lockdown_exceptions_unmasked = false,
1958 },
1959 #if __ARM_ARCH_8_6__
1960 {
1961 .func_str = "arm64_panic_lockdown_test_fpac",
1962 .func = &arm64_panic_lockdown_test_fpac,
1963 .arg = ia_invalid,
1964 .expected_ec = ESR_EC_PAC_FAIL,
1965 .expect_lockdown_exceptions_masked = true,
1966 .expect_lockdown_exceptions_unmasked = true,
1967 },
1968 #endif /* __ARM_ARCH_8_6__ */
1969 #endif /* ptrauth_calls */
1970 {
1971 .func_str = "arm64_panic_lockdown_test_copyio",
1972 .func = arm64_panic_lockdown_test_copyio,
1973 .arg = 0x0 /* load from NULL */,
1974 .expected_ec = ESR_EC_DABORT_EL1,
1975 .expect_lockdown_exceptions_masked = false,
1976 .expect_lockdown_exceptions_unmasked = false,
1977 },
1978 };
1979
1980 size_t test_count = sizeof(tests) / sizeof(*tests);
1981 for (size_t i = 0; i < test_count; i++) {
1982 panic_lockdown_expect_test(
1983 "Exceptions unmasked",
1984 &tests[i],
1985 tests[i].expect_lockdown_exceptions_unmasked,
1986 /* mask_interrupts */ false);
1987
1988 panic_lockdown_expect_test(
1989 "Exceptions masked",
1990 &tests[i],
1991 tests[i].expect_lockdown_exceptions_masked,
1992 /* mask_interrupts */ true);
1993 }
1994
1995 panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_invalid_stack",
1996 arm64_panic_lockdown_test_sp1_invalid_stack,
1997 arm64_panic_lockdown_test_pac_brk_c470,
1998 arm64_panic_lockdown_test_sp1_invalid_stack_handler);
1999
2000 panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_exception_in_vector",
2001 arm64_panic_lockdown_test_sp1_exception_in_vector,
2002 el1_sp1_synchronous_raise_exception_in_vector,
2003 arm64_panic_lockdown_test_sp1_exception_in_vector_handler);
2004 return KERN_SUCCESS;
2005 }
2006 #endif /* CONFIG_SPTM */
2007
2008
2009
2010 #if HAS_SPECRES
2011
2012 /*** CPS RCTX ***/
2013
2014 #if HAS_CPSRCTX
2015
2016 static inline void
2017 _cpsrctx_exec(uint64_t ctx)
2018 {
2019 asm volatile ( "ISB SY");
2020 asm volatile ( "CPS RCTX, %0" :: "r"(ctx));
2021 asm volatile ( "DSB SY");
2022 asm volatile ( "ISB SY");
2023 }
2024
2025 static void
2026 _cpsrctx_do_test(void)
2027 {
2028 typedef struct {
2029 union {
2030 struct {
2031 uint64_t ASID:16;
2032 uint64_t GASID:1;
2033 uint64_t :7;
2034 uint64_t EL:2;
2035 uint64_t NS:1;
2036 uint64_t NSE:1;
2037 uint64_t :4;
2038 uint64_t VMID:16;
2039 uint64_t GVMID:1;
2040 uint64_t :7;
2041 uint64_t GM:1;
2042 uint64_t :3;
2043 uint64_t IS:3;
2044 uint64_t :1;
2045 };
2046 uint64_t raw;
2047 };
2048 } cpsrctx_ctx;
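/*
 * Worked encoding (editor's note, assuming the little-endian bitfield
 * allocation this struct relies on): a context naming ASID 0x0010 at EL1,
 * non-secure, with every other field zero is
 *
 *   ctx.ASID = 0x0010; ctx.EL = 1; ctx.NS = 1;
 *   ctx.raw == 0x0000000005000010
 *
 * i.e. ASID in bits [15:0], EL in bits [25:24], NS in bit 26.
 */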
2049
2050 assert(sizeof(cpsrctx_ctx) == 8);
2051
2052 /*
2053 * Test various possible meaningful CPS RCTX context IDs.
2054 */
2055
2056 /* el : EL0 / EL1 / EL2. */
2057 for (uint8_t el = 0; el < 3; el++) {
2058 /* Always non-secure. */
2059 const uint8_t ns = 1;
2060 const uint8_t nse = 0;
2061
2062 /* Iterate over some pairs of ASIDs / VMIDs. */
2063 for (uint16_t xxid = 0; xxid < 256; xxid++) {
2064 const uint16_t asid = (uint16_t) (xxid << 4);
2065 const uint16_t vmid = (uint16_t) (256 - (xxid << 4));
2066
2067 /* Test 4 G[AS|VM]ID combinations. */
2068 for (uint8_t bid = 0; bid < 4; bid++) {
2069 const uint8_t gasid = bid & 1;
2070 const uint8_t gvmid = bid & 2;
2071
2072 /* Test a selection of GM / IS combinations. */
2073 for (uint8_t gid = 0; gid < 0x8; gid++) {
2074 const uint8_t gm = gid & 1;
2075 const uint8_t is = gid >> 1;
2076
2077 /* Generate the context descriptor. */
2078 cpsrctx_ctx ctx = {0};
2079 ctx.ASID = asid;
2080 ctx.GASID = gasid;
2081 ctx.EL = el;
2082 ctx.NS = ns;
2083 ctx.NSE = nse;
2084 ctx.VMID = vmid;
2085 ctx.GVMID = gvmid;
2086 ctx.GM = gm;
2087 ctx.IS = is;
2088
2089 /* Execute the CPS instruction. */
2090 _cpsrctx_exec(ctx.raw);
2091
2092 /* Insert some work between executions. */
2093 volatile uint8_t sum = 0;
2094 for (volatile uint8_t i = 0; i < 64; i++) {
2095 sum += i * sum + 3;
2096 }
2097 }
2098
2099 /* If EL0 is not targeted, we only need to do this once. */
2100 if (el != 0) {
2101 goto not_el0_skip;
2102 }
2103 }
2104 }
2105
2106 /* Skip target when EL != 0. */
2107 not_el0_skip: ;
2108 }
2109 }
2110
2111 #endif /* HAS_CPSRCTX */
2112
2113 /*** SPECRES ***/
2114
2115 #if HAS_SPECRES2
2116 /*
2117 * Execute a COSP RCTX instruction.
2118 */
2119 static void
2120 _cosprctx_exec(uint64_t raw)
2121 {
2122 asm volatile ( "ISB SY");
2123 __asm__ volatile ("COSP RCTX, %0" :: "r" (raw));
2124 asm volatile ( "DSB SY");
2125 asm volatile ( "ISB SY");
2126 }
2127 #endif
2128
2129 /*
2130 * Execute a CFP RCTX instruction.
2131 */
2132 static void
2133 _cfprctx_exec(uint64_t raw)
2134 {
2135 asm volatile ( "ISB SY");
2136 __asm__ volatile ("CFP RCTX, %0" :: "r" (raw));
2137 asm volatile ( "DSB SY");
2138 asm volatile ( "ISB SY");
2139 }
2140
2141 /*
2142 * Execute a CPP RCTX instruction.
2143 */
2144 static void
2145 _cpprctx_exec(uint64_t raw)
2146 {
2147 asm volatile ( "ISB SY");
2148 __asm__ volatile ("CPP RCTX, %0" :: "r" (raw));
2149 asm volatile ( "DSB SY");
2150 asm volatile ( "ISB SY");
2151 }
2152
2153 /*
2154 * Execute a DVP RCTX instruction.
2155 */
2156 static void
2157 _dvprctx_exec(uint64_t raw)
2158 {
2159 asm volatile ( "ISB SY");
2160 __asm__ volatile ("DVP RCTX, %0" :: "r" (raw));
2161 asm volatile ( "DSB SY");
2162 asm volatile ( "ISB SY");
2163 }
2164
2165 static void
2166 _specres_do_test_std(void (*impl)(uint64_t raw))
2167 {
2168 typedef struct {
2169 union {
2170 struct {
2171 uint64_t ASID:16;
2172 uint64_t GASID:1;
2173 uint64_t :7;
2174 uint64_t EL:2;
2175 uint64_t NS:1;
2176 uint64_t NSE:1;
2177 uint64_t :4;
2178 uint64_t VMID:16;
2179 uint64_t GVMID:1;
2180 };
2181 uint64_t raw;
2182 };
2183 } specres_ctx;
2184
2185 assert(sizeof(specres_ctx) == 8);
2186
2187 /*
2188 * Test various possible meaningful RCTX context IDs.
2189 */
2190
2191 /* el : EL0 / EL1 / EL2. */
2192 for (uint8_t el = 0; el < 3; el++) {
2193 /* Always non-secure. */
2194 const uint8_t ns = 1;
2195 const uint8_t nse = 0;
2196
2197 /* Iterate over some pairs of ASIDs / VMIDs. */
2198 for (uint16_t xxid = 0; xxid < 256; xxid++) {
2199 const uint16_t asid = (uint16_t) (xxid << 4);
2200 const uint16_t vmid = (uint16_t) (256 - (xxid << 4));
2201
2202 /* Test 4 G[AS|VM]ID combinations. */
2203 for (uint8_t bid = 0; bid < 4; bid++) {
2204 const uint8_t gasid = bid & 1;
2205 const uint8_t gvmid = bid & 2;
2206
2207 /* Generate the context descriptor. */
2208 specres_ctx ctx = {0};
2209 ctx.ASID = asid;
2210 ctx.GASID = gasid;
2211 ctx.EL = el;
2212 ctx.NS = ns;
2213 ctx.NSE = nse;
2214 ctx.VMID = vmid;
2215 ctx.GVMID = gvmid;
2216
2217 /* Execute the RCTX instruction under test. */
2218 (*impl)(ctx.raw);
2219
2220 /* Insert some work between executions. */
2221 volatile uint8_t sum = 0;
2222 for (volatile uint8_t i = 0; i < 64; i++) {
2223 sum += i * sum + 3;
2224 }
2225
2226 /* If EL0 is not targeted, we only need to do this once. */
2227 if (el != 0) {
2228 goto not_el0_skip;
2229 }
2230 }
2231 }
2232
2233 /* Skip target when EL != 0. */
2234 not_el0_skip: ;
2235 }
2236 }
2237
2238 /*** RCTX ***/
2239
2240 static void
2241 _rctx_do_test(void)
2242 {
2243 _specres_do_test_std(&_cfprctx_exec);
2244 _specres_do_test_std(&_cpprctx_exec);
2245 _specres_do_test_std(&_dvprctx_exec);
2246 #if HAS_SPECRES2
2247 _specres_do_test_std(&_cosprctx_exec);
2248 #endif
2249 #if HAS_CPSRCTX
2250 _cpsrctx_do_test();
2251 #endif
2252 }
2253
2254 kern_return_t
2255 specres_test(void)
2256 {
2257 /* Basic instructions test. */
2258 _cfprctx_exec(0);
2259 _cpprctx_exec(0);
2260 _dvprctx_exec(0);
2261 #if HAS_SPECRES2
2262 _cosprctx_exec(0);
2263 #endif
2264 #if HAS_CPSRCTX
2265 _cpsrctx_exec(0);
2266 #endif
2267
2268 /* More advanced instructions test. */
2269 _rctx_do_test();
2270
2271 return KERN_SUCCESS;
2272 }
2273
2274 #endif /* HAS_SPECRES */
2275 #if BTI_ENFORCED
2276 typedef uint64_t (bti_landing_pad_func_t)(void);
2277 typedef uint64_t (bti_shim_func_t)(bti_landing_pad_func_t *);
2278
2279 extern bti_shim_func_t arm64_bti_test_jump_shim;
2280 extern bti_shim_func_t arm64_bti_test_call_shim;
2281
2282 extern bti_landing_pad_func_t arm64_bti_test_func_with_no_landing_pad;
2283 extern bti_landing_pad_func_t arm64_bti_test_func_with_call_landing_pad;
2284 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_landing_pad;
2285 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_call_landing_pad;
2286 #if __has_feature(ptrauth_returns)
2287 extern bti_landing_pad_func_t arm64_bti_test_func_with_pac_landing_pad;
2288 #endif /* __has_feature(ptrauth_returns) */
2289
2290 typedef struct arm64_bti_test_func_case {
2291 const char *func_str;
2292 bti_landing_pad_func_t *func;
2293 uint64_t expect_return_value;
2294 uint8_t expect_call_ok;
2295 uint8_t expect_jump_ok;
2296 } arm64_bti_test_func_case_s;
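/*
 * Each case below is pushed through both shims in arm64_bti_test():
 * arm64_bti_test_call_shim (a call-style, BLR-like branch) and
 * arm64_bti_test_jump_shim (a jump-style, BR-like branch), with
 * expect_call_ok / expect_jump_ok recording which landing pads are expected
 * to accept which branch type.
 */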
2297
2298 static volatile uintptr_t bti_exception_handler_pc = 0;
2299
2300 static bool
2301 arm64_bti_test_exception_handler(arm_saved_state_t * state)
2302 {
2303 uint64_t esr = get_saved_state_esr(state);
2304 esr_exception_class_t class = ESR_EC(esr);
2305
2306 if (class != ESR_EC_BTI_FAIL) {
2307 return false;
2308 }
2309
2310 /* Capture any desired exception metrics */
2311 bti_exception_handler_pc = get_saved_state_pc(state);
2312
2313 /* "Cancel" the function call by forging an early return */
2314 set_saved_state_pc(state, get_saved_state_lr(state));
2315
2316 /* Clear BTYPE to prevent taking another exception after ERET */
2317 uint32_t spsr = get_saved_state_cpsr(state);
2318 spsr &= ~PSR_BTYPE_MASK;
2319 set_saved_state_cpsr(state, spsr);
2320
2321 return true;
2322 }
2323
2324 static void
2325 arm64_bti_test_func_with_shim(
2326 uint8_t expect_ok,
2327 const char *shim_str,
2328 bti_shim_func_t *shim,
2329 arm64_bti_test_func_case_s *test_case)
2330 {
2331 uint64_t result = -1;
2332
2333 /* Capture BTI exceptions triggered by our target function */
2334 uintptr_t raw_func = (uintptr_t)ptrauth_strip(
2335 (void *)test_case->func,
2336 ptrauth_key_function_pointer);
2337 ml_expect_fault_pc_begin(arm64_bti_test_exception_handler, raw_func);
2338 bti_exception_handler_pc = 0;
2339
2340 /*
2341 * The assembly routines do not support C function type discriminators, so
2342 * strip and resign with zero if needed
2343 */
2344 bti_landing_pad_func_t *resigned = ptrauth_auth_and_resign(
2345 test_case->func,
2346 ptrauth_key_function_pointer,
2347 ptrauth_type_discriminator(bti_landing_pad_func_t),
2348 ptrauth_key_function_pointer, 0);
2349
2350 result = shim(resigned);
2351
2352 ml_expect_fault_end();
2353
2354 if (!expect_ok && raw_func != bti_exception_handler_pc) {
2355 T_FAIL("Expected BTI exception at 0x%llx but got one at %llx instead\n",
2356 raw_func, bti_exception_handler_pc);
2357 } else if (expect_ok && bti_exception_handler_pc) {
2358 T_FAIL("Did not expect BTI exception but got on at 0x%llx\n",
2359 bti_exception_handler_pc);
2360 } else if (!expect_ok && !bti_exception_handler_pc) {
2361 T_FAIL("Failed to hit expected exception!\n");
2362 } else if (expect_ok && result != test_case->expect_return_value) {
2363 T_FAIL("Incorrect test function result (expected=%llu, result=%llu\n)",
2364 test_case->expect_return_value, result);
2365 } else {
2366 T_PASS("%s (shim=%s)\n", test_case->func_str, shim_str);
2367 }
2368 }
2369
2370 /**
2371 * This test ensures that BTI exceptions are raised where expected, and only
2372 * where expected, by exhaustively testing all indirect branch combinations
2373 * with all landing pad options.
2374 */
2375 kern_return_t
2376 arm64_bti_test(void)
2377 {
2378 static arm64_bti_test_func_case_s tests[] = {
2379 {
2380 .func_str = "arm64_bti_test_func_with_no_landing_pad",
2381 .func = &arm64_bti_test_func_with_no_landing_pad,
2382 .expect_return_value = 1,
2383 .expect_call_ok = 0,
2384 .expect_jump_ok = 0,
2385 },
2386 {
2387 .func_str = "arm64_bti_test_func_with_call_landing_pad",
2388 .func = &arm64_bti_test_func_with_call_landing_pad,
2389 .expect_return_value = 2,
2390 .expect_call_ok = 1,
2391 .expect_jump_ok = 0,
2392 },
2393 {
2394 .func_str = "arm64_bti_test_func_with_jump_landing_pad",
2395 .func = &arm64_bti_test_func_with_jump_landing_pad,
2396 .expect_return_value = 3,
2397 .expect_call_ok = 0,
2398 .expect_jump_ok = 1,
2399 },
2400 {
2401 .func_str = "arm64_bti_test_func_with_jump_call_landing_pad",
2402 .func = &arm64_bti_test_func_with_jump_call_landing_pad,
2403 .expect_return_value = 4,
2404 .expect_call_ok = 1,
2405 .expect_jump_ok = 1,
2406 },
2407 #if __has_feature(ptrauth_returns)
2408 {
2409 .func_str = "arm64_bti_test_func_with_pac_landing_pad",
2410 .func = &arm64_bti_test_func_with_pac_landing_pad,
2411 .expect_return_value = 5,
2412 .expect_call_ok = 1,
2413 .expect_jump_ok = 0,
2414 },
2415 #endif /* __has_feature(ptrauth_returns) */
2416 };
2417
2418 size_t test_count = sizeof(tests) / sizeof(*tests);
2419 for (size_t i = 0; i < test_count; i++) {
2420 arm64_bti_test_func_case_s *test_case = tests + i;
2421
2422 arm64_bti_test_func_with_shim(test_case->expect_call_ok,
2423 "arm64_bti_test_call_shim",
2424 arm64_bti_test_call_shim,
2425 test_case);
2426
2427
2428 arm64_bti_test_func_with_shim(test_case->expect_jump_ok,
2429 "arm64_bti_test_jump_shim",
2430 arm64_bti_test_jump_shim,
2431 test_case);
2432 }
2433
2434 return KERN_SUCCESS;
2435 }
2436 #endif /* BTI_ENFORCED */
2437
2438
2439 /**
2440 * Test the speculation guards
2441 * We can't easily ensure that the guards actually behave correctly under
2442 * speculation, but we can at least ensure that the guards are non-speculatively
2443 * correct.
2444 */
2445 kern_return_t
arm64_speculation_guard_test(void)2446 arm64_speculation_guard_test(void)
2447 {
2448 uint64_t cookie1_64 = 0x5350454354524521ULL; /* SPECTRE! */
2449 uint64_t cookie2_64 = 0x5941592043505553ULL; /* YAY CPUS */
2450 uint32_t cookie1_32 = (uint32_t)cookie1_64;
2451 uint32_t cookie2_32 = (uint32_t)cookie2_64;
2452 uint64_t result64 = 0;
2453 uint32_t result32 = 0;
2454
2455 /*
2456 * Test the zeroing guard.
2457 * Since failing the guard triggers a panic, the failure path is not
2458 * exercised as part of the automated tests.
2459 */
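/*
 * Editor's note: each zeroing guard below is expected to pass `value` through
 * to `out` when the condition "cmp_1 <cc> cmp_2" holds, which is all that can
 * be verified non-speculatively; the zeroing/panic behaviour on a failed
 * comparison is deliberately not exercised here.
 */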
2460
2461 result64 = 0;
2462 SPECULATION_GUARD_ZEROING_XXX(
2463 /* out */ result64, /* value */ cookie1_64,
2464 /* cmp_1 */ 0ULL, /* cmp_2 */ 1ULL, /* cc */ "NE");
2465 T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 zeroing guard works");
2466
2467 result64 = 0;
2468 SPECULATION_GUARD_ZEROING_XWW(
2469 /* out */ result64, /* value */ cookie1_64,
2470 /* cmp_1 */ 1U, /* cmp_2 */ 0U, /* cc */ "HI");
2471 T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 32 zeroing guard works");
2472
2473 result32 = 0;
2474 SPECULATION_GUARD_ZEROING_WXX(
2475 /* out */ result32, /* value */ cookie1_32,
2476 /* cmp_1 */ -1LL, /* cmp_2 */ 4LL, /* cc */ "LT");
2477 T_ASSERT_EQ_UINT(result32, cookie1_32, "32, 64 zeroing guard works");
2478
2479 result32 = 0;
2480 SPECULATION_GUARD_ZEROING_WWW(
2481 /* out */ result32, /* value */ cookie1_32,
2482 /* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "GT");
2483 T_ASSERT_EQ_UINT(result32, cookie1_32, "32, 32 zeroing guard works");
2484
2485 /*
2486 * Test the selection guard
2487 */
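/*
 * Editor's note: the select guards yield sel_1 when "cmp_1 <cc> cmp_2" holds
 * and sel_2 when the negated condition n_cc holds, so each pair of cases
 * below checks both outcomes of the same comparison.
 */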
2488
2489 result64 = 0;
2490 SPECULATION_GUARD_SELECT_XXX(
2491 /* out */ result64,
2492 /* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
2493 /* cc */ "EQ", /* sel_1 */ cookie1_64,
2494 /* n_cc */ "NE", /* sel_2 */ cookie2_64);
2495 T_EXPECT_EQ_ULLONG(result64, cookie2_64, "64, 64 select guard works (1)");
2496
2497 result64 = 0;
2498 SPECULATION_GUARD_SELECT_XXX(
2499 /* out */ result64,
2500 /* cmp_1 */ 32ULL, /* cmp_2 */ 32ULL,
2501 /* cc */ "EQ", /* sel_1 */ cookie1_64,
2502 /* n_cc */ "NE", /* sel_2 */ cookie2_64);
2503 T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 select guard works (2)");
2504
2505
2506 result32 = 0;
2507 SPECULATION_GUARD_SELECT_WXX(
2508 /* out */ result32,
2509 /* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
2510 /* cc */ "HI", /* sel_1 */ cookie1_64,
2511 /* n_cc */ "LS", /* sel_2 */ cookie2_64);
2512 T_EXPECT_EQ_ULLONG(result32, cookie2_32, "32, 64 select guard works (1)");
2513
2514 result32 = 0;
2515 SPECULATION_GUARD_SELECT_WXX(
2516 /* out */ result32,
2517 /* cmp_1 */ 16ULL, /* cmp_2 */ 2ULL,
2518 /* cc */ "HI", /* sel_1 */ cookie1_64,
2519 /* n_cc */ "LS", /* sel_2 */ cookie2_64);
2520 T_EXPECT_EQ_ULLONG(result32, cookie1_32, "32, 64 select guard works (2)");
2521
2522 return KERN_SUCCESS;
2523 }
2524