xref: /xnu-11417.140.69/osfmk/arm64/platform_tests.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
33  * Mellon University All Rights Reserved.
34  *
35  * Permission to use, copy, modify and distribute this software and its
36  * documentation is hereby granted, provided that both the copyright notice
37  * and this permission notice appear in all copies of the software,
38  * derivative works or modified versions, and any portions thereof, and that
39  * both notices appear in supporting documentation.
40  *
41  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42  * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43  * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44  *
45  * Carnegie Mellon requests users of this software to return to
46  *
47  * Software Distribution Coordinator  or  [email protected]
48  * School of Computer Science Carnegie Mellon University Pittsburgh PA
49  * 15213-3890
50  *
51  * any improvements or extensions that they make and grant Carnegie Mellon the
52  * rights to redistribute these changes.
53  */
54 
55 #include <mach_ldebug.h>
56 
57 #define LOCK_PRIVATE 1
58 
59 #include <vm/pmap.h>
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_page_internal.h>
62 #include <vm/vm_kern_xnu.h>
63 #include <mach/vm_map.h>
64 #include <kern/backtrace.h>
65 #include <kern/kalloc.h>
66 #include <kern/cpu_number.h>
67 #include <kern/locks.h>
68 #include <kern/misc_protos.h>
69 #include <kern/thread.h>
70 #include <kern/processor.h>
71 #include <kern/sched_prim.h>
72 #include <kern/debug.h>
73 #include <string.h>
74 #include <tests/xnupost.h>
75 
76 #if     MACH_KDB
77 #include <ddb/db_command.h>
78 #include <ddb/db_output.h>
79 #include <ddb/db_sym.h>
80 #include <ddb/db_print.h>
81 #endif                          /* MACH_KDB */
82 
83 #include <san/kasan.h>
84 #include <sys/errno.h>
85 #include <sys/kdebug.h>
86 #include <sys/munge.h>
87 #include <machine/cpu_capabilities.h>
88 #include <arm/cpu_data_internal.h>
89 #include <arm/pmap.h>
90 
91 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
92 #include <arm64/amcc_rorgn.h>
93 #endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
94 
95 #include <arm64/machine_machdep.h>
96 
97 kern_return_t arm64_backtrace_test(void);
98 kern_return_t arm64_lock_test(void);
99 kern_return_t arm64_munger_test(void);
100 kern_return_t arm64_pan_test(void);
101 kern_return_t arm64_late_pan_test(void);
102 #if defined(HAS_APPLE_PAC)
103 #include <ptrauth.h>
104 kern_return_t arm64_ropjop_test(void);
105 #endif
106 #if defined(KERNEL_INTEGRITY_CTRR)
107 kern_return_t ctrr_test(void);
108 kern_return_t ctrr_test_cpu(void);
109 #endif
110 #if BTI_ENFORCED
111 kern_return_t arm64_bti_test(void);
112 #endif /* BTI_ENFORCED */
113 #if HAS_SPECRES
114 extern kern_return_t specres_test(void);
115 #endif
116 
117 // exception handler ignores this fault address during PAN test
118 #if __ARM_PAN_AVAILABLE__
119 const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
120 vm_offset_t pan_test_addr = 0;
121 vm_offset_t pan_ro_addr = 0;
122 volatile int pan_exception_level = 0;
123 volatile char pan_fault_value = 0;
124 #endif
125 
126 #if CONFIG_SPTM
127 kern_return_t arm64_panic_lockdown_test(void);
128 #endif /* CONFIG_SPTM */
129 
130 #include <arm64/speculation.h>
131 kern_return_t arm64_speculation_guard_test(void);
132 
133 #include <libkern/OSAtomic.h>
134 #define LOCK_TEST_ITERATIONS 50
135 #define LOCK_TEST_SETUP_TIMEOUT_SEC 15
136 static hw_lock_data_t   lt_hw_lock;
137 static lck_spin_t       lt_lck_spin_t;
138 static lck_mtx_t        lt_mtx;
139 static lck_rw_t         lt_rwlock;
140 static volatile uint32_t lt_counter = 0;
141 static volatile int     lt_spinvolatile;
142 static volatile uint32_t lt_max_holders = 0;
143 static volatile uint32_t lt_upgrade_holders = 0;
144 static volatile uint32_t lt_max_upgrade_holders = 0;
145 static volatile uint32_t lt_num_holders = 0;
146 static volatile uint32_t lt_done_threads;
147 static volatile uint32_t lt_target_done_threads;
148 static volatile uint32_t lt_cpu_bind_id = 0;
149 static uint64_t          lt_setup_timeout = 0;
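/*
 * Shared state for the lock tests (invariants inferred from the test bodies
 * below): lt_counter counts critical-section entries, lt_num_holders tracks
 * how many threads currently hold a blocking lock, and lt_max_holders records
 * the highest value lt_num_holders ever reached -- it must stay at 1 for any
 * mutual-exclusion test to pass.
 */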
150 
151 static void
152 lt_note_another_blocking_lock_holder()
153 {
154 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
155 	lt_num_holders++;
156 	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
157 	hw_lock_unlock(&lt_hw_lock);
158 }
159 
160 static void
161 lt_note_blocking_lock_release()
162 {
163 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
164 	lt_num_holders--;
165 	hw_lock_unlock(&lt_hw_lock);
166 }
167 
168 static void
169 lt_spin_a_little_bit()
170 {
171 	uint32_t i;
172 
173 	for (i = 0; i < 10000; i++) {
174 		lt_spinvolatile++;
175 	}
176 }
177 
178 static void
179 lt_sleep_a_little_bit()
180 {
181 	delay(100);
182 }
183 
184 static void
185 lt_grab_mutex()
186 {
187 	lck_mtx_lock(&lt_mtx);
188 	lt_note_another_blocking_lock_holder();
189 	lt_sleep_a_little_bit();
190 	lt_counter++;
191 	lt_note_blocking_lock_release();
192 	lck_mtx_unlock(&lt_mtx);
193 }
194 
195 static void
196 lt_grab_mutex_with_try()
197 {
198 	while (0 == lck_mtx_try_lock(&lt_mtx)) {
199 		;
200 	}
201 	lt_note_another_blocking_lock_holder();
202 	lt_sleep_a_little_bit();
203 	lt_counter++;
204 	lt_note_blocking_lock_release();
205 	lck_mtx_unlock(&lt_mtx);
206 }
207 
208 static void
209 lt_grab_rw_exclusive()
210 {
211 	lck_rw_lock_exclusive(&lt_rwlock);
212 	lt_note_another_blocking_lock_holder();
213 	lt_sleep_a_little_bit();
214 	lt_counter++;
215 	lt_note_blocking_lock_release();
216 	lck_rw_done(&lt_rwlock);
217 }
218 
219 static void
220 lt_grab_rw_exclusive_with_try()
221 {
222 	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
223 		lt_sleep_a_little_bit();
224 	}
225 
226 	lt_note_another_blocking_lock_holder();
227 	lt_sleep_a_little_bit();
228 	lt_counter++;
229 	lt_note_blocking_lock_release();
230 	lck_rw_done(&lt_rwlock);
231 }
232 
233 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
234  *  static void
235  *  lt_grab_rw_shared()
236  *  {
237  *       lck_rw_lock_shared(&lt_rwlock);
238  *       lt_counter++;
239  *
240  *       lt_note_another_blocking_lock_holder();
241  *       lt_sleep_a_little_bit();
242  *       lt_note_blocking_lock_release();
243  *
244  *       lck_rw_done(&lt_rwlock);
245  *  }
246  */
247 
248 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
249  *  static void
250  *  lt_grab_rw_shared_with_try()
251  *  {
252  *       while(0 == lck_rw_try_lock_shared(&lt_rwlock));
253  *       lt_counter++;
254  *
255  *       lt_note_another_blocking_lock_holder();
256  *       lt_sleep_a_little_bit();
257  *       lt_note_blocking_lock_release();
258  *
259  *       lck_rw_done(&lt_rwlock);
260  *  }
261  */
262 
263 static void
264 lt_upgrade_downgrade_rw()
265 {
266 	boolean_t upgraded, success;
267 
268 	success = lck_rw_try_lock_shared(&lt_rwlock);
269 	if (!success) {
270 		lck_rw_lock_shared(&lt_rwlock);
271 	}
272 
273 	lt_note_another_blocking_lock_holder();
274 	lt_sleep_a_little_bit();
275 	lt_note_blocking_lock_release();
276 
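	/*
	 * Attempt a shared-to-exclusive upgrade. If the upgrade fails,
	 * lck_rw_lock_shared_to_exclusive() has already dropped the shared
	 * hold, so the exclusive lock must be re-acquired from scratch below.
	 */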
277 	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
278 	if (!upgraded) {
279 		success = lck_rw_try_lock_exclusive(&lt_rwlock);
280 
281 		if (!success) {
282 			lck_rw_lock_exclusive(&lt_rwlock);
283 		}
284 	}
285 
286 	lt_upgrade_holders++;
287 	if (lt_upgrade_holders > lt_max_upgrade_holders) {
288 		lt_max_upgrade_holders = lt_upgrade_holders;
289 	}
290 
291 	lt_counter++;
292 	lt_sleep_a_little_bit();
293 
294 	lt_upgrade_holders--;
295 
296 	lck_rw_lock_exclusive_to_shared(&lt_rwlock);
297 
298 	lt_spin_a_little_bit();
299 	lck_rw_done(&lt_rwlock);
300 }
301 
302 #if __AMP__
303 const int limit = 1000000;
304 static int lt_stress_local_counters[MAX_CPUS];
305 
306 lck_ticket_t lt_ticket_lock;
307 lck_grp_t lt_ticket_grp;
308 
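/*
 * Ticket-lock stress body, run by one thread per CPU. The first two
 * lock/increment rounds double as setup barriers (finish binding, then spin
 * on-core); after that every thread hammers lt_ticket_lock until the shared
 * counter reaches `limit`, recording its per-CPU acquisition count so the
 * caller can detect starvation.
 */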
309 static void
310 lt_stress_ticket_lock()
311 {
312 	uint local_counter = 0;
313 
314 	uint cpuid = cpu_number();
315 
316 	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);
317 
318 	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
319 	lt_counter++;
320 	local_counter++;
321 	lck_ticket_unlock(&lt_ticket_lock);
322 
323 	/* Wait until all test threads have finished any binding */
324 	while (lt_counter < lt_target_done_threads) {
325 		if (mach_absolute_time() > lt_setup_timeout) {
326 			kprintf("%s>cpu %d noticed that we exceeded setup timeout of %d seconds during initial setup phase (only %d out of %d threads checked in)",
327 			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter, lt_target_done_threads);
328 			return;
329 		}
330 		/* Yield to keep the CPUs available for the threads to bind */
331 		thread_yield_internal(1);
332 	}
333 
334 	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
335 	lt_counter++;
336 	local_counter++;
337 	lck_ticket_unlock(&lt_ticket_lock);
338 
339 	/*
340 	 * Now that the test threads have finished any binding, wait
341 	 * until they are all actively spinning on-core (done yielding)
342 	 * so we get a fairly timed start.
343 	 */
344 	while (lt_counter < 2 * lt_target_done_threads) {
345 		if (mach_absolute_time() > lt_setup_timeout) {
346 			kprintf("%s>cpu %d noticed that we exceeded setup timeout of %d seconds during secondary setup phase (only %d out of %d threads checked in)",
347 			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter - lt_target_done_threads, lt_target_done_threads);
348 			return;
349 		}
350 	}
351 
352 	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);
353 
354 	while (lt_counter < limit) {
355 		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
356 		if (lt_counter < limit) {
357 			lt_counter++;
358 			local_counter++;
359 		}
360 		lck_ticket_unlock(&lt_ticket_lock);
361 	}
362 
363 	lt_stress_local_counters[cpuid] = local_counter;
364 
365 	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
366 }
367 #endif
368 
369 static void
370 lt_grab_hw_lock()
371 {
372 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
373 	lt_counter++;
374 	lt_spin_a_little_bit();
375 	hw_lock_unlock(&lt_hw_lock);
376 }
377 
378 static void
379 lt_grab_hw_lock_with_try()
380 {
381 	while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
382 		;
383 	}
384 	lt_counter++;
385 	lt_spin_a_little_bit();
386 	hw_lock_unlock(&lt_hw_lock);
387 }
388 
389 static void
390 lt_grab_hw_lock_with_to()
391 {
392 	(void)hw_lock_to(&lt_hw_lock, &hw_lock_spin_policy, LCK_GRP_NULL);
393 	lt_counter++;
394 	lt_spin_a_little_bit();
395 	hw_lock_unlock(&lt_hw_lock);
396 }
397 
398 static void
399 lt_grab_spin_lock()
400 {
401 	lck_spin_lock(&lt_lck_spin_t);
402 	lt_counter++;
403 	lt_spin_a_little_bit();
404 	lck_spin_unlock(&lt_lck_spin_t);
405 }
406 
407 static void
408 lt_grab_spin_lock_with_try()
409 {
410 	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
411 		;
412 	}
413 	lt_counter++;
414 	lt_spin_a_little_bit();
415 	lck_spin_unlock(&lt_lck_spin_t);
416 }
417 
418 static volatile boolean_t lt_thread_lock_grabbed;
419 static volatile boolean_t lt_thread_lock_success;
420 
421 static void
422 lt_reset()
423 {
424 	lt_counter = 0;
425 	lt_max_holders = 0;
426 	lt_num_holders = 0;
427 	lt_max_upgrade_holders = 0;
428 	lt_upgrade_holders = 0;
429 	lt_done_threads = 0;
430 	lt_target_done_threads = 0;
431 	lt_cpu_bind_id = 0;
432 	/* Reset timeout deadline out from current time */
433 	nanoseconds_to_absolutetime(LOCK_TEST_SETUP_TIMEOUT_SEC * NSEC_PER_SEC, &lt_setup_timeout);
434 	lt_setup_timeout += mach_absolute_time();
435 
436 	OSMemoryBarrier();
437 }
438 
439 static void
440 lt_trylock_hw_lock_with_to()
441 {
442 	OSMemoryBarrier();
443 	while (!lt_thread_lock_grabbed) {
444 		lt_sleep_a_little_bit();
445 		OSMemoryBarrier();
446 	}
447 	lt_thread_lock_success = hw_lock_to(&lt_hw_lock,
448 	    &hw_lock_test_give_up_policy, LCK_GRP_NULL);
449 	OSMemoryBarrier();
450 	mp_enable_preemption();
451 }
452 
453 static void
454 lt_trylock_spin_try_lock()
455 {
456 	OSMemoryBarrier();
457 	while (!lt_thread_lock_grabbed) {
458 		lt_sleep_a_little_bit();
459 		OSMemoryBarrier();
460 	}
461 	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
462 	OSMemoryBarrier();
463 }
464 
465 static void
466 lt_trylock_thread(void *arg, wait_result_t wres __unused)
467 {
468 	void (*func)(void) = (void (*)(void))arg;
469 
470 	func();
471 
472 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
473 }
474 
475 static void
476 lt_start_trylock_thread(thread_continue_t func)
477 {
478 	thread_t thread;
479 	kern_return_t kr;
480 
481 	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
482 	assert(kr == KERN_SUCCESS);
483 
484 	thread_deallocate(thread);
485 }
486 
487 static void
488 lt_wait_for_lock_test_threads()
489 {
490 	OSMemoryBarrier();
491 	/* Spin to reduce dependencies */
492 	while (lt_done_threads < lt_target_done_threads) {
493 		lt_sleep_a_little_bit();
494 		OSMemoryBarrier();
495 	}
496 	OSMemoryBarrier();
497 }
498 
499 static kern_return_t
500 lt_test_trylocks()
501 {
502 	boolean_t success;
503 	extern unsigned int real_ncpus;
504 
505 	/*
506 	 * First mtx try lock succeeds, second fails.
507 	 */
508 	success = lck_mtx_try_lock(&lt_mtx);
509 	T_ASSERT_NOTNULL(success, "First mtx try lock");
510 	success = lck_mtx_try_lock(&lt_mtx);
511 	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
512 	lck_mtx_unlock(&lt_mtx);
513 
514 	/*
515 	 * After regular grab, can't try lock.
516 	 */
517 	lck_mtx_lock(&lt_mtx);
518 	success = lck_mtx_try_lock(&lt_mtx);
519 	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
520 	lck_mtx_unlock(&lt_mtx);
521 
522 	/*
523 	 * Two shared try locks on a previously unheld rwlock succeed, and a
524 	 * subsequent exclusive attempt fails.
525 	 */
526 	success = lck_rw_try_lock_shared(&lt_rwlock);
527 	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
528 	success = lck_rw_try_lock_shared(&lt_rwlock);
529 	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
530 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
531 	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
532 	lck_rw_done(&lt_rwlock);
533 	lck_rw_done(&lt_rwlock);
534 
535 	/*
536 	 * After regular shared grab, can trylock
537 	 * for shared but not for exclusive.
538 	 */
539 	lck_rw_lock_shared(&lt_rwlock);
540 	success = lck_rw_try_lock_shared(&lt_rwlock);
541 	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
542 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
543 	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
544 	lck_rw_done(&lt_rwlock);
545 	lck_rw_done(&lt_rwlock);
546 
547 	/*
548 	 * An exclusive try lock succeeds, subsequent shared and exclusive
549 	 * attempts fail.
550 	 */
551 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
552 	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
553 	success = lck_rw_try_lock_shared(&lt_rwlock);
554 	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
555 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
556 	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
557 	lck_rw_done(&lt_rwlock);
558 
559 	/*
560 	 * After regular exclusive grab, neither kind of trylock succeeds.
561 	 */
562 	lck_rw_lock_exclusive(&lt_rwlock);
563 	success = lck_rw_try_lock_shared(&lt_rwlock);
564 	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
565 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
566 	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
567 	lck_rw_done(&lt_rwlock);
568 
569 	/*
570 	 * First spin lock attempts succeed, second attempts fail.
571 	 */
572 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
573 	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
574 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
575 	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
576 	hw_lock_unlock(&lt_hw_lock);
577 
578 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
579 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
580 	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
581 	hw_lock_unlock(&lt_hw_lock);
582 
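	/*
	 * Contended timeout scenarios: the main thread holds lt_hw_lock and then
	 * sets lt_thread_lock_grabbed to release the helper thread, whose
	 * hw_lock_to() with the give-up policy is expected to fail while the
	 * lock is held.
	 */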
583 	lt_reset();
584 	lt_thread_lock_grabbed = false;
585 	lt_thread_lock_success = true;
586 	lt_target_done_threads = 1;
587 	OSMemoryBarrier();
588 	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
589 	success = hw_lock_to(&lt_hw_lock, &hw_lock_test_give_up_policy, LCK_GRP_NULL);
590 	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
591 	if (real_ncpus == 1) {
592 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
593 	}
594 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
595 	lt_wait_for_lock_test_threads();
596 	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
597 	if (real_ncpus == 1) {
598 		mp_disable_preemption(); /* don't double-enable when we unlock */
599 	}
600 	hw_lock_unlock(&lt_hw_lock);
601 
602 	lt_reset();
603 	lt_thread_lock_grabbed = false;
604 	lt_thread_lock_success = true;
605 	lt_target_done_threads = 1;
606 	OSMemoryBarrier();
607 	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
608 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
609 	if (real_ncpus == 1) {
610 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
611 	}
612 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
613 	lt_wait_for_lock_test_threads();
614 	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
615 	if (real_ncpus == 1) {
616 		mp_disable_preemption(); /* don't double-enable when we unlock */
617 	}
618 	hw_lock_unlock(&lt_hw_lock);
619 
620 	success = lck_spin_try_lock(&lt_lck_spin_t);
621 	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
622 	success = lck_spin_try_lock(&lt_lck_spin_t);
623 	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
624 	lck_spin_unlock(&lt_lck_spin_t);
625 
626 	lt_reset();
627 	lt_thread_lock_grabbed = false;
628 	lt_thread_lock_success = true;
629 	lt_target_done_threads = 1;
630 	lt_start_trylock_thread(lt_trylock_spin_try_lock);
631 	lck_spin_lock(&lt_lck_spin_t);
632 	if (real_ncpus == 1) {
633 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
634 	}
635 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
636 	lt_wait_for_lock_test_threads();
637 	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
638 	if (real_ncpus == 1) {
639 		mp_disable_preemption(); /* don't double-enable when we unlock */
640 	}
641 	lck_spin_unlock(&lt_lck_spin_t);
642 
643 	return KERN_SUCCESS;
644 }
645 
646 static void
647 lt_thread(void *arg, wait_result_t wres __unused)
648 {
649 	void (*func)(void) = (void (*)(void))arg;
650 	uint32_t i;
651 
652 	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
653 		func();
654 	}
655 
656 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
657 }
658 
659 static void
660 lt_start_lock_thread(thread_continue_t func)
661 {
662 	thread_t thread;
663 	kern_return_t kr;
664 
665 	kr = kernel_thread_start(lt_thread, func, &thread);
666 	assert(kr == KERN_SUCCESS);
667 
668 	thread_deallocate(thread);
669 }
670 
671 #if __AMP__
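/*
 * Helpers for placing test threads on asymmetric (AMP) systems:
 * lt_bound_thread hard-binds each new thread to the next CPU id, while
 * lt_e_thread/lt_p_thread soft-bind the thread to the E- or P-cluster
 * before running the test body.
 */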
672 static void
673 lt_bound_thread(void *arg, wait_result_t wres __unused)
674 {
675 	void (*func)(void) = (void (*)(void))arg;
676 
677 	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);
678 
679 	processor_t processor = processor_list;
680 	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
681 		processor = processor->processor_list;
682 	}
683 
684 	if (processor != NULL) {
685 		thread_bind(processor);
686 	}
687 
688 	thread_block(THREAD_CONTINUE_NULL);
689 
690 	func();
691 
692 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
693 }
694 
695 static void
696 lt_e_thread(void *arg, wait_result_t wres __unused)
697 {
698 	void (*func)(void) = (void (*)(void))arg;
699 
700 	thread_t thread = current_thread();
701 
702 	thread_soft_bind_cluster_type(thread, 'e');
703 
704 	func();
705 
706 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
707 }
708 
709 static void
710 lt_p_thread(void *arg, wait_result_t wres __unused)
711 {
712 	void (*func)(void) = (void (*)(void))arg;
713 
714 	thread_t thread = current_thread();
715 
716 	thread_soft_bind_cluster_type(thread, 'p');
717 
718 	func();
719 
720 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
721 }
722 
723 static void
724 lt_start_lock_thread_with_bind(thread_continue_t bind_type, thread_continue_t func)
725 {
726 	thread_t thread;
727 	kern_return_t kr;
728 
729 	kr = kernel_thread_start(bind_type, func, &thread);
730 	assert(kr == KERN_SUCCESS);
731 
732 	thread_deallocate(thread);
733 }
734 #endif /* __AMP__ */
735 
736 static kern_return_t
737 lt_test_locks()
738 {
739 #if SCHED_HYGIENE_DEBUG
740 	/*
741 	 * When testing, the preemption disable threshold may be hit (for
742 	 * example when testing a lock timeout). To avoid this, the preemption
743 	 * disable measurement is temporarily disabled during lock testing.
744 	 */
745 	int old_mode = sched_preemption_disable_debug_mode;
746 	if (old_mode == SCHED_HYGIENE_MODE_PANIC) {
747 		sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
748 	}
749 #endif /* SCHED_HYGIENE_DEBUG */
750 
751 	kern_return_t kr = KERN_SUCCESS;
752 	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
753 	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);
754 
755 	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
756 	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
757 	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
758 	hw_lock_init(&lt_hw_lock);
759 
760 	T_LOG("Testing locks.");
761 
762 	/* Try locks (custom) */
763 	lt_reset();
764 
765 	T_LOG("Running try lock test.");
766 	kr = lt_test_trylocks();
767 	T_EXPECT_NULL(kr, "try lock test failed.");
768 
769 	/* Uncontended mutex */
770 	T_LOG("Running uncontended mutex test.");
771 	lt_reset();
772 	lt_target_done_threads = 1;
773 	lt_start_lock_thread(lt_grab_mutex);
774 	lt_wait_for_lock_test_threads();
775 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
776 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
777 
778 	/* Contended mutex */
779 	T_LOG("Running contended mutex test.");
780 	lt_reset();
781 	lt_target_done_threads = 3;
782 	lt_start_lock_thread(lt_grab_mutex);
783 	lt_start_lock_thread(lt_grab_mutex);
784 	lt_start_lock_thread(lt_grab_mutex);
785 	lt_wait_for_lock_test_threads();
786 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
787 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
788 
789 	/* Contended mutex: try locks */
790 	T_LOG("Running contended mutex trylock test.");
791 	lt_reset();
792 	lt_target_done_threads = 3;
793 	lt_start_lock_thread(lt_grab_mutex_with_try);
794 	lt_start_lock_thread(lt_grab_mutex_with_try);
795 	lt_start_lock_thread(lt_grab_mutex_with_try);
796 	lt_wait_for_lock_test_threads();
797 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
798 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
799 
800 	/* Uncontended exclusive rwlock */
801 	T_LOG("Running uncontended exclusive rwlock test.");
802 	lt_reset();
803 	lt_target_done_threads = 1;
804 	lt_start_lock_thread(lt_grab_rw_exclusive);
805 	lt_wait_for_lock_test_threads();
806 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
807 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
808 
809 	/* Uncontended shared rwlock */
810 
811 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
812 	 *  T_LOG("Running uncontended shared rwlock test.");
813 	 *  lt_reset();
814 	 *  lt_target_done_threads = 1;
815 	 *  lt_start_lock_thread(lt_grab_rw_shared);
816 	 *  lt_wait_for_lock_test_threads();
817 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
818 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
819 	 */
820 
821 	/* Contended exclusive rwlock */
822 	T_LOG("Running contended exclusive rwlock test.");
823 	lt_reset();
824 	lt_target_done_threads = 3;
825 	lt_start_lock_thread(lt_grab_rw_exclusive);
826 	lt_start_lock_thread(lt_grab_rw_exclusive);
827 	lt_start_lock_thread(lt_grab_rw_exclusive);
828 	lt_wait_for_lock_test_threads();
829 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
830 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
831 
832 	/* One shared, two exclusive */
833 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
834 	 *  T_LOG("Running test with one shared and two exclusive rw lock threads.");
835 	 *  lt_reset();
836 	 *  lt_target_done_threads = 3;
837 	 *  lt_start_lock_thread(lt_grab_rw_shared);
838 	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
839 	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
840 	 *  lt_wait_for_lock_test_threads();
841 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
842 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
843 	 */
844 
845 	/* Four shared */
846 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
847 	 *  T_LOG("Running test with four shared holders.");
848 	 *  lt_reset();
849 	 *  lt_target_done_threads = 4;
850 	 *  lt_start_lock_thread(lt_grab_rw_shared);
851 	 *  lt_start_lock_thread(lt_grab_rw_shared);
852 	 *  lt_start_lock_thread(lt_grab_rw_shared);
853 	 *  lt_start_lock_thread(lt_grab_rw_shared);
854 	 *  lt_wait_for_lock_test_threads();
855 	 *  T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
856 	 */
857 
858 	/* Three doing upgrades and downgrades */
859 	T_LOG("Running test with threads upgrading and downgrading.");
860 	lt_reset();
861 	lt_target_done_threads = 3;
862 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
863 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
864 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
865 	lt_wait_for_lock_test_threads();
866 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
867 	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
868 	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
869 
870 	/* Uncontended - exclusive trylocks */
871 	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
872 	lt_reset();
873 	lt_target_done_threads = 1;
874 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
875 	lt_wait_for_lock_test_threads();
876 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
877 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
878 
879 	/* Uncontended - shared trylocks */
880 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
881 	 *  T_LOG("Running test with single thread doing shared rwlock trylocks.");
882 	 *  lt_reset();
883 	 *  lt_target_done_threads = 1;
884 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
885 	 *  lt_wait_for_lock_test_threads();
886 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
887 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
888 	 */
889 
890 	/* Three doing exclusive trylocks */
891 	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
892 	lt_reset();
893 	lt_target_done_threads = 3;
894 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
895 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
896 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
897 	lt_wait_for_lock_test_threads();
898 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
899 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
900 
901 	/* Three doing shared trylocks */
902 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
903 	 *  T_LOG("Running test with threads doing shared rwlock trylocks.");
904 	 *  lt_reset();
905 	 *  lt_target_done_threads = 3;
906 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
907 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
908 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
909 	 *  lt_wait_for_lock_test_threads();
910 	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
911 	 *  T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
912 	 */
913 
914 	/* Three doing various trylocks */
915 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
916 	 *  T_LOG("Running test with threads doing mixed rwlock trylocks.");
917 	 *  lt_reset();
918 	 *  lt_target_done_threads = 4;
919 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
920 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
921 	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
922 	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
923 	 *  lt_wait_for_lock_test_threads();
924 	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
925 	 *  T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
926 	 */
927 
928 	/* HW locks */
929 	T_LOG("Running test with hw_lock_lock()");
930 	lt_reset();
931 	lt_target_done_threads = 3;
932 	lt_start_lock_thread(lt_grab_hw_lock);
933 	lt_start_lock_thread(lt_grab_hw_lock);
934 	lt_start_lock_thread(lt_grab_hw_lock);
935 	lt_wait_for_lock_test_threads();
936 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
937 
938 #if __AMP__
939 	/* Ticket locks stress test */
940 	T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
941 	extern unsigned int real_ncpus;
942 	lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
943 	lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
944 	lt_reset();
945 	lt_target_done_threads = real_ncpus;
946 	uint thread_count = 0;
947 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
948 		lt_start_lock_thread_with_bind(lt_bound_thread, lt_stress_ticket_lock);
949 		thread_count++;
950 	}
951 	T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
952 	lt_wait_for_lock_test_threads();
953 	bool starvation = false;
954 	uint total_local_count = 0;
955 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
956 		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
957 		total_local_count += lt_stress_local_counters[processor->cpu_id];
958 	}
959 	if (mach_absolute_time() > lt_setup_timeout) {
960 		T_FAIL("Stress test setup timed out after %d seconds", LOCK_TEST_SETUP_TIMEOUT_SEC);
961 	} else if (total_local_count != lt_counter) {
962 		T_FAIL("Lock failure\n");
963 	} else if (starvation) {
964 		T_FAIL("Lock starvation found\n");
965 	} else {
966 		T_PASS("Ticket locks stress test with lck_ticket_lock() (%u total acquires)", total_local_count);
967 	}
968 
969 	/* AMP ticket locks stress test */
970 	T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
971 	lt_reset();
972 	lt_target_done_threads = real_ncpus;
973 	thread_count = 0;
974 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
975 		processor_set_t pset = processor->processor_set;
976 		switch (pset->pset_cluster_type) {
977 		case PSET_AMP_P:
978 			lt_start_lock_thread_with_bind(lt_p_thread, lt_stress_ticket_lock);
979 			break;
980 		case PSET_AMP_E:
981 			lt_start_lock_thread_with_bind(lt_e_thread, lt_stress_ticket_lock);
982 			break;
983 		default:
984 			lt_start_lock_thread(lt_stress_ticket_lock);
985 			break;
986 		}
987 		thread_count++;
988 	}
989 	T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
990 	lt_wait_for_lock_test_threads();
991 #endif /* __AMP__ */
992 
993 	/* HW locks: trylocks */
994 	T_LOG("Running test with hw_lock_try()");
995 	lt_reset();
996 	lt_target_done_threads = 3;
997 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
998 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
999 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
1000 	lt_wait_for_lock_test_threads();
1001 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1002 
1003 	/* HW locks: with timeout */
1004 	T_LOG("Running test with hw_lock_to()");
1005 	lt_reset();
1006 	lt_target_done_threads = 3;
1007 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1008 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1009 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1010 	lt_wait_for_lock_test_threads();
1011 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1012 
1013 	/* Spin locks */
1014 	T_LOG("Running test with lck_spin_lock()");
1015 	lt_reset();
1016 	lt_target_done_threads = 3;
1017 	lt_start_lock_thread(lt_grab_spin_lock);
1018 	lt_start_lock_thread(lt_grab_spin_lock);
1019 	lt_start_lock_thread(lt_grab_spin_lock);
1020 	lt_wait_for_lock_test_threads();
1021 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1022 
1023 	/* Spin locks: trylocks */
1024 	T_LOG("Running test with lck_spin_try_lock()");
1025 	lt_reset();
1026 	lt_target_done_threads = 3;
1027 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1028 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1029 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1030 	lt_wait_for_lock_test_threads();
1031 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1032 
1033 #if SCHED_HYGIENE_DEBUG
1034 	sched_preemption_disable_debug_mode = old_mode;
1035 #endif /* SCHED_HYGIENE_DEBUG */
1036 
1037 	return KERN_SUCCESS;
1038 }
1039 
1040 #define MT_MAX_ARGS             8
1041 #define MT_INITIAL_VALUE        0xfeedbeef
1042 #define MT_W_VAL                (0x00000000feedbeefULL) /* Drop in zeros */
1043 #define MT_S_VAL                (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
1044 #define MT_L_VAL                (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
1045 
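/*
 * Munger naming convention (matches the expected values above): each letter
 * describes one argument -- 'w' is a 32-bit word zero-extended to 64 bits,
 * 's' a signed 32-bit value sign-extended, and 'l' a 64-bit long built from
 * two input words. mt_in_words counts the 32-bit input words consumed and
 * mt_nout the 64-bit output arguments produced.
 */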
1046 typedef void (*sy_munge_t)(void*);
1047 
1048 #define MT_FUNC(x) #x, x
1049 struct munger_test {
1050 	const char      *mt_name;
1051 	sy_munge_t      mt_func;
1052 	uint32_t        mt_in_words;
1053 	uint32_t        mt_nout;
1054 	uint64_t        mt_expected[MT_MAX_ARGS];
1055 } munger_tests[] = {
1056 	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
1057 	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
1058 	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1059 	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1060 	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1061 	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1062 	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1063 	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1064 	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
1065 	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1066 	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1067 	{MT_FUNC(munge_wwlllll), 12, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1068 	{MT_FUNC(munge_wwllllll), 14, 8, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1069 	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1070 	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1071 	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1072 	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1073 	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1074 	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1075 	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1076 	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1077 	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1078 	{MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1079 	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1080 	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1081 	{MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1082 	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1083 	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1084 	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1085 	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1086 	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1087 	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1088 	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1089 	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1090 	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1091 	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
1092 	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1093 	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1094 	{MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1095 	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
1096 	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
1097 	{MT_FUNC(munge_lww), 4, 3, {MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1098 	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1099 	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1100 	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1101 	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
1102 };
1103 
1104 #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
1105 
1106 static void
1107 mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
1108 {
1109 	uint32_t i;
1110 
1111 	for (i = 0; i < in_words; i++) {
1112 		data[i] = MT_INITIAL_VALUE;
1113 	}
1114 
1115 	if (in_words * sizeof(uint32_t) < total_size) {
1116 		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
1117 	}
1118 }
1119 
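/*
 * For each table entry: fill the argument buffer with the 32-bit sentinel,
 * run the munger in place, and compare every 64-bit output slot against the
 * expected pattern.
 */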
1120 static void
1121 mt_test_mungers()
1122 {
1123 	uint64_t data[MT_MAX_ARGS];
1124 	uint32_t i, j;
1125 
1126 	for (i = 0; i < MT_TEST_COUNT; i++) {
1127 		struct munger_test *test = &munger_tests[i];
1128 		int pass = 1;
1129 
1130 		T_LOG("Testing %s", test->mt_name);
1131 
1132 		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
1133 		test->mt_func(data);
1134 
1135 		for (j = 0; j < test->mt_nout; j++) {
1136 			if (data[j] != test->mt_expected[j]) {
1137 				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
1138 				pass = 0;
1139 			}
1140 		}
1141 		if (pass) {
1142 			T_PASS(test->mt_name);
1143 		}
1144 	}
1145 }
1146 
1147 #if defined(HAS_APPLE_PAC)
1148 
1149 
1150 kern_return_t
1151 arm64_ropjop_test()
1152 {
1153 	T_LOG("Testing ROP/JOP");
1154 
1155 	/* how is ROP/JOP configured */
1156 	boolean_t config_rop_enabled = TRUE;
1157 	boolean_t config_jop_enabled = TRUE;
1158 
1159 
1160 	if (config_jop_enabled) {
1161 		/* jop key */
1162 		uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
1163 		uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");
1164 
1165 		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
1166 	}
1167 
1168 	if (config_rop_enabled) {
1169 		/* rop key */
1170 		uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
1171 		uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");
1172 
1173 		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
1174 
1175 		/* sign a KVA (the address of this function) */
1176 		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);
1177 
1178 		/* assert it was signed (changed) */
1179 		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);
1180 
1181 		/* authenticate the newly signed KVA */
1182 		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);
1183 
1184 		/* assert the authed KVA is the original KVA */
1185 		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);
1186 
1187 		/* corrupt a signed ptr, auth it, ensure auth failed */
1188 		uint64_t kva_corrupted = kva_signed ^ 1;
1189 
1190 		/* authenticate the corrupted pointer */
1191 		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);
1192 
1193 		/* when AuthIB fails, bits 63:62 will be set to 2'b10 */
1194 		uint64_t auth_fail_mask = 3ULL << 61;
1195 		uint64_t authib_fail = 2ULL << 61;
1196 
1197 		/* assert the failed authIB of corrupted pointer is tagged */
1198 		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
1199 	}
1200 
1201 	return KERN_SUCCESS;
1202 }
1203 #endif /* defined(HAS_APPLE_PAC) */
1204 
1205 #if __ARM_PAN_AVAILABLE__
1206 
1207 struct pan_test_thread_args {
1208 	volatile bool join;
1209 };
1210 
1211 static void
1212 arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
1213 {
1214 	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1215 
1216 	struct pan_test_thread_args *args = arg;
1217 
1218 	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
1219 		thread_bind(p);
1220 		thread_block(THREAD_CONTINUE_NULL);
1221 		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
1222 		arm64_pan_test();
1223 	}
1224 
1225 	/* unbind thread from specific cpu */
1226 	thread_bind(PROCESSOR_NULL);
1227 	thread_block(THREAD_CONTINUE_NULL);
1228 
1229 	while (!args->join) {
1230 		;
1231 	}
1232 
1233 	thread_wakeup(args);
1234 }
1235 
1236 kern_return_t
1237 arm64_late_pan_test()
1238 {
1239 	thread_t thread;
1240 	kern_return_t kr;
1241 
1242 	struct pan_test_thread_args args;
1243 	args.join = false;
1244 
1245 	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
1246 	assert(kr == KERN_SUCCESS);
1247 
1248 	thread_deallocate(thread);
1249 
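	/*
	 * Rendezvous with the test thread: wait on &args, then set args.join so
	 * the helper (which spins on it after unbinding) calls thread_wakeup().
	 */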
1250 	assert_wait(&args, THREAD_UNINT);
1251 	args.join = true;
1252 	thread_block(THREAD_CONTINUE_NULL);
1253 	return KERN_SUCCESS;
1254 }
1255 
1256 // Disable KASAN checking for PAN tests as the fixed commpage address doesn't have a shadow mapping
1257 
1258 static NOKASAN bool
1259 arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
1260 {
1261 	bool retval                 = false;
1262 	uint64_t esr                = get_saved_state_esr(state);
1263 	esr_exception_class_t class = ESR_EC(esr);
1264 	fault_status_t fsc          = ISS_IA_FSC(ESR_ISS(esr));
1265 	uint32_t cpsr               = get_saved_state_cpsr(state);
1266 	uint64_t far                = get_saved_state_far(state);
1267 
1268 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
1269 	    (cpsr & PSR64_PAN) &&
1270 	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
1271 		++pan_exception_level;
1272 		// read the user-accessible value to make sure
1273 		// pan is enabled and produces a 2nd fault from
1274 		// the exception handler
1275 		if (pan_exception_level == 1) {
1276 			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
1277 			pan_fault_value = *(volatile char *)far;
1278 			ml_expect_fault_end();
1279 			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1280 		}
1281 		// this fault address is used for PAN test
1282 		// disable PAN and rerun
1283 		mask_saved_state_cpsr(state, 0, PSR64_PAN);
1284 
1285 		retval = true;
1286 	}
1287 
1288 	return retval;
1289 }
1290 
1291 static NOKASAN bool
1292 arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
1293 {
1294 	bool retval             = false;
1295 	uint64_t esr            = get_saved_state_esr(state);
1296 	esr_exception_class_t class = ESR_EC(esr);
1297 	fault_status_t fsc      = ISS_IA_FSC(ESR_ISS(esr));
1298 	uint32_t cpsr           = get_saved_state_cpsr(state);
1299 
1300 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
1301 	    !(cpsr & PSR64_PAN)) {
1302 		++pan_exception_level;
1303 		// On an exception taken from a PAN-disabled context, verify
1304 		// that PAN is re-enabled for the exception handler and that
1305 		// accessing the test address produces a PAN fault.
1306 		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
1307 		pan_fault_value = *(volatile char *)pan_test_addr;
1308 		ml_expect_fault_end();
1309 		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1310 		add_saved_state_pc(state, 4);
1311 
1312 		retval = true;
1313 	}
1314 
1315 	return retval;
1316 }
1317 
1318 NOKASAN kern_return_t
1319 arm64_pan_test()
1320 {
1321 	bool values_match = false;
1322 	vm_offset_t priv_addr = 0;
1323 
1324 	T_LOG("Testing PAN.");
1325 
1326 
1327 	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");
1328 
1329 	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1330 
1331 	pan_exception_level = 0;
1332 	pan_fault_value = 0xDE;
1333 
1334 	// Create an empty pmap, so we can map a user-accessible page
1335 	pmap_t pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
1336 	T_ASSERT(pmap != NULL, NULL);
1337 
1338 	// Get a physical page to back the mapping
1339 	vm_page_t vm_page = vm_page_grab();
1340 	T_ASSERT(vm_page != VM_PAGE_NULL, NULL);
1341 	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(vm_page);
1342 	pmap_paddr_t pa = ptoa(pn);
1343 
1344 	// Write to the underlying physical page through the physical aperture
1345 	// so we can test against a known value
1346 	priv_addr = phystokv((pmap_paddr_t)pa);
1347 	*(volatile char *)priv_addr = 0xAB;
1348 
1349 	// Map the page in the user address space at some, non-zero address
1350 	pan_test_addr = PAGE_SIZE;
1351 	pmap_enter(pmap, pan_test_addr, pn, VM_PROT_READ, VM_PROT_READ, 0, true, PMAP_MAPPING_TYPE_INFER);
1352 
1353 	// Context-switch with PAN disabled is prohibited; prevent test logging from
1354 	// triggering a voluntary context switch.
1355 	mp_disable_preemption();
1356 
1357 	// Insert the user's pmap root table pointer in TTBR0
1358 	thread_t thread = current_thread();
1359 	pmap_t old_pmap = vm_map_pmap(thread->map);
1360 	pmap_switch(pmap, thread);
1361 
1362 	// Below should trigger a PAN exception as pan_test_addr is accessible
1363 	// in user mode
1364 	// The exception handler, upon recognizing the fault address is pan_test_addr,
1365 	// will disable PAN and rerun this instruction successfully
1366 	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
1367 	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
1368 	ml_expect_fault_end();
1369 	T_ASSERT(values_match, NULL);
1370 
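	// Two faults are expected: the test access above, plus the nested access
	// performed from inside the first fault handler to confirm PAN was
	// re-enabled on exception entry.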
1371 	T_ASSERT(pan_exception_level == 2, NULL);
1372 
1373 	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1374 
1375 	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1376 
1377 	pan_exception_level = 0;
1378 	pan_fault_value = 0xAD;
1379 	pan_ro_addr = (vm_offset_t) &pan_ro_value;
1380 
1381 	// Force a permission fault while PAN is disabled to make sure PAN is
1382 	// re-enabled during the exception handler.
1383 	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
1384 	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
1385 	ml_expect_fault_end();
1386 
1387 	T_ASSERT(pan_exception_level == 2, NULL);
1388 
1389 	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1390 
1391 	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1392 
1393 	pmap_switch(old_pmap, thread);
1394 
1395 	pan_ro_addr = 0;
1396 
1397 	__builtin_arm_wsr("pan", 1);
1398 
1399 	mp_enable_preemption();
1400 
1401 	pmap_remove(pmap, pan_test_addr, pan_test_addr + PAGE_SIZE);
1402 	pan_test_addr = 0;
1403 
1404 	vm_page_lock_queues();
1405 	vm_page_free(vm_page);
1406 	vm_page_unlock_queues();
1407 	pmap_destroy(pmap);
1408 
1409 	return KERN_SUCCESS;
1410 }
1411 #endif /* __ARM_PAN_AVAILABLE__ */
1412 
1413 
1414 kern_return_t
1415 arm64_lock_test()
1416 {
1417 	return lt_test_locks();
1418 }
1419 
1420 kern_return_t
1421 arm64_munger_test()
1422 {
1423 	mt_test_mungers();
1424 	return 0;
1425 }
1426 
1427 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
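/*
 * ctrr_ro_test lives in late-locked read-only data (via SECURITY_READ_ONLY_LATE,
 * inside the CTRR-protected range), while ctrr_nx_test is a writable global
 * holding an AArch64 RET encoding (outside the protected range), so the test
 * can provoke both a write fault and an execute fault through aliased mappings.
 */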
1428 SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
1429 uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
1430 volatile uint64_t ctrr_exception_esr;
1431 vm_offset_t ctrr_test_va;
1432 vm_offset_t ctrr_test_page;
1433 
1434 kern_return_t
1435 ctrr_test(void)
1436 {
1437 	processor_t p;
1438 	boolean_t ctrr_disable = FALSE;
1439 
1440 	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
1441 
1442 #if CONFIG_CSR_FROM_DT
1443 	if (csr_unsafe_kernel_text) {
1444 		ctrr_disable = TRUE;
1445 	}
1446 #endif /* CONFIG_CSR_FROM_DT */
1447 
1448 	if (ctrr_disable) {
1449 		T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
1450 		return KERN_SUCCESS;
1451 	}
1452 
1453 	T_LOG("Running CTRR test.");
1454 
1455 	for (p = processor_list; p != NULL; p = p->processor_list) {
1456 		thread_bind(p);
1457 		thread_block(THREAD_CONTINUE_NULL);
1458 		T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
1459 		ctrr_test_cpu();
1460 	}
1461 
1462 	/* unbind thread from specific cpu */
1463 	thread_bind(PROCESSOR_NULL);
1464 	thread_block(THREAD_CONTINUE_NULL);
1465 
1466 	return KERN_SUCCESS;
1467 }
1468 
1469 static bool
1470 ctrr_test_ro_fault_handler(arm_saved_state_t * state)
1471 {
1472 	bool retval                 = false;
1473 	uint64_t esr                = get_saved_state_esr(state);
1474 	esr_exception_class_t class = ESR_EC(esr);
1475 	fault_status_t fsc          = ISS_DA_FSC(ESR_ISS(esr));
1476 
1477 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1478 		ctrr_exception_esr = esr;
1479 		add_saved_state_pc(state, 4);
1480 		retval = true;
1481 	}
1482 
1483 	return retval;
1484 }
1485 
1486 static bool
1487 ctrr_test_nx_fault_handler(arm_saved_state_t * state)
1488 {
1489 	bool retval                 = false;
1490 	uint64_t esr                = get_saved_state_esr(state);
1491 	esr_exception_class_t class = ESR_EC(esr);
1492 	fault_status_t fsc          = ISS_IA_FSC(ESR_ISS(esr));
1493 
1494 	if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1495 		ctrr_exception_esr = esr;
1496 		/* return to the instruction immediately after the call to NX page */
1497 		set_saved_state_pc(state, get_saved_state_lr(state));
1498 #if BTI_ENFORCED
1499 		/* Clear BTYPE to prevent taking another exception on ERET */
1500 		uint32_t spsr = get_saved_state_cpsr(state);
1501 		spsr &= ~PSR_BTYPE_MASK;
1502 		set_saved_state_cpsr(state, spsr);
1503 #endif /* BTI_ENFORCED */
1504 		retval = true;
1505 	}
1506 
1507 	return retval;
1508 }
1509 
1510 // Disable KASAN checking for CTRR tests as the test VA doesn't have a shadow mapping
1511 
1512 /* test CTRR on a cpu, caller to bind thread to desired cpu */
1513 /* ctrr_test_page was reserved during bootstrap process */
1514 NOKASAN kern_return_t
1515 ctrr_test_cpu(void)
1516 {
1517 	ppnum_t ro_pn, nx_pn;
1518 	uint64_t *ctrr_ro_test_ptr;
1519 	void (*ctrr_nx_test_ptr)(void);
1520 	kern_return_t kr;
1521 	uint64_t prot = 0;
1522 	extern vm_offset_t virtual_space_start;
1523 
1524 	/* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */
1525 
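	/*
	 * The CTRR lower/upper bound registers hold the physical bounds of the
	 * locked-down kernel text region; convert them to KVAs so the test can
	 * check which of its symbols fall inside that region.
	 */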
1526 #if (KERNEL_CTRR_VERSION == 3)
1527 	const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_0_C11_C0_2");
1528 	const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_0_C11_C0_3");
1529 #else /* (KERNEL_CTRR_VERSION == 3) */
1530 	const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_4_C15_C2_3");
1531 	const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_4_C15_C2_4");
1532 #endif /* (KERNEL_CTRR_VERSION == 3) */
1533 	vm_offset_t rorgn_begin_va = phystokv(rorgn_lwr);
1534 	vm_offset_t rorgn_end_va = phystokv(rorgn_upr) + 0x1000;
1535 	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
1536 	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
1537 
1538 	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
1539 	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");
1540 
1541 	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
1542 	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
1543 	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");
1544 
1545 	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
1546 	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
1547 
1548 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1549 	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");
1550 
1551 	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
1552 	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
1553 	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
1554 	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");
1555 
1556 	// assert entire mmu prot path (Hierarchical protection model) is NOT RO
1557 	// fetch effective block level protections from table/block entries
1558 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1559 	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");
1560 
1561 	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
1562 	ctrr_ro_test_ptr = (void *)ctrr_test_va;
1563 
1564 	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);
1565 
1566 	// should cause data abort
1567 	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
1568 	*ctrr_ro_test_ptr = 1;
1569 	ml_expect_fault_end();
1570 
1571 	// ensure write permission fault at expected level
1572 	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault
1573 
1574 	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
1575 	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1576 	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");
1577 
1578 	ctrr_test_va = 0;
1579 	ctrr_exception_esr = 0;
1580 	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1581 
1582 	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);
1583 
1584 	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
1585 	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
1586 	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");
1587 
1588 	// assert entire mmu prot path (Hierarchical protection model) is NOT XN
1589 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1590 	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");
1591 
1592 	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
1593 #if __has_feature(ptrauth_calls)
1594 	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
1595 #else
1596 	ctrr_nx_test_ptr = (void *)ctrr_test_va;
1597 #endif
1598 
1599 	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);
1600 
1601 	// should cause prefetch abort
1602 	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
1603 	ctrr_nx_test_ptr();
1604 	ml_expect_fault_end();
1605 
1606 	// TODO: ensure execute permission fault at expected level
1607 	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
1608 	T_EXPECT(ISS_IA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1609 
1610 	ctrr_test_va = 0;
1611 	ctrr_exception_esr = 0;
1612 
1613 	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1614 
1615 	T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
1616 	for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
1617 		volatile uint64_t x = *(uint64_t *)addr;
1618 		(void) x; /* read for side effect only */
1619 	}
1620 
1621 	return KERN_SUCCESS;
1622 }
1623 #endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
1624 
1625 
1626 /**
1627  * Explicitly assert that xnu is still uniprocessor before running a POST test.
1628  *
1629  * In practice, tests in this module can safely manipulate CPU state without
1630  * fear of getting preempted.  There's no way for cpu_boot_thread() to bring up
1631  * the secondary CPUs until StartIOKitMatching() completes, and arm64 orders
1632  * kern_post_test() before StartIOKitMatching().
1633  *
1634  * But this is also an implementation detail.  Tests that rely on this ordering
1635  * should call assert_uniprocessor(), so that we can figure out a workaround
1636  * on the off-chance this ordering ever changes.
1637  */
1638 __unused static void
1639 assert_uniprocessor(void)
1640 {
1641 	extern unsigned int real_ncpus;
1642 	unsigned int ncpus = os_atomic_load(&real_ncpus, relaxed);
1643 	T_QUIET; T_ASSERT_EQ_UINT(1, ncpus, "arm64 kernel POST tests should run before any secondary CPUs are brought up");
1644 }
1645 
1646 
1647 #if CONFIG_SPTM
1648 volatile uint8_t xnu_post_panic_lockdown_did_fire = false;
1649 typedef uint64_t (panic_lockdown_helper_fcn_t)(uint64_t raw);
1650 typedef bool (panic_lockdown_recovery_fcn_t)(arm_saved_state_t *);
1651 
1652 /* SP0 vector tests */
1653 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_load;
1654 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_gdbtrap;
1655 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c470;
1656 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c471;
1657 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c472;
1658 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c473;
1659 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_telemetry_brk_ff00;
1660 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_br_auth_fail;
1661 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_ldr_auth_fail;
1662 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_fpac;
1663 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_copyio;
1664 extern uint8_t arm64_panic_lockdown_test_copyio_fault_pc;
1665 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_bti_telemetry;
1666 
1667 extern int gARM_FEAT_FPACCOMBINE;
1668 
1669 /* SP1 vector tests */
1670 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_invalid_stack;
1671 extern bool arm64_panic_lockdown_test_sp1_invalid_stack_handler(arm_saved_state_t *);
1672 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_exception_in_vector;
1673 extern panic_lockdown_helper_fcn_t el1_sp1_synchronous_raise_exception_in_vector;
1674 extern bool arm64_panic_lockdown_test_sp1_exception_in_vector_handler(arm_saved_state_t *);
1675 
1676 #if DEVELOPMENT || DEBUG
1677 extern struct panic_lockdown_initiator_state debug_panic_lockdown_initiator_state;
1678 #endif /* DEVELOPMENT || DEBUG */
1679 
1680 typedef struct arm64_panic_lockdown_test_case {
1681 	const char *name;
1682 	panic_lockdown_helper_fcn_t *func;
1683 	uint64_t arg;
1684 	esr_exception_class_t expected_ec;
1685 	bool check_fs;
1686 	fault_status_t expected_fs;
1687 	bool expect_lockdown_exceptions_masked;
1688 	bool expect_lockdown_exceptions_unmasked;
1689 	bool override_expected_fault_pc_valid;
1690 	uint64_t override_expected_fault_pc;
1691 } arm64_panic_lockdown_test_case_s;
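/*
 * Each test case below is run twice by panic_lockdown_expect_test(): once with
 * interrupts unmasked and once with them masked, comparing the corresponding
 * expect_lockdown_* flag against xnu_post_panic_lockdown_did_fire.  The harness
 * arms an expected-fault handler at the helper's entry PC (or at
 * override_expected_fault_pc when valid) so the provoked exception can be
 * recovered from by forging an early return.
 */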
1692 
1693 static arm64_panic_lockdown_test_case_s *arm64_panic_lockdown_active_test;
1694 static volatile bool arm64_panic_lockdown_caught_exception;
1695 
1696 static bool
1697 arm64_panic_lockdown_test_exception_handler(arm_saved_state_t * state)
1698 {
1699 	uint64_t esr = get_saved_state_esr(state);
1700 	esr_exception_class_t class = ESR_EC(esr);
1701 	fault_status_t fs = ISS_DA_FSC(ESR_ISS(esr));
1702 
1703 	if (!arm64_panic_lockdown_active_test ||
1704 	    class != arm64_panic_lockdown_active_test->expected_ec ||
1705 	    (arm64_panic_lockdown_active_test->check_fs &&
1706 	    fs != arm64_panic_lockdown_active_test->expected_fs)) {
1707 		return false;
1708 	}
1709 
1710 
1711 #if BTI_ENFORCED
1712 	/* Clear BTYPE to prevent taking another exception on ERET */
1713 	uint32_t spsr = get_saved_state_cpsr(state);
1714 	spsr &= ~PSR_BTYPE_MASK;
1715 	set_saved_state_cpsr(state, spsr);
1716 #endif /* BTI_ENFORCED */
1717 
1718 	/* We got the expected exception, recover by forging an early return */
1719 	set_saved_state_pc(state, get_saved_state_lr(state));
1720 	arm64_panic_lockdown_caught_exception = true;
1721 
1722 	return true;
1723 }
1724 
1725 static void
1726 panic_lockdown_expect_test(const char *treatment,
1727     arm64_panic_lockdown_test_case_s *test,
1728     bool expect_lockdown,
1729     bool mask_interrupts)
1730 {
1731 	int ints = 0;
1732 
1733 	arm64_panic_lockdown_active_test = test;
1734 	xnu_post_panic_lockdown_did_fire = false;
1735 	arm64_panic_lockdown_caught_exception = false;
1736 
1737 	uintptr_t fault_pc;
1738 	if (test->override_expected_fault_pc_valid) {
1739 		fault_pc = (uintptr_t)test->override_expected_fault_pc;
1740 	} else {
1741 		fault_pc = (uintptr_t)test->func;
1742 #ifdef BTI_ENFORCED
1743 		/* When BTI is enabled, we expect the fault to occur after the landing pad */
1744 		fault_pc += 4;
1745 #endif /* BTI_ENFORCED */
1746 	}
1747 
1748 
1749 	ml_expect_fault_pc_begin(
1750 		arm64_panic_lockdown_test_exception_handler,
1751 		fault_pc);
1752 
1753 	if (mask_interrupts) {
1754 		ints = ml_set_interrupts_enabled(FALSE);
1755 	}
1756 
1757 	(void)test->func(test->arg);
1758 
1759 	if (mask_interrupts) {
1760 		(void)ml_set_interrupts_enabled(ints);
1761 	}
1762 
1763 	ml_expect_fault_end();
1764 
1765 	if (expect_lockdown == xnu_post_panic_lockdown_did_fire &&
1766 	    arm64_panic_lockdown_caught_exception) {
1767 		T_PASS("%s + %s OK\n", test->name, treatment);
1768 	} else {
1769 		T_FAIL(
1770 			"%s + %s FAIL (expected lockdown: %d, did lockdown: %d, caught exception: %d)\n",
1771 			test->name, treatment,
1772 			expect_lockdown, xnu_post_panic_lockdown_did_fire,
1773 			arm64_panic_lockdown_caught_exception);
1774 	}
1775 
1776 #if DEVELOPMENT || DEBUG
1777 	/* Check that the debug info is minimally functional */
1778 	if (expect_lockdown) {
1779 		T_EXPECT_NE_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
1780 		    0ULL, "Initiator PC set");
1781 	} else {
1782 		T_EXPECT_EQ_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
1783 		    0ULL, "Initiator PC not set");
1784 	}
1785 
1786 	/* Reset the debug data so it can be filled later if needed */
1787 	debug_panic_lockdown_initiator_state.initiator_pc = 0;
1788 #endif /* DEVELOPMENT || DEBUG */
1789 }
1790 
1791 static void
1792 panic_lockdown_expect_fault_raw(const char *label,
1793     panic_lockdown_helper_fcn_t entrypoint,
1794     panic_lockdown_helper_fcn_t faulting_function,
1795     expected_fault_handler_t fault_handler)
1796 {
1797 	uint64_t test_success = 0;
1798 	xnu_post_panic_lockdown_did_fire = false;
1799 
1800 	uintptr_t fault_pc = (uintptr_t)faulting_function;
1801 #ifdef BTI_ENFORCED
1802 	/* When BTI is enabled, we expect the fault to occur after the landing pad */
1803 	fault_pc += 4;
1804 #endif /* BTI_ENFORCED */
1805 
1806 	ml_expect_fault_pc_begin(fault_handler, fault_pc);
1807 
1808 	test_success = entrypoint(0);
1809 
1810 	ml_expect_fault_end();
1811 
1812 	if (test_success && xnu_post_panic_lockdown_did_fire) {
1813 		T_PASS("%s OK\n", label);
1814 	} else {
1815 		T_FAIL("%s FAIL (test returned: %llu, did lockdown: %d)\n",
1816 		    label, test_success, xnu_post_panic_lockdown_did_fire);
1817 	}
1818 }
1819 
1820 /**
1821  * Returns a pointer which is guaranteed to be invalid under IA with the zero
1822  * discriminator.
1823  *
1824  * This is somewhat overcomplicated since it's exceedingly unlikely that
1825  * any given pointer will have a zero PAC (and thus break the test), but it's
1826  * easy enough to avoid the problem.
1827  */
1828 static uint64_t
1829 panic_lockdown_pacia_get_invalid_ptr(void)
1830 {
1831 	char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1832 	char *signed_ptr = NULL;
1833 	do {
1834 		unsigned_ptr += 4 /* avoid alignment exceptions */;
1835 		signed_ptr = ptrauth_sign_unauthenticated(
1836 			unsigned_ptr,
1837 			ptrauth_key_asia,
1838 			0);
1839 	} while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1840 
1841 	return (uint64_t)unsigned_ptr;
1842 }
1843 
1844 /**
1845  * Returns a pointer which is guaranteed to be invalid under DA with the zero
1846  * discriminator.
1847  */
1848 static uint64_t
1849 panic_lockdown_pacda_get_invalid_ptr(void)
1850 {
1851 	char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1852 	char *signed_ptr = NULL;
1853 	do {
1854 		unsigned_ptr += 8 /* avoid alignment exceptions */;
1855 		signed_ptr = ptrauth_sign_unauthenticated(
1856 			unsigned_ptr,
1857 			ptrauth_key_asda,
1858 			0);
1859 	} while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1860 
1861 	return (uint64_t)unsigned_ptr;
1862 }
1863 
1864 kern_return_t
1865 arm64_panic_lockdown_test(void)
1866 {
1867 #if __has_feature(ptrauth_calls)
1868 	uint64_t ia_invalid = panic_lockdown_pacia_get_invalid_ptr();
1869 #endif /* ptrauth_calls */
1870 
1871 	arm64_panic_lockdown_test_case_s tests[] = {
1872 		{
1873 			.name = "arm64_panic_lockdown_test_load",
1874 			.func = &arm64_panic_lockdown_test_load,
1875 			/* Trigger a null deref */
1876 			.arg = (uint64_t)NULL,
1877 			.expected_ec = ESR_EC_DABORT_EL1,
1878 			.expect_lockdown_exceptions_masked = true,
1879 			.expect_lockdown_exceptions_unmasked = false,
1880 		},
1881 		{
1882 			.name = "arm64_panic_lockdown_test_gdbtrap",
1883 			.func = &arm64_panic_lockdown_test_gdbtrap,
1884 			.arg = 0,
1885 			.expected_ec = ESR_EC_UNCATEGORIZED,
1886 			/* GDBTRAP instructions should be allowed everywhere */
1887 			.expect_lockdown_exceptions_masked = false,
1888 			.expect_lockdown_exceptions_unmasked = false,
1889 		},
1890 #if __has_feature(ptrauth_calls)
1891 		{
1892 			.name = "arm64_panic_lockdown_test_pac_brk_c470",
1893 			.func = &arm64_panic_lockdown_test_pac_brk_c470,
1894 			.arg = 0,
1895 			.expected_ec = ESR_EC_BRK_AARCH64,
1896 			.expect_lockdown_exceptions_masked = true,
1897 			.expect_lockdown_exceptions_unmasked = true,
1898 		},
1899 		{
1900 			.name = "arm64_panic_lockdown_test_pac_brk_c471",
1901 			.func = &arm64_panic_lockdown_test_pac_brk_c471,
1902 			.arg = 0,
1903 			.expected_ec = ESR_EC_BRK_AARCH64,
1904 			.expect_lockdown_exceptions_masked = true,
1905 			.expect_lockdown_exceptions_unmasked = true,
1906 		},
1907 		{
1908 			.name = "arm64_panic_lockdown_test_pac_brk_c472",
1909 			.func = &arm64_panic_lockdown_test_pac_brk_c472,
1910 			.arg = 0,
1911 			.expected_ec = ESR_EC_BRK_AARCH64,
1912 			.expect_lockdown_exceptions_masked = true,
1913 			.expect_lockdown_exceptions_unmasked = true,
1914 		},
1915 		{
1916 			.name = "arm64_panic_lockdown_test_pac_brk_c473",
1917 			.func = &arm64_panic_lockdown_test_pac_brk_c473,
1918 			.arg = 0,
1919 			.expected_ec = ESR_EC_BRK_AARCH64,
1920 			.expect_lockdown_exceptions_masked = true,
1921 			.expect_lockdown_exceptions_unmasked = true,
1922 		},
1923 		{
1924 			.name = "arm64_panic_lockdown_test_telemetry_brk_ff00",
1925 			.func = &arm64_panic_lockdown_test_telemetry_brk_ff00,
1926 			.arg = 0,
1927 			.expected_ec = ESR_EC_BRK_AARCH64,
1928 			/*
1929 			 * PAC breakpoints are not the only breakpoints, ensure that other
1930 			 * BRKs (like those used for telemetry) do not trigger lockdowns.
1931 			 * This is necessary to avoid conflicts with features like UBSan
1932 			 * telemetry (which could fire at any time in C code).
1933 			 */
1934 			.expect_lockdown_exceptions_masked = false,
1935 			.expect_lockdown_exceptions_unmasked = false,
1936 		},
1937 		{
1938 			.name = "arm64_panic_lockdown_test_br_auth_fail",
1939 			.func = &arm64_panic_lockdown_test_br_auth_fail,
1940 			.arg = ia_invalid,
1941 			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_IABORT_EL1,
1942 			.expect_lockdown_exceptions_masked = true,
1943 			.expect_lockdown_exceptions_unmasked = true,
1944 			/*
1945 			 * Pre-FEAT_FPACCOMBINE, BRAx branches to a poisoned PC so we
1946 			 * expect to fault on the branch target rather than the branch
1947 			 * itself. The exact ELR will likely be different from ia_invalid,
1948 			 * but since the expect logic in sleh only matches on low bits (i.e.
1949 			 * not bits which will be poisoned), this is fine.
1950 			 * On FEAT_FPACCOMBINE devices, we will fault on the branch itself.
1951 			 */
1952 			.override_expected_fault_pc_valid = !gARM_FEAT_FPACCOMBINE,
1953 			.override_expected_fault_pc = ia_invalid
1954 		},
1955 		{
1956 			.name = "arm64_panic_lockdown_test_ldr_auth_fail",
1957 			.func = &arm64_panic_lockdown_test_ldr_auth_fail,
1958 			.arg = panic_lockdown_pacda_get_invalid_ptr(),
1959 			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_DABORT_EL1,
1960 			.expect_lockdown_exceptions_masked = true,
1961 			.expect_lockdown_exceptions_unmasked = true,
1962 		},
1963 		{
1964 			.name = "arm64_panic_lockdown_test_copyio_poison",
1965 			.func = &arm64_panic_lockdown_test_copyio,
1966 			/* fake a poisoned kernel pointer by flipping the bottom PAC bit */
1967 			.arg = ((uint64_t)-1) ^ (1LLU << (64 - T1SZ_BOOT)),
1968 			.expected_ec = ESR_EC_DABORT_EL1,
1969 			.expect_lockdown_exceptions_masked = false,
1970 			.expect_lockdown_exceptions_unmasked = false,
1971 			.override_expected_fault_pc_valid = true,
1972 			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
1973 		},
1974 #if __ARM_ARCH_8_6__
1975 		{
1976 			.name = "arm64_panic_lockdown_test_fpac",
1977 			.func = &arm64_panic_lockdown_test_fpac,
1978 			.arg = ia_invalid,
1979 			.expected_ec = ESR_EC_PAC_FAIL,
1980 			.expect_lockdown_exceptions_masked = true,
1981 			.expect_lockdown_exceptions_unmasked = true,
1982 		},
1983 #endif /* __ARM_ARCH_8_6__ */
1984 #endif /* ptrauth_calls */
1985 		{
1986 			.name = "arm64_panic_lockdown_test_copyio",
1987 			.func = &arm64_panic_lockdown_test_copyio,
1988 			.arg = 0x0 /* load from NULL */,
1989 			.expected_ec = ESR_EC_DABORT_EL1,
1990 			.expect_lockdown_exceptions_masked = false,
1991 			.expect_lockdown_exceptions_unmasked = false,
1992 			.override_expected_fault_pc_valid = true,
1993 			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
1994 		},
1995 	};
1996 
1997 	size_t test_count = sizeof(tests) / sizeof(*tests);
1998 	for (size_t i = 0; i < test_count; i++) {
1999 		panic_lockdown_expect_test(
2000 			"Exceptions unmasked",
2001 			&tests[i],
2002 			tests[i].expect_lockdown_exceptions_unmasked,
2003 			/* mask_interrupts */ false);
2004 
2005 		panic_lockdown_expect_test(
2006 			"Exceptions masked",
2007 			&tests[i],
2008 			tests[i].expect_lockdown_exceptions_masked,
2009 			/* mask_interrupts */ true);
2010 	}
2011 
2012 	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_invalid_stack",
2013 	    arm64_panic_lockdown_test_sp1_invalid_stack,
2014 	    arm64_panic_lockdown_test_pac_brk_c470,
2015 	    arm64_panic_lockdown_test_sp1_invalid_stack_handler);
2016 
2017 	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_exception_in_vector",
2018 	    arm64_panic_lockdown_test_sp1_exception_in_vector,
2019 	    el1_sp1_synchronous_raise_exception_in_vector,
2020 	    arm64_panic_lockdown_test_sp1_exception_in_vector_handler);
2021 	return KERN_SUCCESS;
2022 }
2023 #endif /* CONFIG_SPTM */
2024 
2025 
2026 
2027 #if HAS_SPECRES
2028 
2029 /*** CPS RCTX ***/
2030 
2031 
2032 /*** SPECRES ***/
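/*
 * Each helper below issues a single restriction-by-context instruction bracketed
 * by an ISB before and a DSB SY / ISB SY after, so the operation is ordered
 * against the surrounding test code.
 */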
2033 
2034 #if HAS_SPECRES2
2035 /*
2036  * Execute a COSP RCTX instruction.
2037  */
2038 static void
2039 _cosprctx_exec(uint64_t raw)
2040 {
2041 	asm volatile ( "ISB SY");
2042 	__asm__ volatile ("COSP RCTX, %0" :: "r" (raw));
2043 	asm volatile ( "DSB SY");
2044 	asm volatile ( "ISB SY");
2045 }
2046 #endif
2047 
2048 /*
2049  * Execute a CFP RCTX instruction.
2050  */
2051 static void
2052 _cfprctx_exec(uint64_t raw)
2053 {
2054 	asm volatile ( "ISB SY");
2055 	__asm__ volatile ("CFP RCTX, %0" :: "r" (raw));
2056 	asm volatile ( "DSB SY");
2057 	asm volatile ( "ISB SY");
2058 }
2059 
2060 /*
2061  * Execute a CPP RCTX instruction.
2062  */
2063 static void
2064 _cpprctx_exec(uint64_t raw)
2065 {
2066 	asm volatile ( "ISB SY");
2067 	__asm__ volatile ("CPP RCTX, %0" :: "r" (raw));
2068 	asm volatile ( "DSB SY");
2069 	asm volatile ( "ISB SY");
2070 }
2071 
2072 /*
2073  * Execute a DVP RCTX instruction.
2074  */
2075 static void
2076 _dvprctx_exec(uint64_t raw)
2077 {
2078 	asm volatile ( "ISB SY");
2079 	__asm__ volatile ("DVP RCTX, %0" :: "r" (raw));
2080 	asm volatile ( "DSB SY");
2081 	asm volatile ( "ISB SY");
2082 }
2083 
2084 static void
2085 _specres_do_test_std(void (*impl)(uint64_t raw))
2086 {
2087 	typedef struct {
2088 		union {
2089 			struct {
2090 				uint64_t ASID:16;
2091 				uint64_t GASID:1;
2092 				uint64_t :7;
2093 				uint64_t EL:2;
2094 				uint64_t NS:1;
2095 				uint64_t NSE:1;
2096 				uint64_t :4;
2097 				uint64_t VMID:16;
2098 				uint64_t GVMID:1;
2099 			};
2100 			uint64_t raw;
2101 		};
2102 	} specres_ctx;
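	/*
	 * The bitfields above appear to follow the RCTX context-ID operand layout
	 * (ASID in bits [15:0], GASID at bit 16, EL in [25:24], NS at 26, NSE at 27,
	 * VMID in [47:32], GVMID at bit 48); the size assert below guards against
	 * unintended padding.
	 */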
2103 
2104 	assert(sizeof(specres_ctx) == 8);
2105 
2106 	/*
2107 	 * Test various possible meaningful RCTX context IDs.
2108 	 */
2109 
2110 	/* el : EL0 / EL1 / EL2. */
2111 	for (uint8_t el = 0; el < 3; el++) {
2112 		/* Always non-secure. */
2113 		const uint8_t ns = 1;
2114 		const uint8_t nse = 0;
2115 
2116 		/* Iterate over some pairs of ASIDs / VMIDs. */
2117 		for (uint16_t xxid = 0; xxid < 256; xxid++) {
2118 			const uint16_t asid = (uint16_t) (xxid << 4);
2119 			const uint16_t vmid = (uint16_t) (256 - (xxid << 4));
2120 
2121 			/* Test 4 G[AS|VM]ID combinations. */
2122 			for (uint8_t bid = 0; bid < 4; bid++) {
2123 				const uint8_t gasid = bid & 1;
2124 				const uint8_t gvmid = bid & 2;
2125 
2126 				/* Generate the context descriptor. */
2127 				specres_ctx ctx = {0};
2128 				ctx.ASID = asid;
2129 				ctx.GASID = gasid;
2130 				ctx.EL = el;
2131 				ctx.NS = ns;
2132 				ctx.NSE = nse;
2133 				ctx.VMID = vmid;
2134 				ctx.GVMID = gvmid;
2135 
2136 				/* Execute the restriction instruction under test. */
2137 				(*impl)(ctx.raw);
2138 
2139 				/* Insert some operation. */
2140 				volatile uint8_t sum = 0;
2141 				for (volatile uint8_t i = 0; i < 64; i++) {
2142 					sum += i * sum + 3;
2143 				}
2144 
2145 				/* If EL0 is not targeted, we only need to do it once. */
2146 				if (el != 0) {
2147 					goto not_el0_skip;
2148 				}
2149 			}
2150 		}
2151 
2152 		/* El0 skip. */
2153 not_el0_skip:   ;
2154 	}
2155 }
2156 
2157 /*** RCTX ***/
2158 
2159 static void
2160 _rctx_do_test(void)
2161 {
2162 	_specres_do_test_std(&_cfprctx_exec);
2163 	_specres_do_test_std(&_cpprctx_exec);
2164 	_specres_do_test_std(&_dvprctx_exec);
2165 #if HAS_SPECRES2
2166 	_specres_do_test_std(&_cosprctx_exec);
2167 #endif
2168 }
2169 
2170 kern_return_t
2171 specres_test(void)
2172 {
2173 	/* Basic instructions test. */
2174 	_cfprctx_exec(0);
2175 	_cpprctx_exec(0);
2176 	_dvprctx_exec(0);
2177 #if HAS_SPECRES2
2178 	_cosprctx_exec(0);
2179 #endif
2180 
2181 	/* More advanced instructions test. */
2182 	_rctx_do_test();
2183 
2184 	return KERN_SUCCESS;
2185 }
2186 
2187 #endif /* HAS_SPECRES */
2188 #if BTI_ENFORCED
2189 typedef uint64_t (bti_landing_pad_func_t)(void);
2190 typedef uint64_t (bti_shim_func_t)(bti_landing_pad_func_t *);
2191 
2192 extern bti_shim_func_t arm64_bti_test_jump_shim;
2193 extern bti_shim_func_t arm64_bti_test_call_shim;
2194 
2195 extern bti_landing_pad_func_t arm64_bti_test_func_with_no_landing_pad;
2196 extern bti_landing_pad_func_t arm64_bti_test_func_with_call_landing_pad;
2197 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_landing_pad;
2198 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_call_landing_pad;
2199 #if __has_feature(ptrauth_returns)
2200 extern bti_landing_pad_func_t arm64_bti_test_func_with_pac_landing_pad;
2201 #endif /* __has_feature(ptrauth_returns) */
2202 
2203 typedef struct arm64_bti_test_func_case {
2204 	const char *func_str;
2205 	bti_landing_pad_func_t *func;
2206 	uint64_t expect_return_value;
2207 	uint8_t  expect_call_ok;
2208 	uint8_t  expect_jump_ok;
2209 } arm64_bti_test_func_case_s;
2210 
2211 static volatile uintptr_t bti_exception_handler_pc = 0;
2212 
2213 static bool
2214 arm64_bti_test_exception_handler(arm_saved_state_t * state)
2215 {
2216 	uint64_t esr = get_saved_state_esr(state);
2217 	esr_exception_class_t class = ESR_EC(esr);
2218 
2219 	if (class != ESR_EC_BTI_FAIL) {
2220 		return false;
2221 	}
2222 
2223 	/* Capture any desired exception metrics */
2224 	bti_exception_handler_pc = get_saved_state_pc(state);
2225 
2226 	/* "Cancel" the function call by forging an early return */
2227 	set_saved_state_pc(state, get_saved_state_lr(state));
2228 
2229 	/* Clear BTYPE to prevent taking another exception after ERET */
2230 	uint32_t spsr = get_saved_state_cpsr(state);
2231 	spsr &= ~PSR_BTYPE_MASK;
2232 	set_saved_state_cpsr(state, spsr);
2233 
2234 	return true;
2235 }
2236 
2237 static void
2238 arm64_bti_test_func_with_shim(
2239 	uint8_t expect_ok,
2240 	const char *shim_str,
2241 	bti_shim_func_t *shim,
2242 	arm64_bti_test_func_case_s *test_case)
2243 {
2244 	uint64_t result = -1;
2245 
2246 	/* Capture BTI exceptions triggered by our target function */
2247 	uintptr_t raw_func = (uintptr_t)ptrauth_strip(
2248 		(void *)test_case->func,
2249 		ptrauth_key_function_pointer);
2250 	ml_expect_fault_pc_begin(arm64_bti_test_exception_handler, raw_func);
2251 	bti_exception_handler_pc = 0;
2252 
2253 	/*
2254 	 * The assembly routines do not support C function type discriminators, so
2255 	 * strip and resign with zero if needed
2256 	 */
2257 	bti_landing_pad_func_t *resigned = ptrauth_auth_and_resign(
2258 		test_case->func,
2259 		ptrauth_key_function_pointer,
2260 		ptrauth_type_discriminator(bti_landing_pad_func_t),
2261 		ptrauth_key_function_pointer, 0);
2262 
2263 	result = shim(resigned);
2264 
2265 	ml_expect_fault_end();
2266 
2267 	if (!expect_ok && raw_func != bti_exception_handler_pc) {
2268 		T_FAIL("Expected BTI exception at 0x%llx but got one at 0x%llx instead\n",
2269 		    raw_func, bti_exception_handler_pc);
2270 	} else if (expect_ok && bti_exception_handler_pc) {
2271 		T_FAIL("Did not expect BTI exception but got one at 0x%llx\n",
2272 		    bti_exception_handler_pc);
2273 	} else if (!expect_ok && !bti_exception_handler_pc) {
2274 		T_FAIL("Failed to hit expected exception!\n");
2275 	} else if (expect_ok && result != test_case->expect_return_value) {
2276 		T_FAIL("Incorrect test function result (expected=%llu, result=%llu)\n",
2277 		    test_case->expect_return_value, result);
2278 	} else {
2279 		T_PASS("%s (shim=%s)\n", test_case->func_str, shim_str);
2280 	}
2281 }
2282 
2283 /**
2284  * This test works to ensure that BTI exceptions are raised where expected
2285  * and only where they are expected by exhaustively testing all indirect branch
2286  * combinations with all landing pad options.
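 *
 * Background for the expectations in the table below (editorial note): an
 * indirect call (BLR) is accepted by a "BTI c" or "BTI jc" landing pad, an
 * indirect jump (BR) by "BTI j" or "BTI jc", and a PACIxSP prologue acts as an
 * implicit call landing pad, which is why the ptrauth_returns case expects
 * calls to succeed and jumps to fault.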
2287  */
2288 kern_return_t
2289 arm64_bti_test(void)
2290 {
2291 	static arm64_bti_test_func_case_s tests[] = {
2292 		{
2293 			.func_str = "arm64_bti_test_func_with_no_landing_pad",
2294 			.func = &arm64_bti_test_func_with_no_landing_pad,
2295 			.expect_return_value     = 1,
2296 			.expect_call_ok          = 0,
2297 			.expect_jump_ok          = 0,
2298 		},
2299 		{
2300 			.func_str = "arm64_bti_test_func_with_call_landing_pad",
2301 			.func = &arm64_bti_test_func_with_call_landing_pad,
2302 			.expect_return_value     = 2,
2303 			.expect_call_ok          = 1,
2304 			.expect_jump_ok          = 0,
2305 		},
2306 		{
2307 			.func_str = "arm64_bti_test_func_with_jump_landing_pad",
2308 			.func = &arm64_bti_test_func_with_jump_landing_pad,
2309 			.expect_return_value     = 3,
2310 			.expect_call_ok          = 0,
2311 			.expect_jump_ok          = 1,
2312 		},
2313 		{
2314 			.func_str = "arm64_bti_test_func_with_jump_call_landing_pad",
2315 			.func = &arm64_bti_test_func_with_jump_call_landing_pad,
2316 			.expect_return_value     = 4,
2317 			.expect_call_ok          = 1,
2318 			.expect_jump_ok          = 1,
2319 		},
2320 #if __has_feature(ptrauth_returns)
2321 		{
2322 			.func_str = "arm64_bti_test_func_with_pac_landing_pad",
2323 			.func = &arm64_bti_test_func_with_pac_landing_pad,
2324 			.expect_return_value     = 5,
2325 			.expect_call_ok          = 1,
2326 			.expect_jump_ok          = 0,
2327 		},
2328 #endif /* __has_feature(ptrauth_returns) */
2329 	};
2330 
2331 	size_t test_count = sizeof(tests) / sizeof(*tests);
2332 	for (size_t i = 0; i < test_count; i++) {
2333 		arm64_bti_test_func_case_s *test_case = tests + i;
2334 
2335 		arm64_bti_test_func_with_shim(test_case->expect_call_ok,
2336 		    "arm64_bti_test_call_shim",
2337 		    arm64_bti_test_call_shim,
2338 		    test_case);
2339 
2340 
2341 		arm64_bti_test_func_with_shim(test_case->expect_jump_ok,
2342 		    "arm64_bti_test_jump_shim",
2343 		    arm64_bti_test_jump_shim,
2344 		    test_case);
2345 	}
2346 
2347 	return KERN_SUCCESS;
2348 }
2349 #endif /* BTI_ENFORCED */
2350 
2351 
2352 /**
2353  * Test the speculation guards
2354  * We can't easily ensure that the guards actually behave correctly under
2355  * speculation, but we can at least ensure that the guards are non-speculatively
2356  * correct.
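 *
 * Naming note (editorial, inferred from the call sites below): the three-letter
 * suffix encodes operand widths, X for 64-bit and W for 32-bit; the first letter
 * is the width of the guarded output and the remaining two are the widths of the
 * compared values (e.g. _WXX guards a 32-bit result on a 64-bit comparison).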
2357  */
2358 kern_return_t
2359 arm64_speculation_guard_test(void)
2360 {
2361 	uint64_t cookie1_64 = 0x5350454354524521ULL; /* SPECTRE! */
2362 	uint64_t cookie2_64 = 0x5941592043505553ULL; /* YAY CPUS */
2363 	uint32_t cookie1_32 = (uint32_t)cookie1_64;
2364 	uint32_t cookie2_32 = (uint32_t)cookie2_64;
2365 	uint64_t result64 = 0;
2366 	uint32_t result32 = 0;
2367 	bool result_valid;
2368 
2369 	/*
2370 	 * Test the zeroing guard
2371 	 * Since failing the guard triggers a panic, we don't actually test that
2372 	 * part as part of the automated tests.
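	 *
	 * Informal model, inferred from the expectations below: the guard compares
	 * cmp_1 against cmp_2 under the given condition code and roughly behaves as
	 *     out_valid = (cmp_1 cc cmp_2);
	 *     out       = out_valid ? value : 0;
	 * with the selection arranged to hold even under misspeculation.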
2373 	 */
2374 
2375 	result64 = 0;
2376 	SPECULATION_GUARD_ZEROING_XXX(
2377 		/* out */ result64, /* out_valid */ result_valid,
2378 		/* value */ cookie1_64,
2379 		/* cmp_1 */ 0ULL, /* cmp_2 */ 1ULL, /* cc */ "NE");
2380 	T_EXPECT(result_valid, "result valid");
2381 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 zeroing guard works");
2382 
2383 	result64 = 0;
2384 	SPECULATION_GUARD_ZEROING_XWW(
2385 		/* out */ result64, /* out_valid */ result_valid,
2386 		/* value */ cookie1_64,
2387 		/* cmp_1 */ 1U, /* cmp_2 */ 0U, /* cc */ "HI");
2388 	T_EXPECT(result_valid, "result valid");
2389 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 32 zeroing guard works");
2390 
2391 	result32 = 0;
2392 	SPECULATION_GUARD_ZEROING_WXX(
2393 		/* out */ result32, /* out_valid */ result_valid,
2394 		/* value */ cookie1_32,
2395 		/* cmp_1 */ -1LL, /* cmp_2 */ 4LL, /* cc */ "LT");
2396 	T_EXPECT(result_valid, "result valid");
2397 	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 64 zeroing guard works");
2398 
2399 	result32 = 0;
2400 	SPECULATION_GUARD_ZEROING_WWW(
2401 		/* out */ result32, /* out_valid */ result_valid,
2402 		/* value */ cookie1_32,
2403 		/* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "GT");
2404 	T_EXPECT(result_valid, "result valid");
2405 	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 32 zeroing guard works");
2406 
2407 	result32 = 0x41;
2408 	SPECULATION_GUARD_ZEROING_WWW(
2409 		/* out */ result32, /* out_valid */ result_valid,
2410 		/* value */ cookie1_32,
2411 		/* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "LT");
2412 	T_EXPECT(!result_valid, "result invalid");
2413 	T_EXPECT_EQ_UINT(result32, 0, "zeroing guard works with failing condition");
2414 
2415 	/*
2416 	 * Test the selection guard
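	 *
	 * Informal model, inferred from the expectations below:
	 *     out = (cmp_1 cc cmp_2) ? sel_1 : sel_2;
	 * where n_cc is the negation of cc, again arranged so the choice is stable
	 * under misspeculation.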
2417 	 */
2418 
2419 	result64 = 0;
2420 	SPECULATION_GUARD_SELECT_XXX(
2421 		/* out */ result64,
2422 		/* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
2423 		/* cc   */ "EQ", /* sel_1 */ cookie1_64,
2424 		/* n_cc */ "NE", /* sel_2 */ cookie2_64);
2425 	T_EXPECT_EQ_ULLONG(result64, cookie2_64, "64, 64 select guard works (1)");
2426 
2427 	result64 = 0;
2428 	SPECULATION_GUARD_SELECT_XXX(
2429 		/* out */ result64,
2430 		/* cmp_1 */ 32ULL, /* cmp_2 */ 32ULL,
2431 		/* cc   */ "EQ", /* sel_1 */ cookie1_64,
2432 		/* n_cc */ "NE", /* sel_2 */ cookie2_64);
2433 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 select guard works (2)");
2434 
2435 
2436 	result32 = 0;
2437 	SPECULATION_GUARD_SELECT_WXX(
2438 		/* out */ result32,
2439 		/* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
2440 		/* cc   */ "HI", /* sel_1 */ cookie1_64,
2441 		/* n_cc */ "LS", /* sel_2 */ cookie2_64);
2442 	T_EXPECT_EQ_ULLONG(result32, cookie2_32, "32, 64 select guard works (1)");
2443 
2444 	result32 = 0;
2445 	SPECULATION_GUARD_SELECT_WXX(
2446 		/* out */ result32,
2447 		/* cmp_1 */ 16ULL, /* cmp_2 */ 2ULL,
2448 		/* cc   */ "HI", /* sel_1 */ cookie1_64,
2449 		/* n_cc */ "LS", /* sel_2 */ cookie2_64);
2450 	T_EXPECT_EQ_ULLONG(result32, cookie1_32, "32, 64 select guard works (2)");
2451 
2452 	return KERN_SUCCESS;
2453 }
2454 
2455 extern void arm64_brk_lr_gpr(void);
2456 extern void arm64_brk_lr_fault(void);
2457 
2458 static NOKASAN bool
2459 arm64_backtrace_test_fault_handler(arm_saved_state_t * state)
2460 {
2461 	/* Similar setup to backtrace_kernel_sysctl() */
2462 	const unsigned int bt_len = 24;
2463 	const size_t bt_size = sizeof(uint8_t) * bt_len;
2464 	uint8_t *bt = kalloc_data(bt_size, Z_WAITOK | Z_ZERO);
2465 	backtrace_info_t packed_info = BTI_NONE;
2466 
2467 	/* Call the backtrace function */
2468 	backtrace_packed(BTP_KERN_OFFSET_32, bt, bt_size, NULL, &packed_info);
2469 
2470 	add_saved_state_pc(state, 4);
2471 	return true;
2472 }
2473 
2474 /**
2475  * Make sure EL1 fleh doesn't push a bogus stack frame when LR is being used as
2476  * a GPR in the caller.
2477  *
2478  * This test writes a GPR-like value into LR that is >4GB away from any kernel
2479  * address and tries to run backtrace_packed() from a sync handler.
2480  * backtrace_packed() has an invariant that all addresses in the stack frame are
2481  * within 4GB of the kernel text.
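 *
 * arm64_brk_lr_gpr / arm64_brk_lr_fault (declared above) are assembly helpers:
 * presumably the former stuffs a GPR-style value into LR and arm64_brk_lr_fault
 * labels the instruction that takes the expected fault, where the handler above
 * runs backtrace_packed() and then steps the PC past the faulting instruction.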
2482  */
2483 kern_return_t
2484 arm64_backtrace_test(void)
2485 {
2486 	ml_expect_fault_pc_begin(arm64_backtrace_test_fault_handler, (uintptr_t)&arm64_brk_lr_fault);
2487 	arm64_brk_lr_gpr();
2488 	ml_expect_fault_end();
2489 
2490 #if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
2491 	/* Reset the debug data so it can be filled later if needed */
2492 	debug_panic_lockdown_initiator_state.initiator_pc = 0;
2493 #endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
2494 	return KERN_SUCCESS;
2495 }
2496