xref: /xnu-12377.61.12/osfmk/arm64/platform_tests.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
33  * Mellon University All Rights Reserved.
34  *
35  * Permission to use, copy, modify and distribute this software and its
36  * documentation is hereby granted, provided that both the copyright notice
37  * and this permission notice appear in all copies of the software,
38  * derivative works or modified versions, and any portions thereof, and that
39  * both notices appear in supporting documentation.
40  *
41  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42  * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43  * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44  *
45  * Carnegie Mellon requests users of this software to return to
46  *
47  * Software Distribution Coordinator  or  [email protected]
48  * School of Computer Science Carnegie Mellon University Pittsburgh PA
49  * 15213-3890
50  *
51  * any improvements or extensions that they make and grant Carnegie Mellon the
52  * rights to redistribute these changes.
53  */
54 
55 #include <mach_ldebug.h>
56 
57 #define LOCK_PRIVATE 1
58 
59 #include <vm/pmap.h>
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_page_internal.h>
62 #include <vm/vm_kern_xnu.h>
63 #include <mach/vm_map.h>
64 #include <kern/backtrace.h>
65 #include <kern/kalloc.h>
66 #include <kern/cpu_number.h>
67 #include <kern/locks.h>
68 #include <kern/misc_protos.h>
69 #include <kern/thread.h>
70 #include <kern/processor.h>
71 #include <kern/sched_prim.h>
72 #include <kern/debug.h>
73 #include <stdatomic.h>
74 #include <string.h>
75 #include <tests/xnupost.h>
76 
77 #if     MACH_KDB
78 #include <ddb/db_command.h>
79 #include <ddb/db_output.h>
80 #include <ddb/db_sym.h>
81 #include <ddb/db_print.h>
82 #endif                          /* MACH_KDB */
83 
84 #include <san/kasan.h>
85 #include <sys/errno.h>
86 #include <sys/kdebug.h>
87 #include <sys/munge.h>
88 #include <machine/cpu_capabilities.h>
89 #include <machine/machine_routines.h>
90 #include <arm/cpu_data_internal.h>
91 #include <arm/pmap.h>
92 #include <arm/pmap/pmap_pt_geometry.h>
93 
94 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
95 #include <arm64/amcc_rorgn.h>
96 #endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
97 
98 #include <arm64/machine_machdep.h>
99 
100 kern_return_t arm64_backtrace_test(void);
101 kern_return_t arm64_lock_test(void);
102 kern_return_t arm64_munger_test(void);
103 kern_return_t arm64_pan_test(void);
104 kern_return_t arm64_late_pan_test(void);
105 #if defined(HAS_APPLE_PAC)
106 #include <ptrauth.h>
107 kern_return_t arm64_ropjop_test(void);
108 #endif
109 #if defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
110 kern_return_t ctrr_test(void);
111 kern_return_t ctrr_test_cpu(void);
112 #endif
113 #if BTI_ENFORCED
114 kern_return_t arm64_bti_test(void);
115 #endif /* BTI_ENFORCED */
116 #if HAS_MTE
117 #include <arm_acle.h>
118 kern_return_t mte_test(void);
119 kern_return_t mte_copyio_recovery_handler_test(void);
120 #endif
121 #if HAS_SPECRES
122 extern kern_return_t specres_test(void);
123 #endif
124 
125 // exception handler ignores this fault address during PAN test
126 #if __ARM_PAN_AVAILABLE__
127 const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
128 vm_offset_t pan_test_addr = 0;
129 vm_offset_t pan_ro_addr = 0;
130 volatile int pan_exception_level = 0;
131 volatile char pan_fault_value = 0;
132 #endif
133 
134 #if CONFIG_SPTM
135 kern_return_t arm64_panic_lockdown_test(void);
136 #endif /* CONFIG_SPTM */
137 
138 
139 #include <arm64/speculation.h>
140 kern_return_t arm64_speculation_guard_test(void);
141 
142 
143 #include <libkern/OSAtomic.h>
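/*
 * Lock test harness state (xnupost). The lt_* globals below are shared by the
 * test threads: lt_counter counts successful acquisitions, the *_holders
 * counters track how many threads hold a blocking lock at once, and
 * lt_done_threads/lt_target_done_threads implement a simple join. lt_hw_lock
 * is both a lock under test and the lock protecting the holder bookkeeping.
 */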
144 #define LOCK_TEST_ITERATIONS 50
145 #define LOCK_TEST_SETUP_TIMEOUT_SEC 15
146 static hw_lock_data_t   lt_hw_lock;
147 static lck_spin_t       lt_lck_spin_t;
148 static lck_mtx_t        lt_mtx;
149 static lck_rw_t         lt_rwlock;
150 static volatile uint32_t lt_counter = 0;
151 static volatile int     lt_spinvolatile;
152 static volatile uint32_t lt_max_holders = 0;
153 static volatile uint32_t lt_upgrade_holders = 0;
154 static volatile uint32_t lt_max_upgrade_holders = 0;
155 static volatile uint32_t lt_num_holders = 0;
156 static volatile uint32_t lt_done_threads;
157 static volatile uint32_t lt_target_done_threads;
158 static volatile uint32_t lt_cpu_bind_id = 0;
159 static uint64_t          lt_setup_timeout = 0;
160 
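/*
 * Bookkeeping for blocking locks (mutex/rwlock): record one more holder and
 * track the high-water mark, so tests can assert mutual exclusion by checking
 * that lt_max_holders never exceeds the expected count.
 */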
161 static void
162 lt_note_another_blocking_lock_holder()
163 {
164 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
165 	lt_num_holders++;
166 	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
167 	hw_lock_unlock(&lt_hw_lock);
168 }
169 
170 static void
171 lt_note_blocking_lock_release()
172 {
173 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
174 	lt_num_holders--;
175 	hw_lock_unlock(&lt_hw_lock);
176 }
177 
178 static void
179 lt_spin_a_little_bit()
180 {
181 	uint32_t i;
182 
183 	for (i = 0; i < 10000; i++) {
184 		lt_spinvolatile++;
185 	}
186 }
187 
188 static void
189 lt_sleep_a_little_bit()
190 {
191 	delay(100);
192 }
193 
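/*
 * Each lt_grab_* helper follows the same pattern: acquire the lock under
 * test, note the holder, dawdle briefly while holding it, bump lt_counter,
 * then release. The dawdling widens the race window so a broken lock shows
 * up as lt_max_holders > 1 or a miscounted lt_counter.
 */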
194 static void
195 lt_grab_mutex()
196 {
197 	lck_mtx_lock(&lt_mtx);
198 	lt_note_another_blocking_lock_holder();
199 	lt_sleep_a_little_bit();
200 	lt_counter++;
201 	lt_note_blocking_lock_release();
202 	lck_mtx_unlock(&lt_mtx);
203 }
204 
205 static void
206 lt_grab_mutex_with_try()
207 {
208 	while (0 == lck_mtx_try_lock(&lt_mtx)) {
209 		;
210 	}
211 	lt_note_another_blocking_lock_holder();
212 	lt_sleep_a_little_bit();
213 	lt_counter++;
214 	lt_note_blocking_lock_release();
215 	lck_mtx_unlock(&lt_mtx);
216 }
217 
218 static void
219 lt_grab_rw_exclusive()
220 {
221 	lck_rw_lock_exclusive(&lt_rwlock);
222 	lt_note_another_blocking_lock_holder();
223 	lt_sleep_a_little_bit();
224 	lt_counter++;
225 	lt_note_blocking_lock_release();
226 	lck_rw_done(&lt_rwlock);
227 }
228 
229 static void
230 lt_grab_rw_exclusive_with_try()
231 {
232 	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
233 		lt_sleep_a_little_bit();
234 	}
235 
236 	lt_note_another_blocking_lock_holder();
237 	lt_sleep_a_little_bit();
238 	lt_counter++;
239 	lt_note_blocking_lock_release();
240 	lck_rw_done(&lt_rwlock);
241 }
242 
243 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
244  *  static void
245  *  lt_grab_rw_shared()
246  *  {
247  *       lck_rw_lock_shared(&lt_rwlock);
248  *       lt_counter++;
249  *
250  *       lt_note_another_blocking_lock_holder();
251  *       lt_sleep_a_little_bit();
252  *       lt_note_blocking_lock_release();
253  *
254  *       lck_rw_done(&lt_rwlock);
255  *  }
256  */
257 
258 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
259  *  static void
260  *  lt_grab_rw_shared_with_try()
261  *  {
262  *       while(0 == lck_rw_try_lock_shared(&lt_rwlock));
263  *       lt_counter++;
264  *
265  *       lt_note_another_blocking_lock_holder();
266  *       lt_sleep_a_little_bit();
267  *       lt_note_blocking_lock_release();
268  *
269  *       lck_rw_done(&lt_rwlock);
270  *  }
271  */
272 
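/*
 * Take the rwlock shared, upgrade it to exclusive (falling back to a fresh
 * exclusive acquisition if the upgrade fails, which drops the shared hold),
 * then downgrade back to shared before releasing. lt_max_upgrade_holders
 * should stay at 1 if exclusivity is preserved across the upgrade.
 */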
273 static void
274 lt_upgrade_downgrade_rw()
275 {
276 	boolean_t upgraded, success;
277 
278 	success = lck_rw_try_lock_shared(&lt_rwlock);
279 	if (!success) {
280 		lck_rw_lock_shared(&lt_rwlock);
281 	}
282 
283 	lt_note_another_blocking_lock_holder();
284 	lt_sleep_a_little_bit();
285 	lt_note_blocking_lock_release();
286 
287 	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
288 	if (!upgraded) {
289 		success = lck_rw_try_lock_exclusive(&lt_rwlock);
290 
291 		if (!success) {
292 			lck_rw_lock_exclusive(&lt_rwlock);
293 		}
294 	}
295 
296 	lt_upgrade_holders++;
297 	if (lt_upgrade_holders > lt_max_upgrade_holders) {
298 		lt_max_upgrade_holders = lt_upgrade_holders;
299 	}
300 
301 	lt_counter++;
302 	lt_sleep_a_little_bit();
303 
304 	lt_upgrade_holders--;
305 
306 	lck_rw_lock_exclusive_to_shared(&lt_rwlock);
307 
308 	lt_spin_a_little_bit();
309 	lck_rw_done(&lt_rwlock);
310 }
311 
312 #if __AMP__
313 const int limit = 1000000;
314 static int lt_stress_local_counters[MAX_CPUS];
315 
316 lck_ticket_t lt_ticket_lock;
317 lck_grp_t lt_ticket_grp;
318 
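/*
 * Ticket lock stress body, run once per CPU by a bound thread. lt_counter is
 * reused as a two-phase start barrier (first pass: all threads have bound;
 * second pass: all threads are spinning on-core), after which every thread
 * hammers the ticket lock until the shared counter reaches 'limit'. Each
 * thread records its own tally so the caller can detect lost increments and
 * starvation.
 */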
319 static void
320 lt_stress_ticket_lock()
321 {
322 	uint local_counter = 0;
323 
324 	uint cpuid = cpu_number();
325 
326 	kprintf("%s>cpu %u starting\n", __FUNCTION__, cpuid);
327 
328 	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
329 	lt_counter++;
330 	local_counter++;
331 	lck_ticket_unlock(&lt_ticket_lock);
332 
333 	/* Wait until all test threads have finished any binding */
334 	while (lt_counter < lt_target_done_threads) {
335 		if (mach_absolute_time() > lt_setup_timeout) {
336 			kprintf("%s>cpu %u noticed that we exceeded setup timeout of %d seconds during initial setup phase (only %u out of %u threads checked in)",
337 			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter, lt_target_done_threads);
338 			return;
339 		}
340 		/* Yield to keep the CPUs available for the threads to bind */
341 		thread_yield_internal(1);
342 	}
343 
344 	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
345 	lt_counter++;
346 	local_counter++;
347 	lck_ticket_unlock(&lt_ticket_lock);
348 
349 	/*
350 	 * Now that the test threads have finished any binding, wait
351 	 * until they are all actively spinning on-core (done yielding)
352 	 * so we get a fairly timed start.
353 	 */
354 	while (lt_counter < 2 * lt_target_done_threads) {
355 		if (mach_absolute_time() > lt_setup_timeout) {
356 			kprintf("%s>cpu %u noticed that we exceeded setup timeout of %d seconds during secondary setup phase (only %u out of %u threads checked in)",
357 			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter - lt_target_done_threads, lt_target_done_threads);
358 			return;
359 		}
360 	}
361 
362 	kprintf("%s>cpu %u started\n", __FUNCTION__, cpuid);
363 
364 	while (lt_counter < limit) {
365 		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
366 		if (lt_counter < limit) {
367 			lt_counter++;
368 			local_counter++;
369 		}
370 		lck_ticket_unlock(&lt_ticket_lock);
371 	}
372 
373 	lt_stress_local_counters[cpuid] = local_counter;
374 
375 	kprintf("%s>final counter %u cpu %u incremented the counter %u times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
376 }
377 #endif
378 
379 static void
380 lt_grab_hw_lock()
381 {
382 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
383 	lt_counter++;
384 	lt_spin_a_little_bit();
385 	hw_lock_unlock(&lt_hw_lock);
386 }
387 
388 static void
389 lt_grab_hw_lock_with_try()
390 {
391 	while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
392 		;
393 	}
394 	lt_counter++;
395 	lt_spin_a_little_bit();
396 	hw_lock_unlock(&lt_hw_lock);
397 }
398 
399 static void
400 lt_grab_hw_lock_with_to()
401 {
402 	(void)hw_lock_to(&lt_hw_lock, &hw_lock_spin_policy, LCK_GRP_NULL);
403 	lt_counter++;
404 	lt_spin_a_little_bit();
405 	hw_lock_unlock(&lt_hw_lock);
406 }
407 
408 static void
409 lt_grab_spin_lock()
410 {
411 	lck_spin_lock(&lt_lck_spin_t);
412 	lt_counter++;
413 	lt_spin_a_little_bit();
414 	lck_spin_unlock(&lt_lck_spin_t);
415 }
416 
417 static void
418 lt_grab_spin_lock_with_try()
419 {
420 	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
421 		;
422 	}
423 	lt_counter++;
424 	lt_spin_a_little_bit();
425 	lck_spin_unlock(&lt_lck_spin_t);
426 }
427 
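/*
 * Handshake for the cross-thread trylock tests below: the spawned thread
 * waits for lt_thread_lock_grabbed before attempting the lock, and reports
 * the outcome of its attempt in lt_thread_lock_success.
 */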
428 static volatile boolean_t lt_thread_lock_grabbed;
429 static volatile boolean_t lt_thread_lock_success;
430 
431 static void
432 lt_reset()
433 {
434 	lt_counter = 0;
435 	lt_max_holders = 0;
436 	lt_num_holders = 0;
437 	lt_max_upgrade_holders = 0;
438 	lt_upgrade_holders = 0;
439 	lt_done_threads = 0;
440 	lt_target_done_threads = 0;
441 	lt_cpu_bind_id = 0;
442 	/* Reset timeout deadline out from current time */
443 	/* Reset the setup-timeout deadline relative to the current time */
444 	lt_setup_timeout += mach_absolute_time();
445 
446 	OSMemoryBarrier();
447 }
448 
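/*
 * Thread bodies used by lt_test_trylocks(): attempt a lock that the main
 * thread is expected to be holding, so the attempt should fail or time out.
 */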
449 static void
450 lt_trylock_hw_lock_with_to()
451 {
452 	OSMemoryBarrier();
453 	while (!lt_thread_lock_grabbed) {
454 		lt_sleep_a_little_bit();
455 		OSMemoryBarrier();
456 	}
457 	lt_thread_lock_success = hw_lock_to(&lt_hw_lock,
458 	    &hw_lock_test_give_up_policy, LCK_GRP_NULL);
459 	OSMemoryBarrier();
460 	mp_enable_preemption();
461 }
462 
463 static void
464 lt_trylock_spin_try_lock()
465 {
466 	OSMemoryBarrier();
467 	while (!lt_thread_lock_grabbed) {
468 		lt_sleep_a_little_bit();
469 		OSMemoryBarrier();
470 	}
471 	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
472 	OSMemoryBarrier();
473 }
474 
475 static void
476 lt_trylock_thread(void *arg, wait_result_t wres __unused)
477 {
478 	void (*func)(void) = (void (*)(void))arg;
479 
480 	func();
481 
482 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
483 }
484 
485 static void
486 lt_start_trylock_thread(thread_continue_t func)
487 {
488 	thread_t thread;
489 	kern_return_t kr;
490 
491 	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
492 	assert(kr == KERN_SUCCESS);
493 
494 	thread_deallocate(thread);
495 }
496 
497 static void
498 lt_wait_for_lock_test_threads()
499 {
500 	OSMemoryBarrier();
501 	/* Spin to reduce dependencies */
502 	while (lt_done_threads < lt_target_done_threads) {
503 		lt_sleep_a_little_bit();
504 		OSMemoryBarrier();
505 	}
506 	OSMemoryBarrier();
507 }
508 
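/*
 * Single-threaded (plus one helper thread) checks of the try/timeout paths:
 * a second try-acquire of an already-held mutex, rwlock, spin lock or hw
 * lock must fail, and a hw_lock_to() on a lock held elsewhere must give up.
 */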
509 static kern_return_t
510 lt_test_trylocks()
511 {
512 	boolean_t success;
513 	extern unsigned int real_ncpus;
514 
515 	/*
516 	 * First mtx try lock succeeds, second fails.
517 	 */
518 	success = lck_mtx_try_lock(&lt_mtx);
519 	T_ASSERT_NOTNULL(success, "First mtx try lock");
520 	success = lck_mtx_try_lock(&lt_mtx);
521 	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
522 	lck_mtx_unlock(&lt_mtx);
523 
524 	/*
525 	 * After regular grab, can't try lock.
526 	 */
527 	lck_mtx_lock(&lt_mtx);
528 	success = lck_mtx_try_lock(&lt_mtx);
529 	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
530 	lck_mtx_unlock(&lt_mtx);
531 
532 	/*
533 	 * Two shared try locks on a previously unheld rwlock succeed, and a
534 	 * subsequent exclusive attempt fails.
535 	 */
536 	success = lck_rw_try_lock_shared(&lt_rwlock);
537 	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
538 	success = lck_rw_try_lock_shared(&lt_rwlock);
539 	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
540 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
541 	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
542 	lck_rw_done(&lt_rwlock);
543 	lck_rw_done(&lt_rwlock);
544 
545 	/*
546 	 * After regular shared grab, can trylock
547 	 * for shared but not for exclusive.
548 	 */
549 	lck_rw_lock_shared(&lt_rwlock);
550 	success = lck_rw_try_lock_shared(&lt_rwlock);
551 	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
552 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
553 	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
554 	lck_rw_done(&lt_rwlock);
555 	lck_rw_done(&lt_rwlock);
556 
557 	/*
558 	 * An exclusive try lock succeeds, subsequent shared and exclusive
559 	 * attempts fail.
560 	 */
561 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
562 	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
563 	success = lck_rw_try_lock_shared(&lt_rwlock);
564 	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
565 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
566 	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
567 	lck_rw_done(&lt_rwlock);
568 
569 	/*
570 	 * After regular exclusive grab, neither kind of trylock succeeds.
571 	 */
572 	lck_rw_lock_exclusive(&lt_rwlock);
573 	success = lck_rw_try_lock_shared(&lt_rwlock);
574 	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
575 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
576 	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
577 	lck_rw_done(&lt_rwlock);
578 
579 	/*
580 	 * First spin lock attempts succeed, second attempts fail.
581 	 */
582 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
583 	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
584 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
585 	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
586 	hw_lock_unlock(&lt_hw_lock);
587 
588 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
589 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
590 	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
591 	hw_lock_unlock(&lt_hw_lock);
592 
593 	lt_reset();
594 	lt_thread_lock_grabbed = false;
595 	lt_thread_lock_success = true;
596 	lt_target_done_threads = 1;
597 	OSMemoryBarrier();
598 	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
599 	success = hw_lock_to(&lt_hw_lock, &hw_lock_test_give_up_policy, LCK_GRP_NULL);
600 	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
601 	if (real_ncpus == 1) {
602 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
603 	}
604 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
605 	lt_wait_for_lock_test_threads();
606 	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
607 	if (real_ncpus == 1) {
608 		mp_disable_preemption(); /* don't double-enable when we unlock */
609 	}
610 	hw_lock_unlock(&lt_hw_lock);
611 
612 	lt_reset();
613 	lt_thread_lock_grabbed = false;
614 	lt_thread_lock_success = true;
615 	lt_target_done_threads = 1;
616 	OSMemoryBarrier();
617 	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
618 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
619 	if (real_ncpus == 1) {
620 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
621 	}
622 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
623 	lt_wait_for_lock_test_threads();
624 	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
625 	if (real_ncpus == 1) {
626 		mp_disable_preemption(); /* don't double-enable when we unlock */
627 	}
628 	hw_lock_unlock(&lt_hw_lock);
629 
630 	success = lck_spin_try_lock(&lt_lck_spin_t);
631 	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
632 	success = lck_spin_try_lock(&lt_lck_spin_t);
633 	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
634 	lck_spin_unlock(&lt_lck_spin_t);
635 
636 	lt_reset();
637 	lt_thread_lock_grabbed = false;
638 	lt_thread_lock_success = true;
639 	lt_target_done_threads = 1;
640 	lt_start_trylock_thread(lt_trylock_spin_try_lock);
641 	lck_spin_lock(&lt_lck_spin_t);
642 	if (real_ncpus == 1) {
643 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
644 	}
645 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
646 	lt_wait_for_lock_test_threads();
647 	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
648 	if (real_ncpus == 1) {
649 		mp_disable_preemption(); /* don't double-enable when we unlock */
650 	}
651 	lck_spin_unlock(&lt_lck_spin_t);
652 
653 	return KERN_SUCCESS;
654 }
655 
656 static void
657 lt_thread(void *arg, wait_result_t wres __unused)
658 {
659 	void (*func)(void) = (void (*)(void))arg;
660 	uint32_t i;
661 
662 	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
663 		func();
664 	}
665 
666 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
667 }
668 
669 static void
670 lt_start_lock_thread(thread_continue_t func)
671 {
672 	thread_t thread;
673 	kern_return_t kr;
674 
675 	kr = kernel_thread_start(lt_thread, func, &thread);
676 	assert(kr == KERN_SUCCESS);
677 
678 	thread_deallocate(thread);
679 }
680 
681 #if __AMP__
682 static void
683 lt_bound_thread(void *arg, wait_result_t wres __unused)
684 {
685 	void (*func)(void) = (void (*)(void))arg;
686 
687 	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);
688 
689 	processor_t processor = processor_list;
690 	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
691 		processor = processor->processor_list;
692 	}
693 
694 	if (processor != NULL) {
695 		thread_bind(processor);
696 	}
697 
698 	thread_block(THREAD_CONTINUE_NULL);
699 
700 	func();
701 
702 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
703 }
704 
705 static void
706 lt_cluster_bound_thread(void *arg, char cluster_type)
707 {
708 	void (*func)(void) = (void (*)(void))arg;
709 
710 	thread_t thread = current_thread();
711 
712 	kern_return_t kr = thread_soft_bind_cluster_type(thread, cluster_type);
713 	if (kr != KERN_SUCCESS) {
714 		kprintf("%s>failed to bind to cluster type %c\n", __FUNCTION__, cluster_type);
715 	}
716 
717 	func();
718 
719 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
720 }
721 
722 static void
723 lt_e_thread(void *arg, wait_result_t wres __unused)
724 {
725 	lt_cluster_bound_thread(arg, 'e');
726 }
727 
728 
729 static void
730 lt_p_thread(void *arg, wait_result_t wres __unused)
731 {
732 	lt_cluster_bound_thread(arg, 'p');
733 }
734 
735 static void
736 lt_start_lock_thread_with_bind(thread_continue_t bind_type, thread_continue_t func)
737 {
738 	thread_t thread;
739 	kern_return_t kr;
740 
741 	kr = kernel_thread_start(bind_type, func, &thread);
742 	assert(kr == KERN_SUCCESS);
743 
744 	thread_deallocate(thread);
745 }
746 #endif /* __AMP__ */
747 
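/*
 * Top-level lock test driver: initializes the test locks, then runs the
 * trylock checks and a series of uncontended/contended scenarios, asserting
 * after each that lt_counter and the holder high-water marks match what a
 * correct lock would produce.
 */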
748 static kern_return_t
749 lt_test_locks()
750 {
751 #if SCHED_HYGIENE_DEBUG
752 	/*
753 	 * When testing, the preemption disable threshold may be hit (for
754 	 * example when testing a lock timeout). To avoid this, the preemption
755 	 * disable measurement is temporarily disabled during lock testing.
756 	 */
757 	int old_mode = sched_preemption_disable_debug_mode;
758 	if (old_mode == SCHED_HYGIENE_MODE_PANIC) {
759 		sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
760 	}
761 #endif /* SCHED_HYGIENE_DEBUG */
762 
763 	kern_return_t kr = KERN_SUCCESS;
764 	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
765 	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);
766 
767 	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
768 	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
769 	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
770 	hw_lock_init(&lt_hw_lock);
771 
772 	T_LOG("Testing locks.");
773 
774 	/* Try locks (custom) */
775 	lt_reset();
776 
777 	T_LOG("Running try lock test.");
778 	kr = lt_test_trylocks();
779 	T_EXPECT_NULL(kr, "try lock test failed.");
780 
781 	/* Uncontended mutex */
782 	T_LOG("Running uncontended mutex test.");
783 	lt_reset();
784 	lt_target_done_threads = 1;
785 	lt_start_lock_thread(lt_grab_mutex);
786 	lt_wait_for_lock_test_threads();
787 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
788 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
789 
790 	/* Contended mutex */
791 	T_LOG("Running contended mutex test.");
792 	lt_reset();
793 	lt_target_done_threads = 3;
794 	lt_start_lock_thread(lt_grab_mutex);
795 	lt_start_lock_thread(lt_grab_mutex);
796 	lt_start_lock_thread(lt_grab_mutex);
797 	lt_wait_for_lock_test_threads();
798 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
799 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
800 
801 	/* Contended mutex: try locks */
802 	T_LOG("Running contended mutex trylock test.");
803 	lt_reset();
804 	lt_target_done_threads = 3;
805 	lt_start_lock_thread(lt_grab_mutex_with_try);
806 	lt_start_lock_thread(lt_grab_mutex_with_try);
807 	lt_start_lock_thread(lt_grab_mutex_with_try);
808 	lt_wait_for_lock_test_threads();
809 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
810 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
811 
812 	/* Uncontended exclusive rwlock */
813 	T_LOG("Running uncontended exclusive rwlock test.");
814 	lt_reset();
815 	lt_target_done_threads = 1;
816 	lt_start_lock_thread(lt_grab_rw_exclusive);
817 	lt_wait_for_lock_test_threads();
818 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
819 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
820 
821 	/* Uncontended shared rwlock */
822 
823 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
824 	 *  T_LOG("Running uncontended shared rwlock test.");
825 	 *  lt_reset();
826 	 *  lt_target_done_threads = 1;
827 	 *  lt_start_lock_thread(lt_grab_rw_shared);
828 	 *  lt_wait_for_lock_test_threads();
829 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
830 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
831 	 */
832 
833 	/* Contended exclusive rwlock */
834 	T_LOG("Running contended exclusive rwlock test.");
835 	lt_reset();
836 	lt_target_done_threads = 3;
837 	lt_start_lock_thread(lt_grab_rw_exclusive);
838 	lt_start_lock_thread(lt_grab_rw_exclusive);
839 	lt_start_lock_thread(lt_grab_rw_exclusive);
840 	lt_wait_for_lock_test_threads();
841 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
842 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
843 
844 	/* One shared, two exclusive */
845 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
846 	 *  T_LOG("Running test with one shared and two exclusive rw lock threads.");
847 	 *  lt_reset();
848 	 *  lt_target_done_threads = 3;
849 	 *  lt_start_lock_thread(lt_grab_rw_shared);
850 	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
851 	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
852 	 *  lt_wait_for_lock_test_threads();
853 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
854 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
855 	 */
856 
857 	/* Four shared */
858 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
859 	 *  T_LOG("Running test with four shared holders.");
860 	 *  lt_reset();
861 	 *  lt_target_done_threads = 4;
862 	 *  lt_start_lock_thread(lt_grab_rw_shared);
863 	 *  lt_start_lock_thread(lt_grab_rw_shared);
864 	 *  lt_start_lock_thread(lt_grab_rw_shared);
865 	 *  lt_start_lock_thread(lt_grab_rw_shared);
866 	 *  lt_wait_for_lock_test_threads();
867 	 *  T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
868 	 */
869 
870 	/* Three doing upgrades and downgrades */
871 	T_LOG("Running test with threads upgrading and downgrading.");
872 	lt_reset();
873 	lt_target_done_threads = 3;
874 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
875 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
876 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
877 	lt_wait_for_lock_test_threads();
878 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
879 	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
880 	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
881 
882 	/* Uncontended - exclusive trylocks */
883 	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
884 	lt_reset();
885 	lt_target_done_threads = 1;
886 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
887 	lt_wait_for_lock_test_threads();
888 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
889 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
890 
891 	/* Uncontended - shared trylocks */
892 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
893 	 *  T_LOG("Running test with single thread doing shared rwlock trylocks.");
894 	 *  lt_reset();
895 	 *  lt_target_done_threads = 1;
896 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
897 	 *  lt_wait_for_lock_test_threads();
898 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
899 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
900 	 */
901 
902 	/* Three doing exclusive trylocks */
903 	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
904 	lt_reset();
905 	lt_target_done_threads = 3;
906 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
907 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
908 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
909 	lt_wait_for_lock_test_threads();
910 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
911 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
912 
913 	/* Three doing shared trylocks */
914 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
915 	 *  T_LOG("Running test with threads doing shared rwlock trylocks.");
916 	 *  lt_reset();
917 	 *  lt_target_done_threads = 3;
918 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
919 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
920 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
921 	 *  lt_wait_for_lock_test_threads();
922 	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
923 	 *  T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
924 	 */
925 
926 	/* Three doing various trylocks */
927 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
928 	 *  T_LOG("Running test with threads doing mixed rwlock trylocks.");
929 	 *  lt_reset();
930 	 *  lt_target_done_threads = 4;
931 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
932 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
933 	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
934 	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
935 	 *  lt_wait_for_lock_test_threads();
936 	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
937 	 *  T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
938 	 */
939 
940 	/* HW locks */
941 	T_LOG("Running test with hw_lock_lock()");
942 	lt_reset();
943 	lt_target_done_threads = 3;
944 	lt_start_lock_thread(lt_grab_hw_lock);
945 	lt_start_lock_thread(lt_grab_hw_lock);
946 	lt_start_lock_thread(lt_grab_hw_lock);
947 	lt_wait_for_lock_test_threads();
948 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
949 
950 #if __AMP__
951 	/* Ticket locks stress test */
952 	T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
953 	extern unsigned int real_ncpus;
954 	lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
955 	lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
956 	lt_reset();
957 	lt_target_done_threads = real_ncpus;
958 	uint thread_count = 0;
959 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
960 		lt_start_lock_thread_with_bind(lt_bound_thread, lt_stress_ticket_lock);
961 		thread_count++;
962 	}
963 	T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
964 	lt_wait_for_lock_test_threads();
965 	bool starvation = false;
966 	uint total_local_count = 0;
967 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
968 		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
969 		total_local_count += lt_stress_local_counters[processor->cpu_id];
970 	}
971 	if (mach_absolute_time() > lt_setup_timeout) {
972 		T_FAIL("Stress test setup timed out after %d seconds", LOCK_TEST_SETUP_TIMEOUT_SEC);
973 	} else if (total_local_count != lt_counter) {
974 		T_FAIL("Lock failure\n");
975 	} else if (starvation) {
976 		T_FAIL("Lock starvation found\n");
977 	} else {
978 		T_PASS("Ticket locks stress test with lck_ticket_lock() (%u total acquires)", total_local_count);
979 	}
980 
981 	/* AMP ticket locks stress test */
982 	T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
983 	lt_reset();
984 	lt_target_done_threads = real_ncpus;
985 	thread_count = 0;
986 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
987 		processor_set_t pset = processor->processor_set;
988 		switch (pset->pset_cluster_type) {
989 		case PSET_AMP_P:
990 			lt_start_lock_thread_with_bind(lt_p_thread, lt_stress_ticket_lock);
991 			break;
992 		case PSET_AMP_E:
993 			lt_start_lock_thread_with_bind(lt_e_thread, lt_stress_ticket_lock);
994 			break;
995 		default:
996 			lt_start_lock_thread(lt_stress_ticket_lock);
997 			break;
998 		}
999 		thread_count++;
1000 	}
1001 	T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
1002 	lt_wait_for_lock_test_threads();
1003 #endif /* __AMP__ */
1004 
1005 	/* HW locks: trylocks */
1006 	T_LOG("Running test with hw_lock_try()");
1007 	lt_reset();
1008 	lt_target_done_threads = 3;
1009 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
1010 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
1011 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
1012 	lt_wait_for_lock_test_threads();
1013 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1014 
1015 	/* HW locks: with timeout */
1016 	T_LOG("Running test with hw_lock_to()");
1017 	lt_reset();
1018 	lt_target_done_threads = 3;
1019 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1020 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1021 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1022 	lt_wait_for_lock_test_threads();
1023 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1024 
1025 	/* Spin locks */
1026 	T_LOG("Running test with lck_spin_lock()");
1027 	lt_reset();
1028 	lt_target_done_threads = 3;
1029 	lt_start_lock_thread(lt_grab_spin_lock);
1030 	lt_start_lock_thread(lt_grab_spin_lock);
1031 	lt_start_lock_thread(lt_grab_spin_lock);
1032 	lt_wait_for_lock_test_threads();
1033 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1034 
1035 	/* Spin locks: trylocks */
1036 	T_LOG("Running test with lck_spin_try_lock()");
1037 	lt_reset();
1038 	lt_target_done_threads = 3;
1039 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1040 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1041 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1042 	lt_wait_for_lock_test_threads();
1043 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1044 
1045 #if SCHED_HYGIENE_DEBUG
1046 	sched_preemption_disable_debug_mode = old_mode;
1047 #endif /* SCHED_HYGIENE_DEBUG */
1048 
1049 	return KERN_SUCCESS;
1050 }
1051 
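/*
 * Munger tests. The munge_* routines from sys/munge.h expand a packed array
 * of 32-bit user syscall arguments into 64-bit kernel arguments in place: in
 * the function name, 'w' is a 32-bit word (zero-extended), 's' a signed word
 * (sign-extended), and 'l' a 64-bit value consuming two input words. For
 * example, per the table below, munge_wl turns three input words of
 * MT_INITIAL_VALUE into {MT_W_VAL, MT_L_VAL}. Each table entry lists the
 * munger, its input word count, output argument count, and expected results.
 */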
1052 #define MT_MAX_ARGS             8
1053 #define MT_INITIAL_VALUE        0xfeedbeef
1054 #define MT_W_VAL                (0x00000000feedbeefULL) /* Drop in zeros */
1055 #define MT_S_VAL                (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
1056 #define MT_L_VAL                (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
1057 
1058 typedef void (*sy_munge_t)(void*);
1059 
1060 #define MT_FUNC(x) #x, x
1061 struct munger_test {
1062 	const char      *mt_name;
1063 	sy_munge_t      mt_func;
1064 	uint32_t        mt_in_words;
1065 	uint32_t        mt_nout;
1066 	uint64_t        mt_expected[MT_MAX_ARGS];
1067 } munger_tests[] = {
1068 	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
1069 	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
1070 	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1071 	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1072 	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1073 	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1074 	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1075 	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1076 	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
1077 	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1078 	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1079 	{MT_FUNC(munge_wwlllll), 12, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1080 	{MT_FUNC(munge_wwllllll), 14, 8, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1081 	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1082 	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1083 	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1084 	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1085 	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1086 	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1087 	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1088 	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1089 	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1090 	{MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1091 	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1092 	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1093 	{MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1094 	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1095 	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1096 	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1097 	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1098 	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1099 	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1100 	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1101 	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1102 	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1103 	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
1104 	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1105 	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1106 	{MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1107 	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
1108 	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
1109 	{MT_FUNC(munge_lww), 4, 3, {MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1110 	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1111 	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1112 	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1113 	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
1114 };
1115 
1116 #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
1117 
1118 static void
1119 mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
1120 {
1121 	uint32_t i;
1122 
1123 	for (i = 0; i < in_words; i++) {
1124 		data[i] = MT_INITIAL_VALUE;
1125 	}
1126 
1127 	if (in_words * sizeof(uint32_t) < total_size) {
1128 		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
1129 	}
1130 }
1131 
1132 static void
1133 mt_test_mungers()
1134 {
1135 	uint64_t data[MT_MAX_ARGS];
1136 	uint32_t i, j;
1137 
1138 	for (i = 0; i < MT_TEST_COUNT; i++) {
1139 		struct munger_test *test = &munger_tests[i];
1140 		int pass = 1;
1141 
1142 		T_LOG("Testing %s", test->mt_name);
1143 
1144 		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
1145 		test->mt_func(data);
1146 
1147 		for (j = 0; j < test->mt_nout; j++) {
1148 			if (data[j] != test->mt_expected[j]) {
1149 				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
1150 				pass = 0;
1151 			}
1152 		}
1153 		if (pass) {
1154 			T_PASS(test->mt_name);
1155 		}
1156 	}
1157 }
1158 
1159 #if defined(HAS_APPLE_PAC)
1160 
1161 
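/*
 * ROP/JOP (pointer authentication) sanity test: verify that the IA/IB key
 * registers are programmed, that signing changes a kernel pointer, that
 * authenticating the signed pointer restores it, and that authenticating a
 * corrupted pointer yields the B-key failure code in the upper bits.
 */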
1162 kern_return_t
1163 arm64_ropjop_test()
1164 {
1165 	T_LOG("Testing ROP/JOP");
1166 
1167 	/* how is ROP/JOP configured */
1168 	boolean_t config_rop_enabled = TRUE;
1169 	boolean_t config_jop_enabled = TRUE;
1170 
1171 
1172 	if (config_jop_enabled) {
1173 		/* jop key */
1174 		uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
1175 		uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");
1176 
1177 		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
1178 	}
1179 
1180 	if (config_rop_enabled) {
1181 		/* rop key */
1182 		uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
1183 		uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");
1184 
1185 		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
1186 
1187 		/* sign a kernel virtual address (here, the address of a local variable) */
1188 		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);
1189 
1190 		/* assert it was signed (changed) */
1191 		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);
1192 
1193 		/* authenticate the newly signed KVA */
1194 		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);
1195 
1196 		/* assert the authed KVA is the original KVA */
1197 		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);
1198 
1199 		/* corrupt a signed ptr, auth it, ensure auth failed */
1200 		uint64_t kva_corrupted = kva_signed ^ 1;
1201 
1202 		/* authenticate the corrupted pointer */
1203 		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);
1204 
1205 		/* when AuthIB fails, an error code of 2'b10 is written into bits 62:61 (see the masks below) */
1206 		uint64_t auth_fail_mask = 3ULL << 61;
1207 		uint64_t authib_fail = 2ULL << 61;
1208 
1209 		/* assert the failed authIB of corrupted pointer is tagged */
1210 		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
1211 	}
1212 
1213 	return KERN_SUCCESS;
1214 }
1215 #endif /* defined(HAS_APPLE_PAC) */
1216 
1217 #if __ARM_PAN_AVAILABLE__
1218 
1219 struct pan_test_thread_args {
1220 	volatile bool join;
1221 };
1222 
1223 static void
1224 arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
1225 {
1226 	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1227 
1228 	struct pan_test_thread_args *args = arg;
1229 
1230 	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
1231 		thread_bind(p);
1232 		thread_block(THREAD_CONTINUE_NULL);
1233 		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
1234 		arm64_pan_test();
1235 	}
1236 
1237 	/* unbind thread from specific cpu */
1238 	thread_bind(PROCESSOR_NULL);
1239 	thread_block(THREAD_CONTINUE_NULL);
1240 
1241 	while (!args->join) {
1242 		;
1243 	}
1244 
1245 	thread_wakeup(args);
1246 }
1247 
1248 kern_return_t
1249 arm64_late_pan_test()
1250 {
1251 	thread_t thread;
1252 	kern_return_t kr;
1253 
1254 	struct pan_test_thread_args args;
1255 	args.join = false;
1256 
1257 	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
1258 	assert(kr == KERN_SUCCESS);
1259 
1260 	thread_deallocate(thread);
1261 
1262 	assert_wait(&args, THREAD_UNINT);
1263 	args.join = true;
1264 	thread_block(THREAD_CONTINUE_NULL);
1265 	return KERN_SUCCESS;
1266 }
1267 
1268 // Disable KASAN checking for PAN tests as the fixed commpage address doesn't have a shadow mapping
1269 
1270 static NOKASAN bool
1271 arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
1272 {
1273 	bool retval                 = false;
1274 	uint64_t esr                = get_saved_state_esr(state);
1275 	esr_exception_class_t class = ESR_EC(esr);
1276 	fault_status_t fsc          = ISS_IA_FSC(ESR_ISS(esr));
1277 	uint32_t cpsr               = get_saved_state_cpsr(state);
1278 	uint64_t far                = get_saved_state_far(state);
1279 
1280 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
1281 	    (cpsr & PSR64_PAN) &&
1282 	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
1283 		++pan_exception_level;
1284 		// read the user-accessible value to make sure
1285 		// pan is enabled and produces a 2nd fault from
1286 		// the exception handler
1287 		if (pan_exception_level == 1) {
1288 			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
1289 			pan_fault_value = *(volatile char *)far;
1290 			ml_expect_fault_end();
1291 			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1292 		}
1293 		// this fault address is used for PAN test
1294 		// disable PAN and rerun
1295 		mask_saved_state_cpsr(state, 0, PSR64_PAN);
1296 
1297 		retval = true;
1298 	}
1299 
1300 	return retval;
1301 }
1302 
1303 static NOKASAN bool
1304 arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
1305 {
1306 	bool retval             = false;
1307 	uint64_t esr            = get_saved_state_esr(state);
1308 	esr_exception_class_t class = ESR_EC(esr);
1309 	fault_status_t fsc      = ISS_IA_FSC(ESR_ISS(esr));
1310 	uint32_t cpsr           = get_saved_state_cpsr(state);
1311 
1312 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
1313 	    !(cpsr & PSR64_PAN)) {
1314 		++pan_exception_level;
1315 		// On an exception taken from a PAN-disabled context, verify
1316 		// that PAN is re-enabled for the exception handler and that
1317 		// accessing the test address produces a PAN fault.
1318 		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
1319 		pan_fault_value = *(volatile char *)pan_test_addr;
1320 		ml_expect_fault_end();
1321 		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1322 		add_saved_state_pc(state, 4);
1323 
1324 		retval = true;
1325 	}
1326 
1327 	return retval;
1328 }
1329 
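/*
 * PAN test proper. With a throwaway pmap installed in TTBR0 and a known
 * value visible both through the physical aperture and through a user-range
 * alias, a kernel load from the user address must first take a PAN
 * permission fault (handled above, which then clears PAN and retries), and a
 * store to the read-only kernel global must fault with PAN re-enabled on
 * exception entry. pan_exception_level counts the nested faults each case
 * is expected to take.
 */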
1330 NOKASAN kern_return_t
1331 arm64_pan_test()
1332 {
1333 	bool values_match = false;
1334 	vm_offset_t priv_addr = 0;
1335 
1336 	T_LOG("Testing PAN.");
1337 
1338 
1339 	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");
1340 
1341 	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1342 
1343 	pan_exception_level = 0;
1344 	pan_fault_value = 0xDE;
1345 
1346 	// Create an empty pmap, so we can map a user-accessible page
1347 	pmap_t pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
1348 	T_ASSERT(pmap != NULL, NULL);
1349 
1350 	// Get a physical page to back the mapping
1351 	vm_page_t vm_page = vm_page_grab();
1352 	T_ASSERT(vm_page != VM_PAGE_NULL, NULL);
1353 	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(vm_page);
1354 	pmap_paddr_t pa = ptoa(pn);
1355 
1356 	// Write to the underlying physical page through the physical aperture
1357 	// so we can test against a known value
1358 	priv_addr = phystokv((pmap_paddr_t)pa);
1359 	*(volatile char *)priv_addr = 0xAB;
1360 
1361 	// Map the page into the user address space at some non-zero address
1362 	pan_test_addr = PAGE_SIZE;
1363 	pmap_enter(pmap, pan_test_addr, pn, VM_PROT_READ, VM_PROT_READ, 0, true, PMAP_MAPPING_TYPE_INFER);
1364 
1365 	// Context-switch with PAN disabled is prohibited; prevent test logging from
1366 	// triggering a voluntary context switch.
1367 	mp_disable_preemption();
1368 
1369 	// Insert the user's pmap root table pointer in TTBR0
1370 	thread_t thread = current_thread();
1371 	pmap_t old_pmap = vm_map_pmap(thread->map);
1372 	pmap_switch(pmap, thread);
1373 
1374 	// Below should trigger a PAN exception as pan_test_addr is accessible
1375 	// in user mode
1376 	// The exception handler, upon recognizing the fault address is pan_test_addr,
1377 	// will disable PAN and rerun this instruction successfully
1378 	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
1379 	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
1380 	ml_expect_fault_end();
1381 	T_ASSERT(values_match, NULL);
1382 
1383 	T_ASSERT(pan_exception_level == 2, NULL);
1384 
1385 	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1386 
1387 	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1388 
1389 	pan_exception_level = 0;
1390 	pan_fault_value = 0xAD;
1391 	pan_ro_addr = (vm_offset_t) &pan_ro_value;
1392 
1393 	// Force a permission fault while PAN is disabled to make sure PAN is
1394 	// re-enabled during the exception handler.
1395 	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
1396 	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
1397 	ml_expect_fault_end();
1398 
1399 	T_ASSERT(pan_exception_level == 2, NULL);
1400 
1401 	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1402 
1403 	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1404 
1405 	pmap_switch(old_pmap, thread);
1406 
1407 	pan_ro_addr = 0;
1408 
1409 	__builtin_arm_wsr("pan", 1);
1410 
1411 	mp_enable_preemption();
1412 
1413 	pmap_remove(pmap, pan_test_addr, pan_test_addr + PAGE_SIZE);
1414 	pan_test_addr = 0;
1415 
1416 	vm_page_lock_queues();
1417 	vm_page_free(vm_page);
1418 	vm_page_unlock_queues();
1419 	pmap_destroy(pmap);
1420 
1421 	return KERN_SUCCESS;
1422 }
1423 #endif /* __ARM_PAN_AVAILABLE__ */
1424 
1425 
1426 kern_return_t
1427 arm64_lock_test()
1428 {
1429 	return lt_test_locks();
1430 }
1431 
1432 kern_return_t
1433 arm64_munger_test()
1434 {
1435 	mt_test_mungers();
1436 	return 0;
1437 }
1438 
1439 #if (defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)) && defined(CONFIG_XNUPOST)
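/*
 * CTRR test state. The CTRR/KTRR hardware region makes kernel text and
 * rodata unwritable (and code outside it unexecutable) even from EL1. The
 * test maps ctrr_test_page onto a read-only global and onto an NX data page
 * holding a RET, expecting the write and the call to fault; the handlers
 * below record the ESR and step past the faulting instruction.
 */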
1440 SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
1441 uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
1442 volatile uint64_t ctrr_exception_esr;
1443 vm_offset_t ctrr_test_va;
1444 vm_offset_t ctrr_test_page;
1445 atomic_bool ctrr_test_in_progress;
1446 
1447 kern_return_t
1448 ctrr_test(void)
1449 {
1450 	processor_t p;
1451 
1452 	/*
1453 	 * The test uses some globals and also a specific reserved VA region, so it
1454 	 * can't run concurrently.  This might otherwise happen via the sysctl
1455 	 * interface.
1456 	 */
1457 	bool expected = false;
1458 	if (!atomic_compare_exchange_strong_explicit(&ctrr_test_in_progress,
1459 	    &expected, true,
1460 	    memory_order_acq_rel, memory_order_relaxed)) {
1461 		T_FAIL("Can't run multiple CTRR tests at once");
1462 		return KERN_SUCCESS;
1463 	}
1464 
1465 
1466 	T_LOG("Running CTRR test.");
1467 
1468 	for (p = processor_list; p != NULL; p = p->processor_list) {
1469 		thread_bind(p);
1470 		thread_block(THREAD_CONTINUE_NULL);
1471 		T_LOG("Running CTRR test on CPU %d\n", p->cpu_id);
1472 		ctrr_test_cpu();
1473 	}
1474 
1475 	/* unbind thread from specific cpu */
1476 	thread_bind(PROCESSOR_NULL);
1477 	thread_block(THREAD_CONTINUE_NULL);
1478 
1479 	T_PASS("Done running CTRR test on all CPUs");
1480 	atomic_store_explicit(&ctrr_test_in_progress, false, memory_order_release);
1481 
1482 	return KERN_SUCCESS;
1483 }
1484 
1485 static bool
1486 ctrr_test_ro_fault_handler(arm_saved_state_t * state)
1487 {
1488 	bool retval                 = false;
1489 	uint64_t esr                = get_saved_state_esr(state);
1490 	esr_exception_class_t class = ESR_EC(esr);
1491 	fault_status_t fsc          = ISS_DA_FSC(ESR_ISS(esr));
1492 
1493 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1494 		ctrr_exception_esr = esr;
1495 		add_saved_state_pc(state, 4);
1496 		retval = true;
1497 	}
1498 
1499 	return retval;
1500 }
1501 
1502 static bool
1503 ctrr_test_nx_fault_handler(arm_saved_state_t * state)
1504 {
1505 	bool retval                 = false;
1506 	uint64_t esr                = get_saved_state_esr(state);
1507 	esr_exception_class_t class = ESR_EC(esr);
1508 	fault_status_t fsc          = ISS_IA_FSC(ESR_ISS(esr));
1509 
1510 	if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1511 		ctrr_exception_esr = esr;
1512 		/* return to the instruction immediately after the call to NX page */
1513 		set_saved_state_pc(state, get_saved_state_lr(state));
1514 #if BTI_ENFORCED
1515 		/* Clear BTYPE to prevent taking another exception on ERET */
1516 		uint32_t spsr = get_saved_state_cpsr(state);
1517 		spsr &= ~PSR_BTYPE_MASK;
1518 		set_saved_state_cpsr(state, spsr);
1519 #endif /* BTI_ENFORCED */
1520 		retval = true;
1521 	}
1522 
1523 	return retval;
1524 }
1525 
1526 // Disable KASAN checking for CTRR tests as the test VA doesn't have a shadow mapping
1527 
1528 /* test CTRR on a cpu, caller to bind thread to desired cpu */
1529 /* ctrr_test_page was reserved during bootstrap process if no SPTM */
1530 NOKASAN kern_return_t
1531 ctrr_test_cpu(void)
1532 {
1533 	ppnum_t ro_pn, nx_pn;
1534 	uint64_t *ctrr_ro_test_ptr;
1535 	void (*ctrr_nx_test_ptr)(void);
1536 	kern_return_t kr;
1537 	uint64_t prot = 0;
1538 	extern vm_offset_t virtual_space_start;
1539 	extern vm_offset_t rorgn_begin;
1540 	extern vm_offset_t rorgn_end;
1541 
1542 	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
1543 	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
1544 	bool ctrr_enabled = !ml_unsafe_kernel_text();
1545 
1546 #if CONFIG_SPTM
1547 	if (/* DISABLES CODE */ (1)) {
1548 		T_SKIP("Skipping CTRR test because testing under SPTM is not supported yet");
1549 		return KERN_SUCCESS;
1550 	}
1551 #endif
1552 
1553 #if defined(KERNEL_INTEGRITY_PV_CTRR)
1554 	if (rorgn_begin == 0 && rorgn_end == 0) {
1555 		// Under paravirtualized CTRR, it's possible that we want CTRR to be
1556 		// enabled but we're running under an older host that doesn't support
1557 		// it.
1558 		ctrr_enabled = false;
1559 		T_LOG("Treating paravirtualized CTRR as disabled due to lack of support");
1560 	}
1561 #endif
1562 
1563 	// The CTRR read-only region is the physical address range [rorgn_begin, rorgn_end].
1564 	// rorgn_end will be one byte short of a page boundary.
1565 	if (ctrr_enabled) {
1566 		T_EXPECT(rorgn_begin != 0, "Expect rorgn_begin to be set when CTRR enabled");
1567 		T_EXPECT_GE_ULONG(rorgn_end, rorgn_begin, "Expect rorgn_end to be >= rorgn_begin when CTRR enabled");
1568 
1569 		pmap_paddr_t ro_test_pa = kvtophys_nofail(ro_test_va);
1570 		pmap_paddr_t nx_test_pa = kvtophys_nofail(nx_test_va);
1571 
1572 		T_EXPECT(rorgn_begin <= ro_test_pa && ro_test_pa <= rorgn_end, "Expect ro_test_pa to be inside the CTRR region");
1573 		T_EXPECT((nx_test_pa < rorgn_begin) ^ (nx_test_pa > rorgn_end), "Expect nx_test_pa to be outside the CTRR region");
1574 	} else {
1575 		T_EXPECT_EQ_ULONG(rorgn_begin, 0, "Expect rorgn_begin to be unset when CTRR disabled");
1576 		T_EXPECT_EQ_ULONG(rorgn_end, 0, "Expect rorgn_end to be unset when CTRR disabled");
1577 		T_LOG("Skipping region check because CTRR is disabled");
1578 	}
1579 
1580 	if (ctrr_enabled) {
1581 		T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
1582 		for (pmap_paddr_t page_pa = rorgn_begin; page_pa <= rorgn_end; page_pa += PAGE_SIZE) {
1583 			vm_offset_t page_va = phystokv(page_pa);
1584 			for (vm_offset_t va = page_va; va < page_va + PAGE_SIZE; va += 8) {
1585 				volatile uint64_t x = *(uint64_t *)va;
1586 				(void) x; /* read for side effect only */
1587 			}
1588 		}
1589 	} else {
1590 		T_LOG("Skipping read test because CTRR is disabled");
1591 	}
1592 
1593 	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
1594 	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
1595 	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");
1596 
1597 	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
1598 	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
1599 	T_ASSERT(ctrr_test_page != 0, "Expect ctrr_test_page to be initialized");
1600 
1601 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1602 	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");
1603 
1604 	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
1605 	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
1606 	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
1607 	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");
1608 
1609 	// assert entire mmu prot path (Hierarchical protection model) is NOT RO
1610 	// fetch effective block level protections from table/block entries
1611 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1612 	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");
1613 
1614 	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
1615 	ctrr_ro_test_ptr = (void *)ctrr_test_va;
1616 
1617 	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);
1618 
1619 	// should cause data abort
1620 	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
1621 	*ctrr_ro_test_ptr = 1;
1622 	ml_expect_fault_end();
1623 
1624 	// ensure write permission fault at expected level
1625 	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault
1626 
1627 	if (ctrr_enabled) {
1628 		T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
1629 		T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1630 		T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");
1631 	} else {
1632 		T_EXPECT(ctrr_exception_esr == 0, "No fault expected with CTRR disabled");
1633 	}
1634 
1635 	ctrr_test_va = 0;
1636 	ctrr_exception_esr = 0;
1637 	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1638 
1639 	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);
1640 
1641 	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
1642 	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
1643 
1644 	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");
1645 
1646 	// assert entire mmu prot path (Hierarchical protection model) is NOT XN
1647 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1648 	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX (prot=0x%lx)", prot);
1649 
1650 	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
1651 #if __has_feature(ptrauth_calls)
1652 	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
1653 #else
1654 	ctrr_nx_test_ptr = (void *)ctrr_test_va;
1655 #endif
1656 
1657 	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);
1658 
1659 	// should cause prefetch abort
1660 	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
1661 	ctrr_nx_test_ptr();
1662 	ml_expect_fault_end();
1663 
1664 	if (ctrr_enabled) {
1665 		// FIXME: rdar://143430725 (xnu support for paravirtualized CTXR)
1666 		// Without FEAT_XNX support on the host side, we cannot test kernel execution outside CTXR regions.
1667 #if !defined(KERNEL_INTEGRITY_PV_CTRR)
1668 		// TODO: ensure execute permission fault at expected level
1669 		T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
1670 		T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1671 #endif /* !defined(KERNEL_INTEGRITY_PV_CTRR) */
1672 	} else {
1673 		T_EXPECT(ctrr_exception_esr == 0, "No fault expected with CTRR disabled");
1674 	}
1675 
1676 	ctrr_test_va = 0;
1677 	ctrr_exception_esr = 0;
1678 
1679 	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1680 
1681 	return KERN_SUCCESS;
1682 }
1683 #endif /* (defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)) && defined(CONFIG_XNUPOST) */
1684 
1685 
1686 /**
1687  * Explicitly assert that xnu is still uniprocessor before running a POST test.
1688  *
1689  * In practice, tests in this module can safely manipulate CPU state without
1690  * fear of getting preempted.  There's no way for cpu_boot_thread() to bring up
1691  * the secondary CPUs until StartIOKitMatching() completes, and arm64 orders
1692  * kern_post_test() before StartIOKitMatching().
1693  *
1694  * But this is also an implementation detail.  Tests that rely on this ordering
1695  * should call assert_uniprocessor(), so that we can figure out a workaround
1696  * on the off-chance this ordering ever changes.
1697  */
1698 __unused static void
1699 assert_uniprocessor(void)
1700 {
1701 	extern unsigned int real_ncpus;
1702 	unsigned int ncpus = os_atomic_load(&real_ncpus, relaxed);
1703 	T_QUIET; T_ASSERT_EQ_UINT(1, ncpus, "arm64 kernel POST tests should run before any secondary CPUs are brought up");
1704 }
1705 
1706 
1707 #if CONFIG_SPTM
1708 volatile uint8_t xnu_post_panic_lockdown_did_fire = false;
1709 typedef uint64_t (panic_lockdown_helper_fcn_t)(uint64_t raw);
1710 typedef bool (panic_lockdown_precondition_fcn_t)(void);
1711 typedef bool (panic_lockdown_recovery_fcn_t)(arm_saved_state_t *);
1712 
1713 /* SP0 vector tests */
1714 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_load;
1715 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_gdbtrap;
1716 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c470;
1717 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c471;
1718 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c472;
1719 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c473;
1720 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_telemetry_brk_ff00;
1721 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_br_auth_fail;
1722 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_ldr_auth_fail;
1723 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_fpac;
1724 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_copyio;
1725 extern uint8_t arm64_panic_lockdown_test_copyio_fault_pc;
1726 #if HAS_MTE
1727 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_copyio_tag_check_fault_recoverable;
1728 extern uint8_t arm64_panic_lockdown_test_copyio_tag_check_fault_recoverable_fault_pc;
1729 #endif /* HAS_MTE */
1730 
1731 extern int gARM_FEAT_FPACCOMBINE;
1732 
1733 /* SP1 vector tests */
1734 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_invalid_stack;
1735 extern bool arm64_panic_lockdown_test_sp1_invalid_stack_handler(arm_saved_state_t *);
1736 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_exception_in_vector;
1737 extern panic_lockdown_helper_fcn_t el1_sp1_synchronous_raise_exception_in_vector;
1738 extern bool arm64_panic_lockdown_test_sp1_exception_in_vector_handler(arm_saved_state_t *);
1739 
1740 #if DEVELOPMENT || DEBUG
1741 extern struct panic_lockdown_initiator_state debug_panic_lockdown_initiator_state;
1742 #endif /* DEVELOPMENT || DEBUG */
1743 
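/*
 * One panic-lockdown test case: `func` is invoked with `arg` and is expected
 * to raise an exception of class `expected_ec` (optionally narrowed by
 * `expected_fs` when `check_fs` is set).  The two expect_lockdown_* flags say
 * whether panic lockdown should fire when the fault is taken with exceptions
 * masked vs. unmasked.  If the fault PC is not simply the start of `func`
 * (e.g. the copyio helpers), override_expected_fault_pc supplies it.
 */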
1744 typedef struct arm64_panic_lockdown_test_case {
1745 	const char *name;
1746 	panic_lockdown_helper_fcn_t *func;
1747 	uint64_t arg;
1748 	panic_lockdown_precondition_fcn_t *precondition;
1749 	esr_exception_class_t expected_ec;
1750 	bool check_fs;
1751 	fault_status_t expected_fs;
1752 	bool expect_lockdown_exceptions_masked;
1753 	bool expect_lockdown_exceptions_unmasked;
1754 	bool override_expected_fault_pc_valid;
1755 	uint64_t override_expected_fault_pc;
1756 } arm64_panic_lockdown_test_case_s;
1757 
1758 static arm64_panic_lockdown_test_case_s *arm64_panic_lockdown_active_test;
1759 static volatile bool arm64_panic_lockdown_caught_exception;
1760 
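/*
 * Shared expected-fault handler for the lockdown test cases.  It only accepts
 * the exception if it matches the active test case's expected class (and
 * fault status, when requested); otherwise the fault is treated as unexpected
 * and the normal exception path runs.
 */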
1761 static bool
1762 arm64_panic_lockdown_test_exception_handler(arm_saved_state_t * state)
1763 {
1764 	uint64_t esr = get_saved_state_esr(state);
1765 	esr_exception_class_t class = ESR_EC(esr);
1766 	fault_status_t fs = ISS_DA_FSC(ESR_ISS(esr));
1767 
1768 	if (!arm64_panic_lockdown_active_test ||
1769 	    class != arm64_panic_lockdown_active_test->expected_ec ||
1770 	    (arm64_panic_lockdown_active_test->check_fs &&
1771 	    fs != arm64_panic_lockdown_active_test->expected_fs)) {
1772 		return false;
1773 	}
1774 
1775 
1776 #if BTI_ENFORCED
1777 	/* Clear BTYPE to prevent taking another exception on ERET */
1778 	uint32_t spsr = get_saved_state_cpsr(state);
1779 	spsr &= ~PSR_BTYPE_MASK;
1780 	set_saved_state_cpsr(state, spsr);
1781 #endif /* BTI_ENFORCED */
1782 
1783 	/* We got the expected exception, recover by forging an early return */
1784 	set_saved_state_pc(state, get_saved_state_lr(state));
1785 	arm64_panic_lockdown_caught_exception = true;
1786 
1787 	return true;
1788 }
1789 
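/*
 * Run one lockdown test case.  The trigger function is called under
 * ml_expect_fault_pc_begin()/end(), optionally with interrupts masked, and
 * the case passes only if panic lockdown fired (or not) as the caller
 * expected and the expected exception was actually caught.
 */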
1790 static void
1791 panic_lockdown_expect_test(const char *treatment,
1792     arm64_panic_lockdown_test_case_s *test,
1793     bool expect_lockdown,
1794     bool mask_interrupts)
1795 {
1796 	int ints = 0;
1797 
1798 	arm64_panic_lockdown_active_test = test;
1799 	xnu_post_panic_lockdown_did_fire = false;
1800 	arm64_panic_lockdown_caught_exception = false;
1801 
1802 	uintptr_t fault_pc;
1803 	if (test->override_expected_fault_pc_valid) {
1804 		fault_pc = (uintptr_t)test->override_expected_fault_pc;
1805 	} else {
1806 		fault_pc = (uintptr_t)test->func;
1807 #ifdef BTI_ENFORCED
1808 		/* When BTI is enabled, we expect the fault to occur after the landing pad */
1809 		fault_pc += 4;
1810 #endif /* BTI_ENFORCED */
1811 	}
1812 
1813 
1814 	ml_expect_fault_pc_begin(
1815 		arm64_panic_lockdown_test_exception_handler,
1816 		fault_pc);
1817 
1818 	if (mask_interrupts) {
1819 		ints = ml_set_interrupts_enabled(FALSE);
1820 	}
1821 
1822 	(void)test->func(test->arg);
1823 
1824 	if (mask_interrupts) {
1825 		(void)ml_set_interrupts_enabled(ints);
1826 	}
1827 
1828 	ml_expect_fault_end();
1829 
1830 	if (expect_lockdown == xnu_post_panic_lockdown_did_fire &&
1831 	    arm64_panic_lockdown_caught_exception) {
1832 		T_PASS("%s + %s OK\n", test->name, treatment);
1833 	} else {
1834 		T_FAIL(
1835 			"%s + %s FAIL (expected lockdown: %d, did lockdown: %d, caught exception: %d)\n",
1836 			test->name, treatment,
1837 			expect_lockdown, xnu_post_panic_lockdown_did_fire,
1838 			arm64_panic_lockdown_caught_exception);
1839 	}
1840 
1841 #if DEVELOPMENT || DEBUG
1842 	/* Check that the debug info is minimally functional */
1843 	if (expect_lockdown) {
1844 		T_EXPECT_NE_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
1845 		    0ULL, "Initiator PC set");
1846 	} else {
1847 		T_EXPECT_EQ_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
1848 		    0ULL, "Initiator PC not set");
1849 	}
1850 
1851 	/* Reset the debug data so it can be filled later if needed */
1852 	debug_panic_lockdown_initiator_state.initiator_pc = 0;
1853 #endif /* DEVELOPMENT || DEBUG */
1854 }
1855 
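/*
 * Raw variant used for the SP1 vector tests below: `entrypoint` is run and is
 * expected to make `faulting_function` fault (the expected fault PC is taken
 * from it), with `fault_handler` performing the recovery.  The test passes
 * only if the helper reports success and panic lockdown fired.
 */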
1856 static void
1857 panic_lockdown_expect_fault_raw(const char *label,
1858     panic_lockdown_helper_fcn_t entrypoint,
1859     panic_lockdown_helper_fcn_t faulting_function,
1860     expected_fault_handler_t fault_handler)
1861 {
1862 	uint64_t test_success = 0;
1863 	xnu_post_panic_lockdown_did_fire = false;
1864 
1865 	uintptr_t fault_pc = (uintptr_t)faulting_function;
1866 #ifdef BTI_ENFORCED
1867 	/* When BTI is enabled, we expect the fault to occur after the landing pad */
1868 	fault_pc += 4;
1869 #endif /* BTI_ENFORCED */
1870 
1871 	ml_expect_fault_pc_begin(fault_handler, fault_pc);
1872 
1873 	test_success = entrypoint(0);
1874 
1875 	ml_expect_fault_end();
1876 
1877 	if (test_success && xnu_post_panic_lockdown_did_fire) {
1878 		T_PASS("%s OK\n", label);
1879 	} else {
1880 		T_FAIL("%s FAIL (test returned: %llu, did lockdown: %d)\n",
1881 		    label, test_success, xnu_post_panic_lockdown_did_fire);
1882 	}
1883 }
1884 
1885 /**
1886  * Returns a pointer which is guaranteed to be invalid under IA with the zero
1887  * discriminator.
1888  *
1889  * This is somewhat over-complicated, since it's exceedingly unlikely that any
1890  * given pointer will have a zero PAC (and thus break the test), but it's
1891  * easy enough to avoid the problem.
1892  */
1893 static uint64_t
1894 panic_lockdown_pacia_get_invalid_ptr()
1895 {
1896 	char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1897 	char *signed_ptr = NULL;
1898 	do {
1899 		unsigned_ptr += 4 /* avoid alignment exceptions */;
1900 		signed_ptr = ptrauth_sign_unauthenticated(
1901 			unsigned_ptr,
1902 			ptrauth_key_asia,
1903 			0);
1904 	} while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1905 
1906 	return (uint64_t)unsigned_ptr;
1907 }
1908 
1909 /**
1910  * Returns a pointer which is guaranteed to be invalid under DA with the zero
1911  * discriminator.
1912  */
1913 static uint64_t
1914 panic_lockdown_pacda_get_invalid_ptr(void)
1915 {
1916 	char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1917 	char *signed_ptr = NULL;
1918 	do {
1919 		unsigned_ptr += 8 /* avoid alignment exceptions */;
1920 		signed_ptr = ptrauth_sign_unauthenticated(
1921 			unsigned_ptr,
1922 			ptrauth_key_asda,
1923 			0);
1924 	} while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1925 
1926 	return (uint64_t)unsigned_ptr;
1927 }
1928 
1929 #if HAS_MTE
1930 static bool
1931 arm64_panic_lockdown_is_mte_enabled(void)
1932 {
1933 	if (!is_mte_enabled) {
1934 		T_LOG("MTE disabled");
1935 	}
1936 	return is_mte_enabled;
1937 }
1938 #endif /* HAS_MTE */
1939 
1940 kern_return_t
1941 arm64_panic_lockdown_test(void)
1942 {
1943 #if __has_feature(ptrauth_calls)
1944 	uint64_t ia_invalid = panic_lockdown_pacia_get_invalid_ptr();
1945 #endif /* ptrauth_calls */
1946 
1947 #if HAS_MTE
1948 	/*
1949 	 * Generate a kernel pointer with an invalid/wrong tag by grabbing an
1950 	 * arbitrary pointer (in this case, a canonically tagged global) and
1951 	 * advancing the tag by one.
1952 	 */
1953 	uintptr_t kernel_ptr_invalid_tag = (uintptr_t)__arm_mte_increment_tag(
1954 		&xnu_post_panic_lockdown_did_fire, 1);
1955 #endif /* HAS_MTE */
1956 	arm64_panic_lockdown_test_case_s tests[] = {
1957 		{
1958 			.name = "arm64_panic_lockdown_test_load",
1959 			.func = &arm64_panic_lockdown_test_load,
1960 			/* Trigger a null deref */
1961 			.arg = (uint64_t)NULL,
1962 			.expected_ec = ESR_EC_DABORT_EL1,
1963 			.expect_lockdown_exceptions_masked = true,
1964 			.expect_lockdown_exceptions_unmasked = false,
1965 		},
1966 		{
1967 			.name = "arm64_panic_lockdown_test_gdbtrap",
1968 			.func = &arm64_panic_lockdown_test_gdbtrap,
1969 			.arg = 0,
1970 			.expected_ec = ESR_EC_UNCATEGORIZED,
1971 			/* GDBTRAP instructions should be allowed everywhere */
1972 			.expect_lockdown_exceptions_masked = false,
1973 			.expect_lockdown_exceptions_unmasked = false,
1974 		},
1975 #if __has_feature(ptrauth_calls)
1976 		{
1977 			.name = "arm64_panic_lockdown_test_pac_brk_c470",
1978 			.func = &arm64_panic_lockdown_test_pac_brk_c470,
1979 			.arg = 0,
1980 			.expected_ec = ESR_EC_BRK_AARCH64,
1981 			.expect_lockdown_exceptions_masked = true,
1982 			.expect_lockdown_exceptions_unmasked = true,
1983 		},
1984 		{
1985 			.name = "arm64_panic_lockdown_test_pac_brk_c471",
1986 			.func = &arm64_panic_lockdown_test_pac_brk_c471,
1987 			.arg = 0,
1988 			.expected_ec = ESR_EC_BRK_AARCH64,
1989 			.expect_lockdown_exceptions_masked = true,
1990 			.expect_lockdown_exceptions_unmasked = true,
1991 		},
1992 		{
1993 			.name = "arm64_panic_lockdown_test_pac_brk_c472",
1994 			.func = &arm64_panic_lockdown_test_pac_brk_c472,
1995 			.arg = 0,
1996 			.expected_ec = ESR_EC_BRK_AARCH64,
1997 			.expect_lockdown_exceptions_masked = true,
1998 			.expect_lockdown_exceptions_unmasked = true,
1999 		},
2000 		{
2001 			.name = "arm64_panic_lockdown_test_pac_brk_c473",
2002 			.func = &arm64_panic_lockdown_test_pac_brk_c473,
2003 			.arg = 0,
2004 			.expected_ec = ESR_EC_BRK_AARCH64,
2005 			.expect_lockdown_exceptions_masked = true,
2006 			.expect_lockdown_exceptions_unmasked = true,
2007 		},
2008 		{
2009 			.name = "arm64_panic_lockdown_test_telemetry_brk_ff00",
2010 			.func = &arm64_panic_lockdown_test_telemetry_brk_ff00,
2011 			.arg = 0,
2012 			.expected_ec = ESR_EC_BRK_AARCH64,
2013 			/*
2014 			 * PAC breakpoints are not the only breakpoints, ensure that other
2015 			 * BRKs (like those used for telemetry) do not trigger lockdowns.
2016 			 * This is necessary to avoid conflicts with features like UBSan
2017 			 * telemetry (which could fire at any time in C code).
2018 			 */
2019 			.expect_lockdown_exceptions_masked = false,
2020 			.expect_lockdown_exceptions_unmasked = false,
2021 		},
2022 		{
2023 			.name = "arm64_panic_lockdown_test_br_auth_fail",
2024 			.func = &arm64_panic_lockdown_test_br_auth_fail,
2025 			.arg = ia_invalid,
2026 			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_IABORT_EL1,
2027 			.expect_lockdown_exceptions_masked = true,
2028 			.expect_lockdown_exceptions_unmasked = true,
2029 			/*
2030 			 * Pre-FEAT_FPACCOMBINED, BRAx branches to a poisoned PC so we
2031 			 * expect to fault on the branch target rather than the branch
2032 			 * itself. The exact ELR will likely be different from ia_invalid,
2033 			 * but since the expect logic in sleh only matches on low bits (i.e.
2034 			 * not bits which will be poisoned), this is fine.
2035 			 * On FEAT_FPACCOMBINED devices, we will fault on the branch itself.
2036 			 */
2037 			.override_expected_fault_pc_valid = !gARM_FEAT_FPACCOMBINE,
2038 			.override_expected_fault_pc = ia_invalid
2039 		},
2040 		{
2041 			.name = "arm64_panic_lockdown_test_ldr_auth_fail",
2042 			.func = &arm64_panic_lockdown_test_ldr_auth_fail,
2043 			.arg = panic_lockdown_pacda_get_invalid_ptr(),
2044 			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_DABORT_EL1,
2045 			.expect_lockdown_exceptions_masked = true,
2046 			.expect_lockdown_exceptions_unmasked = true,
2047 		},
2048 		{
2049 			.name = "arm64_panic_lockdown_test_copyio_poison",
2050 			.func = &arm64_panic_lockdown_test_copyio,
2051 			/* fake a poisoned kernel pointer by flipping the bottom PAC bit */
2052 			.arg = ((uint64_t)-1) ^ (1LLU << (64 - T1SZ_BOOT)),
2053 			.expected_ec = ESR_EC_DABORT_EL1,
2054 			.expect_lockdown_exceptions_masked = false,
2055 			.expect_lockdown_exceptions_unmasked = false,
2056 			.override_expected_fault_pc_valid = true,
2057 			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
2058 		},
2059 #if __ARM_ARCH_8_6__
2060 		{
2061 			.name = "arm64_panic_lockdown_test_fpac",
2062 			.func = &arm64_panic_lockdown_test_fpac,
2063 			.arg = ia_invalid,
2064 			.expected_ec = ESR_EC_PAC_FAIL,
2065 			.expect_lockdown_exceptions_masked = true,
2066 			.expect_lockdown_exceptions_unmasked = true,
2067 		},
2068 #endif /* __ARM_ARCH_8_6__ */
2069 #endif /* ptrauth_calls */
2070 		{
2071 			.name = "arm64_panic_lockdown_test_copyio",
2072 			.func = &arm64_panic_lockdown_test_copyio,
2073 			.arg = 0x0 /* load from NULL */,
2074 			.expected_ec = ESR_EC_DABORT_EL1,
2075 			.expect_lockdown_exceptions_masked = false,
2076 			.expect_lockdown_exceptions_unmasked = false,
2077 			.override_expected_fault_pc_valid = true,
2078 			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
2079 		},
2080 #if HAS_MTE
2081 		{
2082 			/* Validate that non-copyio tag check fails trigger a lockdown */
2083 			.name = "arm64_panic_lockdown_test_load_mte_fail",
2084 			.func = &arm64_panic_lockdown_test_load,
2085 			.arg = kernel_ptr_invalid_tag,
2086 			.precondition = arm64_panic_lockdown_is_mte_enabled,
2087 			.expected_ec = ESR_EC_DABORT_EL1,
2088 			.check_fs = true,
2089 			.expected_fs = FSC_SYNC_TAG_CHECK_FAULT,
2090 			.expect_lockdown_exceptions_masked = true,
2091 			.expect_lockdown_exceptions_unmasked = true,
2092 		},
2093 		{
2094 			/*
2095 			 * Validate that non-tag check recoverable copyio tag check fails
2096 			 * trigger a lockdown
2097 			 */
2098 			.name = "arm64_panic_lockdown_test_copyio_mte_fail",
2099 			.func = &arm64_panic_lockdown_test_copyio,
2100 			.arg = kernel_ptr_invalid_tag,
2101 			.precondition = arm64_panic_lockdown_is_mte_enabled,
2102 			.expected_ec = ESR_EC_DABORT_EL1,
2103 			.check_fs = true,
2104 			.expected_fs = FSC_SYNC_TAG_CHECK_FAULT,
2105 			.expect_lockdown_exceptions_masked = true,
2106 			.expect_lockdown_exceptions_unmasked = true,
2107 			.override_expected_fault_pc_valid = true,
2108 			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
2109 		},
2110 #if 0 /* rdar://153476527 */
2111 		{
2112 			/*
2113 			 * Validate that kernel tag check recoverable copyio functions do
2114 			 * not trigger a lockdown on tag check fail.
2115 			 */
2116 			.name = "arm64_panic_lockdown_test_copyio_tag_check_fault_recoverable",
2117 			.func = &arm64_panic_lockdown_test_copyio_tag_check_fault_recoverable,
2118 			.arg = kernel_ptr_invalid_tag,
2119 			.precondition = arm64_panic_lockdown_is_mte_enabled,
2120 			.expected_ec = ESR_EC_DABORT_EL1,
2121 			.check_fs = true,
2122 			.expected_fs = FSC_SYNC_TAG_CHECK_FAULT,
2123 			.expect_lockdown_exceptions_masked = false,
2124 			.expect_lockdown_exceptions_unmasked = false,
2125 			.override_expected_fault_pc_valid = true,
2126 			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_tag_check_fault_recoverable_fault_pc,
2127 		},
2128 #endif /* 0 */
2129 #endif /* HAS_MTE */
2130 	};
2131 
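	/*
	 * Each case runs twice: once with exceptions (interrupts) unmasked and
	 * once masked, since the lockdown policy for a given exception class can
	 * differ between the two (see the per-case expect_lockdown_* flags).
	 */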
2132 	size_t test_count = sizeof(tests) / sizeof(*tests);
2133 	for (size_t i = 0; i < test_count; i++) {
2134 		if (tests[i].precondition &&
2135 		    !tests[i].precondition()) {
2136 			T_LOG("%s skipped due to precondition check", tests[i].name);
2137 			continue;
2138 		}
2139 
2140 		panic_lockdown_expect_test(
2141 			"Exceptions unmasked",
2142 			&tests[i],
2143 			tests[i].expect_lockdown_exceptions_unmasked,
2144 			/* mask_interrupts */ false);
2145 
2146 		panic_lockdown_expect_test(
2147 			"Exceptions masked",
2148 			&tests[i],
2149 			tests[i].expect_lockdown_exceptions_masked,
2150 			/* mask_interrupts */ true);
2151 	}
2152 
2153 	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_invalid_stack",
2154 	    arm64_panic_lockdown_test_sp1_invalid_stack,
2155 	    arm64_panic_lockdown_test_pac_brk_c470,
2156 	    arm64_panic_lockdown_test_sp1_invalid_stack_handler);
2157 
2158 	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_exception_in_vector",
2159 	    arm64_panic_lockdown_test_sp1_exception_in_vector,
2160 	    el1_sp1_synchronous_raise_exception_in_vector,
2161 	    arm64_panic_lockdown_test_sp1_exception_in_vector_handler);
2162 	return KERN_SUCCESS;
2163 }
2164 #endif /* CONFIG_SPTM */
2165 
2166 #if HAS_MTE
2167 volatile uint64_t mte_test_esr;
2168 
2169 static bool
2170 mte_test_fault_handler(arm_saved_state_t *ss)
2171 {
2172 	uint64_t esr = get_saved_state_esr(ss);
2173 	esr_exception_class_t ec = ESR_EC(esr);
2174 	bool ret = false;
2175 
2176 	if (ec == ESR_EC_DABORT_EL1) {
2177 		fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
2178 		if (fsc == FSC_SYNC_TAG_CHECK_FAULT) {
2179 			mte_test_esr = esr;
2180 			add_saved_state_pc(ss, 4);
2181 			ret = true;
2182 		}
2183 	}
2184 
2185 	return ret;
2186 }
2187 
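/*
 * MTE stores the logical (address) tag in pointer bits [59:56]; this pulls
 * out that 4-bit tag.  For example, a tagged pointer 0xF2FFFFFE12345670
 * yields tag 0x2.
 */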
2188 static inline unsigned int
2189 extract_mte_tag(void *ptr)
2190 {
2191 	return (((uintptr_t)ptr) >> 56) & 0xF;
2192 }
2193 
2194 kern_return_t
2195 mte_test(void)
2196 {
2197 	if (!is_mte_enabled) {
2198 		T_SKIP("MTE disabled");
2199 		return KERN_SUCCESS;
2200 	}
2201 
2202 	/* This test needs to manipulate GCR_EL1 without getting preempted */
2203 	assert_uniprocessor();
2204 
2205 	vm_address_t address;
2206 	kern_return_t kr;
2207 	const size_t MTE_GRANULE_SIZE = 16;
2208 	const unsigned int NUM_MTE_TAGS = 16;
2209 
2210 	/* Allocate a MTE backed page */
2211 	kr = kmem_alloc(kernel_map, &address, PAGE_SIZE, KMA_ZERO | KMA_TAG | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
2212 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kmem_alloc(KMA_TAG) - allocate an MTE enabled page");
2213 	char *untagged_ptr = (char *)vm_memtag_canonicalize_kernel(address);
2214 	ppnum_t pn = pmap_find_phys(kernel_pmap, address);
2215 	T_ASSERT(pmap_is_tagged_page(pn), "kmem_alloc(KMA_TAG) returned MTE-enabled translation");
2216 
2217 	/* Read the originally assigned tag to the page */
2218 	char *orig_tagged_ptr = __arm_mte_get_tag(untagged_ptr);
2219 	T_LOG("__arm_mte_get_tag(%p) == %p\n", untagged_ptr, orig_tagged_ptr);
2220 	unsigned int orig_tag = extract_mte_tag(orig_tagged_ptr);
2221 
2222 	/* Exclude the original tag from random tag generation */
2223 	uint64_t mask = __arm_mte_exclude_tag(orig_tagged_ptr, 0);
2224 	T_EXPECT_EQ_ULLONG(mask, 1 << orig_tag, "original tag is excluded");
2225 
2226 	char *random_tagged_ptr;
2227 	/*
2228 	 * Generate the random tag.  We've excluded the original tag, so it should never
2229 	 * reappear no matter how many times we regenerate a new tag.
2230 	 */
2231 	for (unsigned int i = 0; i < NUM_MTE_TAGS * 4; i++) {
2232 		random_tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, mask);
2233 		T_QUIET; T_EXPECT_NE_PTR(orig_tagged_ptr, random_tagged_ptr,
2234 		    "random tag was not taken from excluded tag set");
2235 
2236 		ptrdiff_t diff = __arm_mte_ptrdiff(untagged_ptr, random_tagged_ptr);
2237 		T_QUIET; T_EXPECT_EQ_ULLONG(diff, 0, "untagged %p and tagged %p have identical address bits",
2238 		    untagged_ptr, random_tagged_ptr);
2239 	}
2240 	T_LOG("__arm_mte_create_random_tag(%p, %llx) == %p\n", untagged_ptr, mask, random_tagged_ptr);
2241 
2242 	/*
2243 	 * Globally exclude another tag.  Let's arbitrarily pick orig_tag - 1,
2244 	 * so that it takes effect the 15th time we increment orig_tagged_ptr.
2245 	 */
2246 	uint64_t excluded_tag = (orig_tag + (NUM_MTE_TAGS - 1)) % NUM_MTE_TAGS;
2247 	uint64_t old_gcr_el1 = __builtin_arm_rsr64("GCR_EL1");
2248 	uint64_t new_gcr_el1 = old_gcr_el1 & ~GCR_EL1_EXCLUDE_MASK;
2249 	new_gcr_el1 |= (1 << excluded_tag) << GCR_EL1_EXCLUDE_OFFSET;
2250 	__builtin_arm_wsr64("GCR_EL1", new_gcr_el1);
2251 
2252 	char *last_tagged_ptr = orig_tagged_ptr;
2253 	unsigned int last_tag = orig_tag;
2254 	/* Increment the tag until we're just about to reach the excluded one */
2255 	for (unsigned int i = 0; i < NUM_MTE_TAGS - 2; i++) {
2256 		char *next_tagged_ptr = __arm_mte_increment_tag(last_tagged_ptr, 1);
2257 		unsigned int next_tag = extract_mte_tag(next_tagged_ptr);
2258 		T_QUIET; T_EXPECT_EQ_UINT(next_tag, (last_tag + 1) % NUM_MTE_TAGS,
2259 		    "__arm_mte_increment_tag(%p, 1) = %p", last_tagged_ptr, next_tagged_ptr);
2260 
2261 		ptrdiff_t diff = __arm_mte_ptrdiff(last_tagged_ptr, next_tagged_ptr);
2262 		T_QUIET; T_EXPECT_EQ_ULLONG(diff, 0, "previous %p and incremented %p have identical address bits",
2263 		    last_tagged_ptr, next_tagged_ptr);
2264 
2265 		last_tagged_ptr = next_tagged_ptr;
2266 		last_tag = next_tag;
2267 	}
2268 	/* Increment again, and confirm that we've skipped over the excluded tag */
2269 	char *skip_tagged_ptr = __arm_mte_increment_tag(last_tagged_ptr, 1);
2270 	unsigned int skip_tag = extract_mte_tag(skip_tagged_ptr);
2271 	T_EXPECT_EQ_UINT(skip_tag, orig_tag, "__arm_mte_increment_tag() skipped over excluded tag");
2272 
2273 	/* Restore the original tag configuration */
2274 	__builtin_arm_wsr64("GCR_EL1", old_gcr_el1);
2275 
2276 	/* Time to make things real, commit the tag to memory */
2277 	__arm_mte_set_tag(random_tagged_ptr);
2278 
2279 	/* Ensure that we can read back the tag */
2280 	char *read_back = __arm_mte_get_tag(untagged_ptr);
2281 	T_EXPECT_EQ_PTR(read_back, random_tagged_ptr, "tag was committed to memory correctly");
2282 
2283 	/* Verify that accessing memory actually works */
2284 	random_tagged_ptr[0] = 't';
2285 	random_tagged_ptr[1] = 'e';
2286 	random_tagged_ptr[2] = 's';
2287 	random_tagged_ptr[3] = 't';
2288 	T_EXPECT_EQ_STR(random_tagged_ptr, "test", "read/write from tagged memory");
2289 
2290 	/*
2291 	 * Confirm that the next MTE granule still has the default tag, and then
2292 	 * simulate an out-of-bounds access into that granule.
2293 	 */
2294 	void *next_granule_ptr = orig_tagged_ptr + MTE_GRANULE_SIZE;
2295 	unsigned int next_granule_tag = extract_mte_tag(next_granule_ptr);
2296 	T_QUIET; T_ASSERT_EQ_UINT(next_granule_tag, orig_tag,
2297 	    "next MTE granule still has its originally assigned tag");
2298 
2299 	mte_test_esr = 0;
2300 	ml_expect_fault_begin(mte_test_fault_handler, (uintptr_t)&random_tagged_ptr[MTE_GRANULE_SIZE]);
2301 	random_tagged_ptr[MTE_GRANULE_SIZE] = '!';
2302 	ml_expect_fault_end();
2303 	T_EXPECT_EQ_UINT(ESR_EC(mte_test_esr), ESR_EC_DABORT_EL1,
2304 	    "out-of-bounds access to tagged memory raised a data abort");
2305 	T_EXPECT_EQ_UINT(ISS_IA_FSC(ESR_ISS(mte_test_esr)), FSC_SYNC_TAG_CHECK_FAULT,
2306 	    "out-of-bounds access to tagged memory raised a synchronous tag check fault");
2307 
2308 	/*
2309 	 * Simulate a use-after-free by accessing orig_tagged_ptr, which has an
2310 	 * out-of-date tag.
2311 	 */
2312 	mte_test_esr = 0;
2313 	ml_expect_fault_begin(mte_test_fault_handler, (uintptr_t)&orig_tagged_ptr[0]);
2314 	orig_tagged_ptr[0] = 'T';
2315 	ml_expect_fault_end();
2316 	T_EXPECT_EQ_UINT(ESR_EC(mte_test_esr), ESR_EC_DABORT_EL1,
2317 	    "use-after-free access to tagged memory raised a data abort");
2318 	T_EXPECT_EQ_UINT(ISS_IA_FSC(ESR_ISS(mte_test_esr)), FSC_SYNC_TAG_CHECK_FAULT,
2319 	    "use-after-free access to tagged memory raised a synchronous tag check fault");
2320 
2321 	kmem_free(kernel_map, (vm_address_t)__arm_mte_get_tag(untagged_ptr), PAGE_SIZE, KMF_TAG);
2322 	return KERN_SUCCESS;
2323 }
2324 
2325 kern_return_t
2326 mte_copyio_recovery_handler_test(void)
2327 {
2328 	if (!is_mte_enabled) {
2329 		T_SKIP("MTE disabled");
2330 		return KERN_SUCCESS;
2331 	}
2332 
2333 	extern int _copyin_atomic64(const char *src, uint64_t *dst);
2334 	extern int _copyin_atomic64_allow_invalid_kernel_tag(const char *src, uint64_t *dst);
2335 
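	/*
	 * Test plan: copy from a correctly tagged kernel address (should succeed),
	 * then from a mistagged address via the _allow_invalid_kernel_tag variant
	 * (should recover with EFAULT), and finally from the mistagged address via
	 * plain _copyin_atomic64, which should take an unrecoverable synchronous
	 * tag check fault that only the expected-fault handler absorbs.
	 */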
2336 	vm_address_t kern_addr;
2337 	kern_return_t kr = kmem_alloc(kernel_map, &kern_addr, PAGE_SIZE, KMA_ZERO | KMA_TAG | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
2338 	T_QUIET; T_ASSERT_EQ_INT(kr, 0, "allocated a tagged page");
2339 
2340 	uint64_t *tagged_addr = __arm_mte_create_random_tag((void *)kern_addr, 0);
2341 	__arm_mte_set_tag(tagged_addr);
2342 	*tagged_addr = 0xFEEDFACECAFEF00D;
2343 
2344 	uint64_t dst;
2345 	int err = _copyin_atomic64((char *)tagged_addr, &dst);
2346 	T_EXPECT_EQ_INT(err, 0, "_copyin_atomic64 from tagged kernel address succeeded");
2347 	T_EXPECT_EQ_ULLONG(*tagged_addr, dst, "_copyin_atomic64 from tagged kernel address copied data correctly");
2348 
2349 	uint64_t *incorrectly_tagged_addr = __arm_mte_increment_tag(tagged_addr, 1);
2350 	dst = 0;
2351 	err = _copyin_atomic64_allow_invalid_kernel_tag((char *)incorrectly_tagged_addr, &dst);
2352 	T_EXPECT_EQ_INT(err, EFAULT, "_copyin_atomic64_allow_invalid_kernel_tag with incorrectly tagged kernel address recovered with EFAULT");
2353 	T_EXPECT_NE_ULLONG(*tagged_addr, dst, "_copyin_atomic64_allow_invalid_kernel_tag from incorrectly tagged kernel address did not copy data");
2354 
2355 	mte_test_esr = 0;
2356 	ml_expect_fault_begin(mte_test_fault_handler, (uintptr_t)incorrectly_tagged_addr);
2357 	_copyin_atomic64((char *)incorrectly_tagged_addr, &dst);
2358 	ml_expect_fault_end();
2359 	T_EXPECT_EQ_UINT(ESR_EC(mte_test_esr), ESR_EC_DABORT_EL1,
2360 	    "_copyin_atomic64 with incorrectly tagged kernel address raised an unrecoverable data abort");
2361 	T_EXPECT_EQ_UINT(ISS_IA_FSC(ESR_ISS(mte_test_esr)), FSC_SYNC_TAG_CHECK_FAULT,
2362 	    "_copyin_atomic64 with incorrectly tagged kernel address raised an unrecoverable synchronous tag check fault");
2363 
2364 	kmem_free(kernel_map, (vm_address_t)__arm_mte_get_tag(tagged_addr), PAGE_SIZE, KMF_TAG);
2365 	return KERN_SUCCESS;
2366 }
2367 #endif /* HAS_MTE */
2368 
2369 
2370 
2371 
2372 #if HAS_SPECRES
2373 
2374 /*** CPS RCTX ***/
2375 
2376 
2377 /*** SPECRES ***/
2378 
2379 #if HAS_SPECRES2
2380 /*
2381  * Execute a COSP RCTX instruction.
2382  */
2383 static void
2384 _cosprctx_exec(uint64_t raw)
2385 {
2386 	asm volatile ( "ISB SY");
2387 	__asm__ volatile ("COSP RCTX, %0" :: "r" (raw));
2388 	asm volatile ( "DSB SY");
2389 	asm volatile ( "ISB SY");
2390 }
2391 #endif
2392 
2393 /*
2394  * Execute a CFP RCTX instruction.
2395  */
2396 static void
2397 _cfprctx_exec(uint64_t raw)
2398 {
2399 	asm volatile ( "ISB SY");
2400 	__asm__ volatile ("CFP RCTX, %0" :: "r" (raw));
2401 	asm volatile ( "DSB SY");
2402 	asm volatile ( "ISB SY");
2403 }
2404 
2405 /*
2406  * Execute a CPP RCTX instruction.
2407  */
2408 static void
2409 _cpprctx_exec(uint64_t raw)
2410 {
2411 	asm volatile ( "ISB SY");
2412 	__asm__ volatile ("CPP RCTX, %0" :: "r" (raw));
2413 	asm volatile ( "DSB SY");
2414 	asm volatile ( "ISB SY");
2415 }
2416 
2417 /*
2418  * Execute a DVP RCTX instruction.
2419  */
2420 static void
2421 _dvprctx_exec(uint64_t raw)
2422 {
2423 	asm volatile ( "ISB SY");
2424 	__asm__ volatile ("DVP RCTX, %0" :: "r" (raw));
2425 	asm volatile ( "DSB SY");
2426 	asm volatile ( "ISB SY");
2427 }
2428 
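/*
 * Drive one of the *RCTX helpers above across a sweep of
 * restriction-by-context descriptors: EL0/EL1/EL2 targets, a spread of
 * ASID/VMID pairs, and all four GASID/GVMID combinations.  This effectively
 * only checks that the instructions can be executed with these contexts; it
 * does not verify their microarchitectural effect.
 */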
2429 static void
2430 _specres_do_test_std(void (*impl)(uint64_t raw))
2431 {
2432 	typedef struct {
2433 		union {
2434 			struct {
2435 				uint64_t ASID:16;
2436 				uint64_t GASID:1;
2437 				uint64_t :7;
2438 				uint64_t EL:2;
2439 				uint64_t NS:1;
2440 				uint64_t NSE:1;
2441 				uint64_t :4;
2442 				uint64_t VMID:16;
2443 				uint64_t GVMID:1;
2444 			};
2445 			uint64_t raw;
2446 		};
2447 	} specres_ctx;
2448 
2449 	assert(sizeof(specres_ctx) == 8);
2450 
2451 	/*
2452 	 * Test various possible meaningful RCTX context IDs.
2453 	 */
2454 
2455 	/* el : EL0 / EL1 / EL2. */
2456 	for (uint8_t el = 0; el < 3; el++) {
2457 		/* Always non-secure. */
2458 		const uint8_t ns = 1;
2459 		const uint8_t nse = 0;
2460 
2461 		/* Iterate over some pairs of ASIDs / VMIDs. */
2462 		for (uint16_t xxid = 0; xxid < 256; xxid++) {
2463 			const uint16_t asid = (uint16_t) (xxid << 4);
2464 			const uint16_t vmid = (uint16_t) (256 - (xxid << 4));
2465 
2466 			/* Test 4 G[AS|VM]ID combinations. */
2467 			for (uint8_t bid = 0; bid < 4; bid++) {
2468 				const uint8_t gasid = bid & 1;
2469 				const uint8_t gvmid = bid & 2;
2470 
2471 				/* Generate the context descriptor. */
2472 				specres_ctx ctx = {0};
2473 				ctx.ASID = asid;
2474 				ctx.GASID = gasid;
2475 				ctx.EL = el;
2476 				ctx.NS = ns;
2477 				ctx.NSE = nse;
2478 				ctx.VMID = vmid;
2479 				ctx.GVMID = gvmid;
2480 
2481 				/* Execute the selected RCTX instruction. */
2482 				(*impl)(ctx.raw);
2483 
2484 				/* Insert some operation. */
2485 				volatile uint8_t sum = 0;
2486 				for (volatile uint8_t i = 0; i < 64; i++) {
2487 					sum += i * sum + 3;
2488 				}
2489 
2490 				/* If el0 is not targetted, just need to do it once. */
2491 				/* If EL0 is not targeted, we only need to do it once. */
2492 					goto not_el0_skip;
2493 				}
2494 			}
2495 		}
2496 
2497 		/* El0 skip. */
2498 not_el0_skip:   ;
2499 	}
2500 }
2501 
2502 /*** RCTX ***/
2503 
2504 static void
2505 _rctx_do_test(void)
2506 {
2507 	_specres_do_test_std(&_cfprctx_exec);
2508 	_specres_do_test_std(&_cpprctx_exec);
2509 	_specres_do_test_std(&_dvprctx_exec);
2510 #if HAS_SPECRES2
2511 	_specres_do_test_std(&_cosprctx_exec);
2512 #endif
2513 }
2514 
2515 kern_return_t
2516 specres_test(void)
2517 {
2518 	/* Basic instructions test. */
2519 	_cfprctx_exec(0);
2520 	_cpprctx_exec(0);
2521 	_dvprctx_exec(0);
2522 #if HAS_SPECRES2
2523 	_cosprctx_exec(0);
2524 #endif
2525 
2526 	/* More advanced instructions test. */
2527 	_rctx_do_test();
2528 
2529 	return KERN_SUCCESS;
2530 }
2531 
2532 #endif /* HAS_SPECRES */
2533 #if BTI_ENFORCED
2534 typedef uint64_t (bti_landing_pad_func_t)(void);
2535 typedef uint64_t (bti_shim_func_t)(bti_landing_pad_func_t *);
2536 
2537 extern bti_shim_func_t arm64_bti_test_jump_shim;
2538 extern bti_shim_func_t arm64_bti_test_call_shim;
2539 
2540 extern bti_landing_pad_func_t arm64_bti_test_func_with_no_landing_pad;
2541 extern bti_landing_pad_func_t arm64_bti_test_func_with_call_landing_pad;
2542 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_landing_pad;
2543 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_call_landing_pad;
2544 #if __has_feature(ptrauth_returns)
2545 extern bti_landing_pad_func_t arm64_bti_test_func_with_pac_landing_pad;
2546 #endif /* __has_feature(ptrauth_returns) */
2547 
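/*
 * One BTI landing-pad test case: `func` is a test routine that begins with a
 * particular BTI landing pad (or none) and returns a distinctive value.
 * expect_call_ok / expect_jump_ok record whether reaching it via the indirect
 * call shim or the indirect jump shim should land without a BTI exception.
 */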
2548 typedef struct arm64_bti_test_func_case {
2549 	const char *func_str;
2550 	bti_landing_pad_func_t *func;
2551 	uint64_t expect_return_value;
2552 	uint8_t  expect_call_ok;
2553 	uint8_t  expect_jump_ok;
2554 } arm64_bti_test_func_case_s;
2555 
2556 static volatile uintptr_t bti_exception_handler_pc = 0;
2557 
2558 static bool
2559 arm64_bti_test_exception_handler(arm_saved_state_t * state)
2560 {
2561 	uint64_t esr = get_saved_state_esr(state);
2562 	esr_exception_class_t class = ESR_EC(esr);
2563 
2564 	if (class != ESR_EC_BTI_FAIL) {
2565 		return false;
2566 	}
2567 
2568 	/* Capture any desired exception metrics */
2569 	bti_exception_handler_pc = get_saved_state_pc(state);
2570 
2571 	/* "Cancel" the function call by forging an early return */
2572 	set_saved_state_pc(state, get_saved_state_lr(state));
2573 
2574 	/* Clear BTYPE to prevent taking another exception after ERET */
2575 	uint32_t spsr = get_saved_state_cpsr(state);
2576 	spsr &= ~PSR_BTYPE_MASK;
2577 	set_saved_state_cpsr(state, spsr);
2578 
2579 	return true;
2580 }
2581 
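/*
 * Invoke `test_case->func` through the given shim (indirect call or indirect
 * jump), expecting a BTI fault at the function's first instruction when
 * expect_ok is false.  The pass/fail logic below cross-checks the captured
 * exception PC against the stripped function address and, for the
 * non-faulting cases, the function's return value.
 */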
2582 static void
2583 arm64_bti_test_func_with_shim(
2584 	uint8_t expect_ok,
2585 	const char *shim_str,
2586 	bti_shim_func_t *shim,
2587 	arm64_bti_test_func_case_s *test_case)
2588 {
2589 	uint64_t result = -1;
2590 
2591 	/* Capture BTI exceptions triggered by our target function */
2592 	uintptr_t raw_func = (uintptr_t)ptrauth_strip(
2593 		(void *)test_case->func,
2594 		ptrauth_key_function_pointer);
2595 	ml_expect_fault_pc_begin(arm64_bti_test_exception_handler, raw_func);
2596 	bti_exception_handler_pc = 0;
2597 
2598 	/*
2599 	 * The assembly routines do not support C function type discriminators, so
2600 	 * strip and resign with zero if needed
2601 	 */
2602 	bti_landing_pad_func_t *resigned = ptrauth_auth_and_resign(
2603 		test_case->func,
2604 		ptrauth_key_function_pointer,
2605 		ptrauth_type_discriminator(bti_landing_pad_func_t),
2606 		ptrauth_key_function_pointer, 0);
2607 
2608 	result = shim(resigned);
2609 
2610 	ml_expect_fault_end();
2611 
2612 	if (!expect_ok && raw_func != bti_exception_handler_pc) {
2613 		T_FAIL("Expected BTI exception at 0x%llx but got one at 0x%llx instead\n",
2614 		    raw_func, bti_exception_handler_pc);
2615 	} else if (expect_ok && bti_exception_handler_pc) {
2616 		T_FAIL("Did not expect a BTI exception but got one at 0x%llx\n",
2617 		    bti_exception_handler_pc);
2618 	} else if (!expect_ok && !bti_exception_handler_pc) {
2619 		T_FAIL("Failed to hit expected exception!\n");
2620 	} else if (expect_ok && result != test_case->expect_return_value) {
2621 		T_FAIL("Incorrect test function result (expected=%llu, result=%llu)\n",
2622 		    test_case->expect_return_value, result);
2623 	} else {
2624 		T_PASS("%s (shim=%s)\n", test_case->func_str, shim_str);
2625 	}
2626 }
2627 
2628 /**
2629  * This test works to ensure that BTI exceptions are raised where expected
2630  * and only where they are expected by exhaustively testing all indirect branch
2631  * combinations with all landing pad options.
2632  */
2633 kern_return_t
2634 arm64_bti_test(void)
2635 {
2636 	static arm64_bti_test_func_case_s tests[] = {
2637 		{
2638 			.func_str = "arm64_bti_test_func_with_no_landing_pad",
2639 			.func = &arm64_bti_test_func_with_no_landing_pad,
2640 			.expect_return_value     = 1,
2641 			.expect_call_ok          = 0,
2642 			.expect_jump_ok          = 0,
2643 		},
2644 		{
2645 			.func_str = "arm64_bti_test_func_with_call_landing_pad",
2646 			.func = &arm64_bti_test_func_with_call_landing_pad,
2647 			.expect_return_value     = 2,
2648 			.expect_call_ok          = 1,
2649 			.expect_jump_ok          = 0,
2650 		},
2651 		{
2652 			.func_str = "arm64_bti_test_func_with_jump_landing_pad",
2653 			.func = &arm64_bti_test_func_with_jump_landing_pad,
2654 			.expect_return_value     = 3,
2655 			.expect_call_ok          = 0,
2656 			.expect_jump_ok          = 1,
2657 		},
2658 		{
2659 			.func_str = "arm64_bti_test_func_with_jump_call_landing_pad",
2660 			.func = &arm64_bti_test_func_with_jump_call_landing_pad,
2661 			.expect_return_value     = 4,
2662 			.expect_call_ok          = 1,
2663 			.expect_jump_ok          = 1,
2664 		},
2665 #if __has_feature(ptrauth_returns)
2666 		{
2667 			.func_str = "arm64_bti_test_func_with_pac_landing_pad",
2668 			.func = &arm64_bti_test_func_with_pac_landing_pad,
2669 			.expect_return_value     = 5,
2670 			.expect_call_ok          = 1,
2671 			.expect_jump_ok          = 0,
2672 		},
2673 #endif /* __has_feature(ptrauth_returns) */
2674 	};
2675 
2676 	size_t test_count = sizeof(tests) / sizeof(*tests);
2677 	for (size_t i = 0; i < test_count; i++) {
2678 		arm64_bti_test_func_case_s *test_case = tests + i;
2679 
2680 		arm64_bti_test_func_with_shim(test_case->expect_call_ok,
2681 		    "arm64_bti_test_call_shim",
2682 		    arm64_bti_test_call_shim,
2683 		    test_case);
2684 
2685 
2686 		arm64_bti_test_func_with_shim(test_case->expect_jump_ok,
2687 		    "arm64_bti_test_jump_shim",
2688 		    arm64_bti_test_jump_shim,
2689 		    test_case);
2690 	}
2691 
2692 	return KERN_SUCCESS;
2693 }
2694 #endif /* BTI_ENFORCED */
2695 
2696 #if CONFIG_SPTM && HAS_MTE && (DEVELOPMENT || DEBUG)
2697 /**
2698  * Tests MTE in guarded mode by calling into the SPTM with an
2699  * XNU-provided pointer.
2700  *
2701  * Currently supported test cases:
2702  * 0. Pass SPTM a pointer with a valid tag --> success
2703  * 1. Pass SPTM an untagged pointer from the physical aperture --> panic
2704  * 2. Pass SPTM a pointer with an invalid tag --> panic
2705  */
2706 static int
2707 mte_test_gl2(__unused int64_t test_case, __unused int64_t *out)
2708 {
2709 
2710 	return 0;
2711 }
2712 
2713 SYSCTL_TEST_REGISTER(mte_gl2, mte_test_gl2);
2714 #endif /* CONFIG_SPTM && HAS_MTE && (DEVELOPMENT || DEBUG) */
2715 
2716 
2717 /**
2718  * Test the speculation guards
2719  * We can't easily ensure that the guards actually behave correctly under
2720  * speculation, but we can at least ensure that the guards are non-speculatively
2721  * correct.
2722  */
2723 kern_return_t
2724 arm64_speculation_guard_test(void)
2725 {
2726 	uint64_t cookie1_64 = 0x5350454354524521ULL; /* SPECTRE! */
2727 	uint64_t cookie2_64 = 0x5941592043505553ULL; /* YAY CPUS */
2728 	uint32_t cookie1_32 = (uint32_t)cookie1_64;
2729 	uint32_t cookie2_32 = (uint32_t)cookie2_64;
2730 	uint64_t result64 = 0;
2731 	uint32_t result32 = 0;
2732 	bool result_valid;
2733 
2734 	/*
2735 	 * Test the zeroing guard
2736 	 * Since failing the guard triggers a panic, we don't actually test that
2737 	 * part as part of the automated tests.
2738 	 */
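	/*
	 * As exercised below, the ZEROING guards appear to work as follows: the
	 * two comparands are compared under the given condition code, and if the
	 * condition holds, `value` is passed through to `out` with `out_valid`
	 * set; otherwise `out` is zeroed and `out_valid` is cleared.  The X/W
	 * suffix letters give the widths (64-/32-bit) of the output and of the
	 * two comparands respectively.
	 */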
2739 
2740 	result64 = 0;
2741 	SPECULATION_GUARD_ZEROING_XXX(
2742 		/* out */ result64, /* out_valid */ result_valid,
2743 		/* value */ cookie1_64,
2744 		/* cmp_1 */ 0ULL, /* cmp_2 */ 1ULL, /* cc */ "NE");
2745 	T_EXPECT(result_valid, "result valid");
2746 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 zeroing guard works");
2747 
2748 	result64 = 0;
2749 	SPECULATION_GUARD_ZEROING_XWW(
2750 		/* out */ result64, /* out_valid */ result_valid,
2751 		/* value */ cookie1_64,
2752 		/* cmp_1 */ 1U, /* cmp_2 */ 0U, /* cc */ "HI");
2753 	T_EXPECT(result_valid, "result valid");
2754 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 32 zeroing guard works");
2755 
2756 	result32 = 0;
2757 	SPECULATION_GUARD_ZEROING_WXX(
2758 		/* out */ result32, /* out_valid */ result_valid,
2759 		/* value */ cookie1_32,
2760 		/* cmp_1 */ -1LL, /* cmp_2 */ 4LL, /* cc */ "LT");
2761 	T_EXPECT(result_valid, "result valid");
2762 	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 64 zeroing guard works");
2763 
2764 	result32 = 0;
2765 	SPECULATION_GUARD_ZEROING_WWW(
2766 		/* out */ result32, /* out_valid */ result_valid,
2767 		/* value */ cookie1_32,
2768 		/* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "GT");
2769 	T_EXPECT(result_valid, "result valid");
2770 	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 32 zeroing guard works");
2771 
2772 	result32 = 0x41;
2773 	SPECULATION_GUARD_ZEROING_WWW(
2774 		/* out */ result32, /* out_valid */ result_valid,
2775 		/* value */ cookie1_32,
2776 		/* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "LT");
2777 	T_EXPECT(!result_valid, "result invalid");
2778 	T_EXPECT_EQ_UINT(result32, 0, "zeroing guard works with failing condition");
2779 
2780 	/*
2781 	 * Test the selection guard
2782 	 */
2783 
2784 	result64 = 0;
2785 	SPECULATION_GUARD_SELECT_XXX(
2786 		/* out */ result64,
2787 		/* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
2788 		/* cc   */ "EQ", /* sel_1 */ cookie1_64,
2789 		/* n_cc */ "NE", /* sel_2 */ cookie2_64);
2790 	T_EXPECT_EQ_ULLONG(result64, cookie2_64, "64, 64 select guard works (1)");
2791 
2792 	result64 = 0;
2793 	SPECULATION_GUARD_SELECT_XXX(
2794 		/* out */ result64,
2795 		/* cmp_1 */ 32ULL, /* cmp_2 */ 32ULL,
2796 		/* cc   */ "EQ", /* sel_1 */ cookie1_64,
2797 		/* n_cc */ "NE", /* sel_2 */ cookie2_64);
2798 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 select guard works (2)");
2799 
2800 
2801 	result32 = 0;
2802 	SPECULATION_GUARD_SELECT_WXX(
2803 		/* out */ result32,
2804 		/* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
2805 		/* cc   */ "HI", /* sel_1 */ cookie1_64,
2806 		/* n_cc */ "LS", /* sel_2 */ cookie2_64);
2807 	T_EXPECT_EQ_ULLONG(result32, cookie2_32, "32, 64 select guard works (1)");
2808 
2809 	result32 = 0;
2810 	SPECULATION_GUARD_SELECT_WXX(
2811 		/* out */ result32,
2812 		/* cmp_1 */ 16ULL, /* cmp_2 */ 2ULL,
2813 		/* cc   */ "HI", /* sel_1 */ cookie1_64,
2814 		/* n_cc */ "LS", /* sel_2 */ cookie2_64);
2815 	T_EXPECT_EQ_ULLONG(result32, cookie1_32, "32, 64 select guard works (2)");
2816 
2817 	return KERN_SUCCESS;
2818 }
2819 
2820 
2821 extern void arm64_brk_lr_gpr(void);
2822 extern void arm64_brk_lr_fault(void);
2823 
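/*
 * Expected-fault handler for the LR-as-GPR backtrace test below.  It runs
 * backtrace_packed() from exception context and then steps over the faulting
 * instruction; if fleh had pushed a bogus frame using the GPR value in LR,
 * the backtrace would violate the 4GB-of-kernel-text invariant described in
 * arm64_backtrace_test()'s comment.
 */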
2824 static NOKASAN bool
2825 arm64_backtrace_test_fault_handler(arm_saved_state_t * state)
2826 {
2827 	/* Similar setup to backtrace_kernel_sysctl() */
2828 	const unsigned int bt_len = 24;
2829 	const size_t bt_size = sizeof(uint8_t) * bt_len;
2830 	uint8_t *bt = kalloc_data(bt_size, Z_WAITOK | Z_ZERO);
2831 	backtrace_info_t packed_info = BTI_NONE;
2832 
2833 	/* Call the backtrace function */
2834 	backtrace_packed(BTP_KERN_OFFSET_32, bt, bt_size, NULL, &packed_info);
2835 
2836 	add_saved_state_pc(state, 4);
2837 	return true;
2838 }
2839 
2840 /**
2841  * Make sure EL1 fleh doesn't push a bogus stack frame when LR is being used as
2842  * a GPR in the caller.
2843  *
2844  * This test writes a GPR-like value into LR that is >4GB away from any kernel
2845  * address and tries to run backtrace_packed() from a sync handler.
2846  * backtrace_packed() has an invariant that all addresses in the stack frame are
2847  * within 4GB of the kernel text.
2848  */
2849 kern_return_t
2850 arm64_backtrace_test(void)
2851 {
2852 	ml_expect_fault_pc_begin(arm64_backtrace_test_fault_handler, (uintptr_t)&arm64_brk_lr_fault);
2853 	arm64_brk_lr_gpr();
2854 	ml_expect_fault_end();
2855 
2856 #if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
2857 	/* Reset the debug data so it can be filled later if needed */
2858 	debug_panic_lockdown_initiator_state.initiator_pc = 0;
2859 #endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
2860 	return KERN_SUCCESS;
2861 }
2862