1 /*
2  * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
33  * Mellon University All Rights Reserved.
34  *
35  * Permission to use, copy, modify and distribute this software and its
36  * documentation is hereby granted, provided that both the copyright notice
37  * and this permission notice appear in all copies of the software,
38  * derivative works or modified versions, and any portions thereof, and that
39  * both notices appear in supporting documentation.
40  *
41  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42  * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43  * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44  *
45  * Carnegie Mellon requests users of this software to return to
46  *
47  * Software Distribution Coordinator  or  [email protected]
48  * School of Computer Science Carnegie Mellon University Pittsburgh PA
49  * 15213-3890
50  *
51  * any improvements or extensions that they make and grant Carnegie Mellon the
52  * rights to redistribute these changes.
53  */
54 
55 #include <mach_ldebug.h>
56 
57 #define LOCK_PRIVATE 1
58 
59 #include <vm/pmap.h>
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_page_internal.h>
62 #include <vm/vm_kern_xnu.h>
63 #include <kern/kalloc.h>
64 #include <kern/cpu_number.h>
65 #include <kern/locks.h>
66 #include <kern/misc_protos.h>
67 #include <kern/thread.h>
68 #include <kern/processor.h>
69 #include <kern/sched_prim.h>
70 #include <kern/debug.h>
71 #include <string.h>
72 #include <tests/xnupost.h>
73 
74 #if     MACH_KDB
75 #include <ddb/db_command.h>
76 #include <ddb/db_output.h>
77 #include <ddb/db_sym.h>
78 #include <ddb/db_print.h>
79 #endif                          /* MACH_KDB */
80 
81 #include <san/kasan.h>
82 #include <sys/errno.h>
83 #include <sys/kdebug.h>
84 #include <sys/munge.h>
85 #include <machine/cpu_capabilities.h>
86 #include <arm/cpu_data_internal.h>
87 #include <arm/pmap.h>
88 
89 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
90 #include <arm64/amcc_rorgn.h>
91 #endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
92 
93 #include <arm64/machine_machdep.h>
94 
95 kern_return_t arm64_lock_test(void);
96 kern_return_t arm64_munger_test(void);
97 kern_return_t arm64_pan_test(void);
98 kern_return_t arm64_late_pan_test(void);
99 #if defined(HAS_APPLE_PAC)
100 #include <ptrauth.h>
101 kern_return_t arm64_ropjop_test(void);
102 #endif
103 #if defined(KERNEL_INTEGRITY_CTRR)
104 kern_return_t ctrr_test(void);
105 kern_return_t ctrr_test_cpu(void);
106 #endif
107 #if BTI_ENFORCED
108 kern_return_t arm64_bti_test(void);
109 #endif /* BTI_ENFORCED */
110 #if HAS_SPECRES
111 extern kern_return_t specres_test(void);
112 #endif
113 
114 // exception handler ignores this fault address during PAN test
115 #if __ARM_PAN_AVAILABLE__
116 const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
117 vm_offset_t pan_test_addr = 0;
118 vm_offset_t pan_ro_addr = 0;
119 volatile int pan_exception_level = 0;
120 volatile char pan_fault_value = 0;
121 #endif
122 
123 #if CONFIG_SPTM
124 kern_return_t arm64_panic_lockdown_test(void);
125 #endif /* CONFIG_SPTM */
126 
127 #include <arm64/speculation.h>
128 kern_return_t arm64_speculation_guard_test(void);
129 
130 #include <libkern/OSAtomic.h>
131 #define LOCK_TEST_ITERATIONS 50
132 #define LOCK_TEST_SETUP_TIMEOUT_SEC 15
133 static hw_lock_data_t   lt_hw_lock;
134 static lck_spin_t       lt_lck_spin_t;
135 static lck_mtx_t        lt_mtx;
136 static lck_rw_t         lt_rwlock;
137 static volatile uint32_t lt_counter = 0;
138 static volatile int     lt_spinvolatile;
139 static volatile uint32_t lt_max_holders = 0;
140 static volatile uint32_t lt_upgrade_holders = 0;
141 static volatile uint32_t lt_max_upgrade_holders = 0;
142 static volatile uint32_t lt_num_holders = 0;
143 static volatile uint32_t lt_done_threads;
144 static volatile uint32_t lt_target_done_threads;
145 static volatile uint32_t lt_cpu_bind_id = 0;
146 static uint64_t          lt_setup_timeout = 0;
147 
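/*
 * Helpers that track how many threads hold a blocking lock at once;
 * lt_max_holders records the high-water mark so the tests can check
 * mutual exclusion after the worker threads finish.
 */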
148 static void
149 lt_note_another_blocking_lock_holder()
150 {
151 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
152 	lt_num_holders++;
153 	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
154 	hw_lock_unlock(&lt_hw_lock);
155 }
156 
157 static void
158 lt_note_blocking_lock_release()
159 {
160 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
161 	lt_num_holders--;
162 	hw_lock_unlock(&lt_hw_lock);
163 }
164 
165 static void
166 lt_spin_a_little_bit()
167 {
168 	uint32_t i;
169 
170 	for (i = 0; i < 10000; i++) {
171 		lt_spinvolatile++;
172 	}
173 }
174 
175 static void
176 lt_sleep_a_little_bit()
177 {
178 	delay(100);
179 }
180 
181 static void
182 lt_grab_mutex()
183 {
184 	lck_mtx_lock(&lt_mtx);
185 	lt_note_another_blocking_lock_holder();
186 	lt_sleep_a_little_bit();
187 	lt_counter++;
188 	lt_note_blocking_lock_release();
189 	lck_mtx_unlock(&lt_mtx);
190 }
191 
192 static void
193 lt_grab_mutex_with_try()
194 {
195 	while (0 == lck_mtx_try_lock(&lt_mtx)) {
196 		;
197 	}
198 	lt_note_another_blocking_lock_holder();
199 	lt_sleep_a_little_bit();
200 	lt_counter++;
201 	lt_note_blocking_lock_release();
202 	lck_mtx_unlock(&lt_mtx);
203 }
204 
205 static void
206 lt_grab_rw_exclusive()
207 {
208 	lck_rw_lock_exclusive(&lt_rwlock);
209 	lt_note_another_blocking_lock_holder();
210 	lt_sleep_a_little_bit();
211 	lt_counter++;
212 	lt_note_blocking_lock_release();
213 	lck_rw_done(&lt_rwlock);
214 }
215 
216 static void
217 lt_grab_rw_exclusive_with_try()
218 {
219 	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
220 		lt_sleep_a_little_bit();
221 	}
222 
223 	lt_note_another_blocking_lock_holder();
224 	lt_sleep_a_little_bit();
225 	lt_counter++;
226 	lt_note_blocking_lock_release();
227 	lck_rw_done(&lt_rwlock);
228 }
229 
230 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
231  *  static void
232  *  lt_grab_rw_shared()
233  *  {
234  *       lck_rw_lock_shared(&lt_rwlock);
235  *       lt_counter++;
236  *
237  *       lt_note_another_blocking_lock_holder();
238  *       lt_sleep_a_little_bit();
239  *       lt_note_blocking_lock_release();
240  *
241  *       lck_rw_done(&lt_rwlock);
242  *  }
243  */
244 
245 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
246  *  static void
247  *  lt_grab_rw_shared_with_try()
248  *  {
249  *       while(0 == lck_rw_try_lock_shared(&lt_rwlock));
250  *       lt_counter++;
251  *
252  *       lt_note_another_blocking_lock_holder();
253  *       lt_sleep_a_little_bit();
254  *       lt_note_blocking_lock_release();
255  *
256  *       lck_rw_done(&lt_rwlock);
257  *  }
258  */
259 
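/*
 * Take the lock shared, upgrade to exclusive (falling back to a fresh
 * exclusive acquire if the upgrade fails), then downgrade back to shared
 * before releasing; lt_max_upgrade_holders should never exceed 1.
 */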
260 static void
261 lt_upgrade_downgrade_rw()
262 {
263 	boolean_t upgraded, success;
264 
265 	success = lck_rw_try_lock_shared(&lt_rwlock);
266 	if (!success) {
267 		lck_rw_lock_shared(&lt_rwlock);
268 	}
269 
270 	lt_note_another_blocking_lock_holder();
271 	lt_sleep_a_little_bit();
272 	lt_note_blocking_lock_release();
273 
274 	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
275 	if (!upgraded) {
276 		success = lck_rw_try_lock_exclusive(&lt_rwlock);
277 
278 		if (!success) {
279 			lck_rw_lock_exclusive(&lt_rwlock);
280 		}
281 	}
282 
283 	lt_upgrade_holders++;
284 	if (lt_upgrade_holders > lt_max_upgrade_holders) {
285 		lt_max_upgrade_holders = lt_upgrade_holders;
286 	}
287 
288 	lt_counter++;
289 	lt_sleep_a_little_bit();
290 
291 	lt_upgrade_holders--;
292 
293 	lck_rw_lock_exclusive_to_shared(&lt_rwlock);
294 
295 	lt_spin_a_little_bit();
296 	lck_rw_done(&lt_rwlock);
297 }
298 
299 #if __AMP__
300 const int limit = 1000000;
301 static int lt_stress_local_counters[MAX_CPUS];
302 
303 lck_ticket_t lt_ticket_lock;
304 lck_grp_t lt_ticket_grp;
305 
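/*
 * Ticket-lock stress body: each bound thread checks in twice (once after
 * binding, once when it is spinning on-core) before all threads hammer
 * lt_ticket_lock until the shared counter reaches 'limit'.
 */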
306 static void
307 lt_stress_ticket_lock()
308 {
309 	uint local_counter = 0;
310 
311 	uint cpuid = cpu_number();
312 
313 	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);
314 
315 	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
316 	lt_counter++;
317 	local_counter++;
318 	lck_ticket_unlock(&lt_ticket_lock);
319 
320 	/* Wait until all test threads have finished any binding */
321 	while (lt_counter < lt_target_done_threads) {
322 		if (mach_absolute_time() > lt_setup_timeout) {
323 			kprintf("%s>cpu %d noticed that we exceeded setup timeout of %d seconds during initial setup phase (only %d out of %d threads checked in)",
324 			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter, lt_target_done_threads);
325 			return;
326 		}
327 		/* Yield to keep the CPUs available for the threads to bind */
328 		thread_yield_internal(1);
329 	}
330 
331 	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
332 	lt_counter++;
333 	local_counter++;
334 	lck_ticket_unlock(&lt_ticket_lock);
335 
336 	/*
337 	 * Now that the test threads have finished any binding, wait
338 	 * until they are all actively spinning on-core (done yielding)
339 	 * so we get a fairly timed start.
340 	 */
341 	while (lt_counter < 2 * lt_target_done_threads) {
342 		if (mach_absolute_time() > lt_setup_timeout) {
343 			kprintf("%s>cpu %d noticed that we exceeded setup timeout of %d seconds during secondary setup phase (only %d out of %d threads checked in)",
344 			    __FUNCTION__, cpuid, LOCK_TEST_SETUP_TIMEOUT_SEC, lt_counter - lt_target_done_threads, lt_target_done_threads);
345 			return;
346 		}
347 	}
348 
349 	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);
350 
351 	while (lt_counter < limit) {
352 		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
353 		if (lt_counter < limit) {
354 			lt_counter++;
355 			local_counter++;
356 		}
357 		lck_ticket_unlock(&lt_ticket_lock);
358 	}
359 
360 	lt_stress_local_counters[cpuid] = local_counter;
361 
362 	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
363 }
364 #endif
365 
366 static void
367 lt_grab_hw_lock()
368 {
369 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
370 	lt_counter++;
371 	lt_spin_a_little_bit();
372 	hw_lock_unlock(&lt_hw_lock);
373 }
374 
375 static void
376 lt_grab_hw_lock_with_try()
377 {
378 	while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
379 		;
380 	}
381 	lt_counter++;
382 	lt_spin_a_little_bit();
383 	hw_lock_unlock(&lt_hw_lock);
384 }
385 
386 static void
387 lt_grab_hw_lock_with_to()
388 {
389 	(void)hw_lock_to(&lt_hw_lock, &hw_lock_spin_policy, LCK_GRP_NULL);
390 	lt_counter++;
391 	lt_spin_a_little_bit();
392 	hw_lock_unlock(&lt_hw_lock);
393 }
394 
395 static void
396 lt_grab_spin_lock()
397 {
398 	lck_spin_lock(&lt_lck_spin_t);
399 	lt_counter++;
400 	lt_spin_a_little_bit();
401 	lck_spin_unlock(&lt_lck_spin_t);
402 }
403 
404 static void
405 lt_grab_spin_lock_with_try()
406 {
407 	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
408 		;
409 	}
410 	lt_counter++;
411 	lt_spin_a_little_bit();
412 	lck_spin_unlock(&lt_lck_spin_t);
413 }
414 
415 static volatile boolean_t lt_thread_lock_grabbed;
416 static volatile boolean_t lt_thread_lock_success;
417 
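/* Reset the shared test counters and arm the setup-timeout deadline. */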
418 static void
419 lt_reset()
420 {
421 	lt_counter = 0;
422 	lt_max_holders = 0;
423 	lt_num_holders = 0;
424 	lt_max_upgrade_holders = 0;
425 	lt_upgrade_holders = 0;
426 	lt_done_threads = 0;
427 	lt_target_done_threads = 0;
428 	lt_cpu_bind_id = 0;
429 	/* Reset the setup timeout deadline relative to the current time */
430 	nanoseconds_to_absolutetime(LOCK_TEST_SETUP_TIMEOUT_SEC * NSEC_PER_SEC, &lt_setup_timeout);
431 	lt_setup_timeout += mach_absolute_time();
432 
433 	OSMemoryBarrier();
434 }
435 
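/*
 * Workers for the contended trylock/timeout tests: wait for the main thread
 * to signal that it holds the lock, then attempt an acquire that is expected
 * to fail, recording the result in lt_thread_lock_success.
 */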
436 static void
437 lt_trylock_hw_lock_with_to()
438 {
439 	OSMemoryBarrier();
440 	while (!lt_thread_lock_grabbed) {
441 		lt_sleep_a_little_bit();
442 		OSMemoryBarrier();
443 	}
444 	lt_thread_lock_success = hw_lock_to(&lt_hw_lock,
445 	    &hw_lock_test_give_up_policy, LCK_GRP_NULL);
446 	OSMemoryBarrier();
447 	mp_enable_preemption();
448 }
449 
450 static void
451 lt_trylock_spin_try_lock()
452 {
453 	OSMemoryBarrier();
454 	while (!lt_thread_lock_grabbed) {
455 		lt_sleep_a_little_bit();
456 		OSMemoryBarrier();
457 	}
458 	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
459 	OSMemoryBarrier();
460 }
461 
462 static void
463 lt_trylock_thread(void *arg, wait_result_t wres __unused)
464 {
465 	void (*func)(void) = (void (*)(void))arg;
466 
467 	func();
468 
469 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
470 }
471 
472 static void
473 lt_start_trylock_thread(thread_continue_t func)
474 {
475 	thread_t thread;
476 	kern_return_t kr;
477 
478 	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
479 	assert(kr == KERN_SUCCESS);
480 
481 	thread_deallocate(thread);
482 }
483 
484 static void
485 lt_wait_for_lock_test_threads()
486 {
487 	OSMemoryBarrier();
488 	/* Spin to reduce dependencies */
489 	while (lt_done_threads < lt_target_done_threads) {
490 		lt_sleep_a_little_bit();
491 		OSMemoryBarrier();
492 	}
493 	OSMemoryBarrier();
494 }
495 
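/*
 * Checks that try-lock and timed-lock variants succeed on unheld locks and
 * fail on locks already held, using a single helper thread for the
 * cross-thread cases.
 */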
496 static kern_return_t
497 lt_test_trylocks()
498 {
499 	boolean_t success;
500 	extern unsigned int real_ncpus;
501 
502 	/*
503 	 * First mtx try lock succeeds, second fails.
504 	 */
505 	success = lck_mtx_try_lock(&lt_mtx);
506 	T_ASSERT_NOTNULL(success, "First mtx try lock");
507 	success = lck_mtx_try_lock(&lt_mtx);
508 	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
509 	lck_mtx_unlock(&lt_mtx);
510 
511 	/*
512 	 * After regular grab, can't try lock.
513 	 */
514 	lck_mtx_lock(&lt_mtx);
515 	success = lck_mtx_try_lock(&lt_mtx);
516 	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
517 	lck_mtx_unlock(&lt_mtx);
518 
519 	/*
520 	 * Two shared try locks on a previously unheld rwlock succeed, and a
521 	 * subsequent exclusive attempt fails.
522 	 */
523 	success = lck_rw_try_lock_shared(&lt_rwlock);
524 	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
525 	success = lck_rw_try_lock_shared(&lt_rwlock);
526 	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
527 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
528 	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
529 	lck_rw_done(&lt_rwlock);
530 	lck_rw_done(&lt_rwlock);
531 
532 	/*
533 	 * After regular shared grab, can trylock
534 	 * for shared but not for exclusive.
535 	 */
536 	lck_rw_lock_shared(&lt_rwlock);
537 	success = lck_rw_try_lock_shared(&lt_rwlock);
538 	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
539 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
540 	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
541 	lck_rw_done(&lt_rwlock);
542 	lck_rw_done(&lt_rwlock);
543 
544 	/*
545 	 * An exclusive try lock succeeds, subsequent shared and exclusive
546 	 * attempts fail.
547 	 */
548 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
549 	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
550 	success = lck_rw_try_lock_shared(&lt_rwlock);
551 	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
552 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
553 	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
554 	lck_rw_done(&lt_rwlock);
555 
556 	/*
557 	 * After regular exclusive grab, neither kind of trylock succeeds.
558 	 */
559 	lck_rw_lock_exclusive(&lt_rwlock);
560 	success = lck_rw_try_lock_shared(&lt_rwlock);
561 	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
562 	success = lck_rw_try_lock_exclusive(&lt_rwlock);
563 	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
564 	lck_rw_done(&lt_rwlock);
565 
566 	/*
567 	 * First spin lock attempts succeed, second attempts fail.
568 	 */
569 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
570 	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
571 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
572 	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
573 	hw_lock_unlock(&lt_hw_lock);
574 
575 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
576 	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
577 	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
578 	hw_lock_unlock(&lt_hw_lock);
579 
580 	lt_reset();
581 	lt_thread_lock_grabbed = false;
582 	lt_thread_lock_success = true;
583 	lt_target_done_threads = 1;
584 	OSMemoryBarrier();
585 	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
586 	success = hw_lock_to(&lt_hw_lock, &hw_lock_test_give_up_policy, LCK_GRP_NULL);
587 	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
588 	if (real_ncpus == 1) {
589 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
590 	}
591 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
592 	lt_wait_for_lock_test_threads();
593 	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
594 	if (real_ncpus == 1) {
595 		mp_disable_preemption(); /* don't double-enable when we unlock */
596 	}
597 	hw_lock_unlock(&lt_hw_lock);
598 
599 	lt_reset();
600 	lt_thread_lock_grabbed = false;
601 	lt_thread_lock_success = true;
602 	lt_target_done_threads = 1;
603 	OSMemoryBarrier();
604 	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
605 	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
606 	if (real_ncpus == 1) {
607 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
608 	}
609 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
610 	lt_wait_for_lock_test_threads();
611 	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
612 	if (real_ncpus == 1) {
613 		mp_disable_preemption(); /* don't double-enable when we unlock */
614 	}
615 	hw_lock_unlock(&lt_hw_lock);
616 
617 	success = lck_spin_try_lock(&lt_lck_spin_t);
618 	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
619 	success = lck_spin_try_lock(&lt_lck_spin_t);
620 	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
621 	lck_spin_unlock(&lt_lck_spin_t);
622 
623 	lt_reset();
624 	lt_thread_lock_grabbed = false;
625 	lt_thread_lock_success = true;
626 	lt_target_done_threads = 1;
627 	lt_start_trylock_thread(lt_trylock_spin_try_lock);
628 	lck_spin_lock(&lt_lck_spin_t);
629 	if (real_ncpus == 1) {
630 		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
631 	}
632 	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
633 	lt_wait_for_lock_test_threads();
634 	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
635 	if (real_ncpus == 1) {
636 		mp_disable_preemption(); /* don't double-enable when we unlock */
637 	}
638 	lck_spin_unlock(&lt_lck_spin_t);
639 
640 	return KERN_SUCCESS;
641 }
642 
643 static void
644 lt_thread(void *arg, wait_result_t wres __unused)
645 {
646 	void (*func)(void) = (void (*)(void))arg;
647 	uint32_t i;
648 
649 	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
650 		func();
651 	}
652 
653 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
654 }
655 
656 static void
657 lt_start_lock_thread(thread_continue_t func)
658 {
659 	thread_t thread;
660 	kern_return_t kr;
661 
662 	kr = kernel_thread_start(lt_thread, func, &thread);
663 	assert(kr == KERN_SUCCESS);
664 
665 	thread_deallocate(thread);
666 }
667 
668 #if __AMP__
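/*
 * Bind each stress thread to a distinct cpu_id (handed out via
 * lt_cpu_bind_id) before running the supplied test body.
 */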
669 static void
670 lt_bound_thread(void *arg, wait_result_t wres __unused)
671 {
672 	void (*func)(void) = (void (*)(void))arg;
673 
674 	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);
675 
676 	processor_t processor = processor_list;
677 	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
678 		processor = processor->processor_list;
679 	}
680 
681 	if (processor != NULL) {
682 		thread_bind(processor);
683 	}
684 
685 	thread_block(THREAD_CONTINUE_NULL);
686 
687 	func();
688 
689 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
690 }
691 
692 static void
693 lt_e_thread(void *arg, wait_result_t wres __unused)
694 {
695 	void (*func)(void) = (void (*)(void))arg;
696 
697 	thread_t thread = current_thread();
698 
699 	thread_soft_bind_cluster_type(thread, 'e');
700 
701 	func();
702 
703 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
704 }
705 
706 static void
707 lt_p_thread(void *arg, wait_result_t wres __unused)
708 {
709 	void (*func)(void) = (void (*)(void))arg;
710 
711 	thread_t thread = current_thread();
712 
713 	thread_soft_bind_cluster_type(thread, 'p');
714 
715 	func();
716 
717 	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
718 }
719 
720 static void
721 lt_start_lock_thread_with_bind(thread_continue_t bind_type, thread_continue_t func)
722 {
723 	thread_t thread;
724 	kern_return_t kr;
725 
726 	kr = kernel_thread_start(bind_type, func, &thread);
727 	assert(kr == KERN_SUCCESS);
728 
729 	thread_deallocate(thread);
730 }
731 #endif /* __AMP__ */
732 
733 static kern_return_t
734 lt_test_locks()
735 {
736 #if SCHED_HYGIENE_DEBUG
737 	/*
738 	 * When testing, the preemption disable threshold may be hit (for
739 	 * example when testing a lock timeout). To avoid this, the preemption
740 	 * disable measurement is temporarily disabled during lock testing.
741 	 */
742 	int old_mode = sched_preemption_disable_debug_mode;
743 	if (old_mode == SCHED_HYGIENE_MODE_PANIC) {
744 		sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
745 	}
746 #endif /* SCHED_HYGIENE_DEBUG */
747 
748 	kern_return_t kr = KERN_SUCCESS;
749 	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
750 	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);
751 
752 	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
753 	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
754 	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
755 	hw_lock_init(&lt_hw_lock);
756 
757 	T_LOG("Testing locks.");
758 
759 	/* Try locks (custom) */
760 	lt_reset();
761 
762 	T_LOG("Running try lock test.");
763 	kr = lt_test_trylocks();
764 	T_EXPECT_NULL(kr, "try lock test failed.");
765 
766 	/* Uncontended mutex */
767 	T_LOG("Running uncontended mutex test.");
768 	lt_reset();
769 	lt_target_done_threads = 1;
770 	lt_start_lock_thread(lt_grab_mutex);
771 	lt_wait_for_lock_test_threads();
772 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
773 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
774 
775 	/* Contended mutex:try locks*/
776 	T_LOG("Running contended mutex test.");
777 	lt_reset();
778 	lt_target_done_threads = 3;
779 	lt_start_lock_thread(lt_grab_mutex);
780 	lt_start_lock_thread(lt_grab_mutex);
781 	lt_start_lock_thread(lt_grab_mutex);
782 	lt_wait_for_lock_test_threads();
783 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
784 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
785 
786 	/* Contended mutex: try locks*/
787 	T_LOG("Running contended mutex trylock test.");
788 	lt_reset();
789 	lt_target_done_threads = 3;
790 	lt_start_lock_thread(lt_grab_mutex_with_try);
791 	lt_start_lock_thread(lt_grab_mutex_with_try);
792 	lt_start_lock_thread(lt_grab_mutex_with_try);
793 	lt_wait_for_lock_test_threads();
794 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
795 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
796 
797 	/* Uncontended exclusive rwlock */
798 	T_LOG("Running uncontended exclusive rwlock test.");
799 	lt_reset();
800 	lt_target_done_threads = 1;
801 	lt_start_lock_thread(lt_grab_rw_exclusive);
802 	lt_wait_for_lock_test_threads();
803 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
804 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
805 
806 	/* Uncontended shared rwlock */
807 
808 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
809 	 *  T_LOG("Running uncontended shared rwlock test.");
810 	 *  lt_reset();
811 	 *  lt_target_done_threads = 1;
812 	 *  lt_start_lock_thread(lt_grab_rw_shared);
813 	 *  lt_wait_for_lock_test_threads();
814 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
815 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
816 	 */
817 
818 	/* Contended exclusive rwlock */
819 	T_LOG("Running contended exclusive rwlock test.");
820 	lt_reset();
821 	lt_target_done_threads = 3;
822 	lt_start_lock_thread(lt_grab_rw_exclusive);
823 	lt_start_lock_thread(lt_grab_rw_exclusive);
824 	lt_start_lock_thread(lt_grab_rw_exclusive);
825 	lt_wait_for_lock_test_threads();
826 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
827 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
828 
829 	/* One shared, two exclusive */
830 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
831 	 *  T_LOG("Running test with one shared and two exclusive rw lock threads.");
832 	 *  lt_reset();
833 	 *  lt_target_done_threads = 3;
834 	 *  lt_start_lock_thread(lt_grab_rw_shared);
835 	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
836 	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
837 	 *  lt_wait_for_lock_test_threads();
838 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
839 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
840 	 */
841 
842 	/* Four shared */
843 	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
844 	 *  T_LOG("Running test with four shared holders.");
845 	 *  lt_reset();
846 	 *  lt_target_done_threads = 4;
847 	 *  lt_start_lock_thread(lt_grab_rw_shared);
848 	 *  lt_start_lock_thread(lt_grab_rw_shared);
849 	 *  lt_start_lock_thread(lt_grab_rw_shared);
850 	 *  lt_start_lock_thread(lt_grab_rw_shared);
851 	 *  lt_wait_for_lock_test_threads();
852 	 *  T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
853 	 */
854 
855 	/* Three doing upgrades and downgrades */
856 	T_LOG("Running test with threads upgrading and downgrading.");
857 	lt_reset();
858 	lt_target_done_threads = 3;
859 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
860 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
861 	lt_start_lock_thread(lt_upgrade_downgrade_rw);
862 	lt_wait_for_lock_test_threads();
863 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
864 	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
865 	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
866 
867 	/* Uncontended - exclusive trylocks */
868 	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
869 	lt_reset();
870 	lt_target_done_threads = 1;
871 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
872 	lt_wait_for_lock_test_threads();
873 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
874 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
875 
876 	/* Uncontended - shared trylocks */
877 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
878 	 *  T_LOG("Running test with single thread doing shared rwlock trylocks.");
879 	 *  lt_reset();
880 	 *  lt_target_done_threads = 1;
881 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
882 	 *  lt_wait_for_lock_test_threads();
883 	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
884 	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
885 	 */
886 
887 	/* Three doing exclusive trylocks */
888 	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
889 	lt_reset();
890 	lt_target_done_threads = 3;
891 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
892 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
893 	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
894 	lt_wait_for_lock_test_threads();
895 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
896 	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
897 
898 	/* Three doing shared trylocks */
899 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
900 	 *  T_LOG("Running test with threads doing shared rwlock trylocks.");
901 	 *  lt_reset();
902 	 *  lt_target_done_threads = 3;
903 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
904 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
905 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
906 	 *  lt_wait_for_lock_test_threads();
907 	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
908 	 *  T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
909 	 */
910 
911 	/* Three doing various trylocks */
912 	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
913 	 *  T_LOG("Running test with threads doing mixed rwlock trylocks.");
914 	 *  lt_reset();
915 	 *  lt_target_done_threads = 4;
916 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
917 	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
918 	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
919 	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
920 	 *  lt_wait_for_lock_test_threads();
921 	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
922 	 *  T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
923 	 */
924 
925 	/* HW locks */
926 	T_LOG("Running test with hw_lock_lock()");
927 	lt_reset();
928 	lt_target_done_threads = 3;
929 	lt_start_lock_thread(lt_grab_hw_lock);
930 	lt_start_lock_thread(lt_grab_hw_lock);
931 	lt_start_lock_thread(lt_grab_hw_lock);
932 	lt_wait_for_lock_test_threads();
933 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
934 
935 #if __AMP__
936 	/* Ticket locks stress test */
937 	T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
938 	extern unsigned int real_ncpus;
939 	lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
940 	lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
941 	lt_reset();
942 	lt_target_done_threads = real_ncpus;
943 	uint thread_count = 0;
944 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
945 		lt_start_lock_thread_with_bind(lt_bound_thread, lt_stress_ticket_lock);
946 		thread_count++;
947 	}
948 	T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
949 	lt_wait_for_lock_test_threads();
950 	bool starvation = false;
951 	uint total_local_count = 0;
952 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
953 		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
954 		total_local_count += lt_stress_local_counters[processor->cpu_id];
955 	}
956 	if (mach_absolute_time() > lt_setup_timeout) {
957 		T_FAIL("Stress test setup timed out after %d seconds", LOCK_TEST_SETUP_TIMEOUT_SEC);
958 	} else if (total_local_count != lt_counter) {
959 		T_FAIL("Lock failure\n");
960 	} else if (starvation) {
961 		T_FAIL("Lock starvation found\n");
962 	} else {
963 		T_PASS("Ticket locks stress test with lck_ticket_lock() (%u total acquires)", total_local_count);
964 	}
965 
966 	/* AMP ticket locks stress test */
967 	T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
968 	lt_reset();
969 	lt_target_done_threads = real_ncpus;
970 	thread_count = 0;
971 	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
972 		processor_set_t pset = processor->processor_set;
973 		switch (pset->pset_cluster_type) {
974 		case PSET_AMP_P:
975 			lt_start_lock_thread_with_bind(lt_p_thread, lt_stress_ticket_lock);
976 			break;
977 		case PSET_AMP_E:
978 			lt_start_lock_thread_with_bind(lt_e_thread, lt_stress_ticket_lock);
979 			break;
980 		default:
981 			lt_start_lock_thread(lt_stress_ticket_lock);
982 			break;
983 		}
984 		thread_count++;
985 	}
986 	T_EXPECT_GE_UINT(thread_count, lt_target_done_threads, "Spawned enough threads for valid test");
987 	lt_wait_for_lock_test_threads();
988 #endif /* __AMP__ */
989 
990 	/* HW locks: trylocks */
991 	T_LOG("Running test with hw_lock_try()");
992 	lt_reset();
993 	lt_target_done_threads = 3;
994 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
995 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
996 	lt_start_lock_thread(lt_grab_hw_lock_with_try);
997 	lt_wait_for_lock_test_threads();
998 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
999 
1000 	/* HW locks: with timeout */
1001 	T_LOG("Running test with hw_lock_to()");
1002 	lt_reset();
1003 	lt_target_done_threads = 3;
1004 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1005 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1006 	lt_start_lock_thread(lt_grab_hw_lock_with_to);
1007 	lt_wait_for_lock_test_threads();
1008 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1009 
1010 	/* Spin locks */
1011 	T_LOG("Running test with lck_spin_lock()");
1012 	lt_reset();
1013 	lt_target_done_threads = 3;
1014 	lt_start_lock_thread(lt_grab_spin_lock);
1015 	lt_start_lock_thread(lt_grab_spin_lock);
1016 	lt_start_lock_thread(lt_grab_spin_lock);
1017 	lt_wait_for_lock_test_threads();
1018 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1019 
1020 	/* Spin locks: trylocks */
1021 	T_LOG("Running test with lck_spin_try_lock()");
1022 	lt_reset();
1023 	lt_target_done_threads = 3;
1024 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1025 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1026 	lt_start_lock_thread(lt_grab_spin_lock_with_try);
1027 	lt_wait_for_lock_test_threads();
1028 	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
1029 
1030 #if SCHED_HYGIENE_DEBUG
1031 	sched_preemption_disable_debug_mode = old_mode;
1032 #endif /* SCHED_HYGIENE_DEBUG */
1033 
1034 	return KERN_SUCCESS;
1035 }
1036 
1037 #define MT_MAX_ARGS             8
1038 #define MT_INITIAL_VALUE        0xfeedbeef
1039 #define MT_W_VAL                (0x00000000feedbeefULL) /* Drop in zeros */
1040 #define MT_S_VAL                (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
1041 #define MT_L_VAL                (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
1042 
1043 typedef void (*sy_munge_t)(void*);
1044 
1045 #define MT_FUNC(x) #x, x
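/*
 * Each entry feeds mt_in_words 32-bit words of MT_INITIAL_VALUE to a munger
 * and verifies that it produces mt_nout 64-bit arguments matching
 * mt_expected (zero-extended, sign-extended, or recombined as appropriate).
 */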
1046 struct munger_test {
1047 	const char      *mt_name;
1048 	sy_munge_t      mt_func;
1049 	uint32_t        mt_in_words;
1050 	uint32_t        mt_nout;
1051 	uint64_t        mt_expected[MT_MAX_ARGS];
1052 } munger_tests[] = {
1053 	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
1054 	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
1055 	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1056 	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1057 	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1058 	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1059 	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1060 	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1061 	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
1062 	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1063 	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1064 	{MT_FUNC(munge_wwlllll), 12, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1065 	{MT_FUNC(munge_wwllllll), 14, 8, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1066 	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1067 	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1068 	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1069 	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1070 	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1071 	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1072 	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1073 	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1074 	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1075 	{MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1076 	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1077 	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1078 	{MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1079 	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1080 	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1081 	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1082 	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1083 	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1084 	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1085 	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1086 	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1087 	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1088 	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
1089 	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1090 	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1091 	{MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1092 	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
1093 	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
1094 	{MT_FUNC(munge_lww), 4, 3, {MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1095 	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1096 	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1097 	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1098 	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
1099 };
1100 
1101 #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
1102 
1103 static void
1104 mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
1105 {
1106 	uint32_t i;
1107 
1108 	for (i = 0; i < in_words; i++) {
1109 		data[i] = MT_INITIAL_VALUE;
1110 	}
1111 
1112 	if (in_words * sizeof(uint32_t) < total_size) {
1113 		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
1114 	}
1115 }
1116 
1117 static void
1118 mt_test_mungers()
1119 {
1120 	uint64_t data[MT_MAX_ARGS];
1121 	uint32_t i, j;
1122 
1123 	for (i = 0; i < MT_TEST_COUNT; i++) {
1124 		struct munger_test *test = &munger_tests[i];
1125 		int pass = 1;
1126 
1127 		T_LOG("Testing %s", test->mt_name);
1128 
1129 		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
1130 		test->mt_func(data);
1131 
1132 		for (j = 0; j < test->mt_nout; j++) {
1133 			if (data[j] != test->mt_expected[j]) {
1134 				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
1135 				pass = 0;
1136 			}
1137 		}
1138 		if (pass) {
1139 			T_PASS(test->mt_name);
1140 		}
1141 	}
1142 }
1143 
1144 #if defined(HAS_APPLE_PAC)
1145 
1146 
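/*
 * Sanity-check pointer authentication: the IA/IB keys are nonzero, a
 * sign/auth round trip restores the original pointer, and authenticating a
 * corrupted signature yields a poisoned (tagged) pointer.
 */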
1147 kern_return_t
1148 arm64_ropjop_test()
1149 {
1150 	T_LOG("Testing ROP/JOP");
1151 
1152 	/* how is ROP/JOP configured */
1153 	boolean_t config_rop_enabled = TRUE;
1154 	boolean_t config_jop_enabled = TRUE;
1155 
1156 
1157 	if (config_jop_enabled) {
1158 		/* jop key */
1159 		uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
1160 		uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");
1161 
1162 		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
1163 	}
1164 
1165 	if (config_rop_enabled) {
1166 		/* rop key */
1167 		uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
1168 		uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");
1169 
1170 		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
1171 
1172 		/* sign a KVA (the address of a local stack variable) */
1173 		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);
1174 
1175 		/* assert it was signed (changed) */
1176 		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);
1177 
1178 		/* authenticate the newly signed KVA */
1179 		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);
1180 
1181 		/* assert the authed KVA is the original KVA */
1182 		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);
1183 
1184 		/* corrupt a signed ptr, auth it, ensure auth failed */
1185 		uint64_t kva_corrupted = kva_signed ^ 1;
1186 
1187 		/* authenticate the corrupted pointer */
1188 		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);
1189 
1190 		/* when AuthIB fails, bits 63:62 will be set to 2'b10 */
1191 		uint64_t auth_fail_mask = 3ULL << 61;
1192 		uint64_t authib_fail = 2ULL << 61;
1193 
1194 		/* assert the failed authIB of corrupted pointer is tagged */
1195 		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
1196 	}
1197 
1198 	return KERN_SUCCESS;
1199 }
1200 #endif /* defined(HAS_APPLE_PAC) */
1201 
1202 #if __ARM_PAN_AVAILABLE__
1203 
1204 struct pan_test_thread_args {
1205 	volatile bool join;
1206 };
1207 
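/*
 * Helper thread for the late PAN test: binds to each processor in turn,
 * runs arm64_pan_test() there, then spins until the parent sets args->join.
 */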
1208 static void
1209 arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
1210 {
1211 	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1212 
1213 	struct pan_test_thread_args *args = arg;
1214 
1215 	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
1216 		thread_bind(p);
1217 		thread_block(THREAD_CONTINUE_NULL);
1218 		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
1219 		arm64_pan_test();
1220 	}
1221 
1222 	/* unbind thread from specific cpu */
1223 	thread_bind(PROCESSOR_NULL);
1224 	thread_block(THREAD_CONTINUE_NULL);
1225 
1226 	while (!args->join) {
1227 		;
1228 	}
1229 
1230 	thread_wakeup(args);
1231 }
1232 
1233 kern_return_t
1234 arm64_late_pan_test()
1235 {
1236 	thread_t thread;
1237 	kern_return_t kr;
1238 
1239 	struct pan_test_thread_args args;
1240 	args.join = false;
1241 
1242 	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
1243 	assert(kr == KERN_SUCCESS);
1244 
1245 	thread_deallocate(thread);
1246 
1247 	assert_wait(&args, THREAD_UNINT);
1248 	args.join = true;
1249 	thread_block(THREAD_CONTINUE_NULL);
1250 	return KERN_SUCCESS;
1251 }
1252 
1253 // Disable KASAN checking for PAN tests as the fixed commpage address doesn't have a shadow mapping
1254 
1255 static NOKASAN bool
1256 arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
1257 {
1258 	bool retval                 = false;
1259 	uint64_t esr                = get_saved_state_esr(state);
1260 	esr_exception_class_t class = ESR_EC(esr);
1261 	fault_status_t fsc          = ISS_IA_FSC(ESR_ISS(esr));
1262 	uint32_t cpsr               = get_saved_state_cpsr(state);
1263 	uint64_t far                = get_saved_state_far(state);
1264 
1265 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
1266 	    (cpsr & PSR64_PAN) &&
1267 	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
1268 		++pan_exception_level;
1269 		// read the user-accessible value to make sure
1270 		// pan is enabled and produces a 2nd fault from
1271 		// the exception handler
1272 		if (pan_exception_level == 1) {
1273 			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
1274 			pan_fault_value = *(volatile char *)far;
1275 			ml_expect_fault_end();
1276 			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1277 		}
1278 		// this fault address is used for PAN test
1279 		// disable PAN and rerun
1280 		mask_saved_state_cpsr(state, 0, PSR64_PAN);
1281 
1282 		retval = true;
1283 	}
1284 
1285 	return retval;
1286 }
1287 
1288 static NOKASAN bool
1289 arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
1290 {
1291 	bool retval             = false;
1292 	uint64_t esr            = get_saved_state_esr(state);
1293 	esr_exception_class_t class = ESR_EC(esr);
1294 	fault_status_t fsc      = ISS_IA_FSC(ESR_ISS(esr));
1295 	uint32_t cpsr           = get_saved_state_cpsr(state);
1296 
1297 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
1298 	    !(cpsr & PSR64_PAN)) {
1299 		++pan_exception_level;
1300 		// On an exception taken from a PAN-disabled context, verify
1301 		// that PAN is re-enabled for the exception handler and that
1302 		// accessing the test address produces a PAN fault.
1303 		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
1304 		pan_fault_value = *(volatile char *)pan_test_addr;
1305 		ml_expect_fault_end();
1306 		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1307 		add_saved_state_pc(state, 4);
1308 
1309 		retval = true;
1310 	}
1311 
1312 	return retval;
1313 }
1314 
1315 NOKASAN kern_return_t
1316 arm64_pan_test()
1317 {
1318 	bool values_match = false;
1319 	vm_offset_t priv_addr = 0;
1320 
1321 	T_LOG("Testing PAN.");
1322 
1323 
1324 	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");
1325 
1326 	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1327 
1328 	pan_exception_level = 0;
1329 	pan_fault_value = 0xDE;
1330 
1331 	// Create an empty pmap, so we can map a user-accessible page
1332 	pmap_t pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
1333 	T_ASSERT(pmap != NULL, NULL);
1334 
1335 	// Get a physical page to back the mapping
1336 	vm_page_t vm_page = vm_page_grab();
1337 	T_ASSERT(vm_page != VM_PAGE_NULL, NULL);
1338 	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(vm_page);
1339 	pmap_paddr_t pa = ptoa(pn);
1340 
1341 	// Write to the underlying physical page through the physical aperture
1342 	// so we can test against a known value
1343 	priv_addr = phystokv((pmap_paddr_t)pa);
1344 	*(volatile char *)priv_addr = 0xAB;
1345 
1346 	// Map the page in the user address space at some, non-zero address
1347 	pan_test_addr = PAGE_SIZE;
1348 	pmap_enter(pmap, pan_test_addr, pn, VM_PROT_READ, VM_PROT_READ, 0, true, PMAP_MAPPING_TYPE_INFER);
1349 
1350 	// Context-switch with PAN disabled is prohibited; prevent test logging from
1351 	// triggering a voluntary context switch.
1352 	mp_disable_preemption();
1353 
1354 	// Insert the user's pmap root table pointer in TTBR0
1355 	thread_t thread = current_thread();
1356 	pmap_t old_pmap = vm_map_pmap(thread->map);
1357 	pmap_switch(pmap, thread);
1358 
1359 	// Below should trigger a PAN exception as pan_test_addr is accessible
1360 	// in user mode
1361 	// The exception handler, upon recognizing the fault address is pan_test_addr,
1362 	// will disable PAN and rerun this instruction successfully
1363 	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
1364 	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
1365 	ml_expect_fault_end();
1366 	T_ASSERT(values_match, NULL);
1367 
1368 	T_ASSERT(pan_exception_level == 2, NULL);
1369 
1370 	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1371 
1372 	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1373 
1374 	pan_exception_level = 0;
1375 	pan_fault_value = 0xAD;
1376 	pan_ro_addr = (vm_offset_t) &pan_ro_value;
1377 
1378 	// Force a permission fault while PAN is disabled to make sure PAN is
1379 	// re-enabled during the exception handler.
1380 	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
1381 	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
1382 	ml_expect_fault_end();
1383 
1384 	T_ASSERT(pan_exception_level == 2, NULL);
1385 
1386 	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1387 
1388 	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1389 
1390 	pmap_switch(old_pmap, thread);
1391 
1392 	pan_ro_addr = 0;
1393 
1394 	__builtin_arm_wsr("pan", 1);
1395 
1396 	mp_enable_preemption();
1397 
1398 	pmap_remove(pmap, pan_test_addr, pan_test_addr + PAGE_SIZE);
1399 	pan_test_addr = 0;
1400 
1401 	vm_page_lock_queues();
1402 	vm_page_free(vm_page);
1403 	vm_page_unlock_queues();
1404 	pmap_destroy(pmap);
1405 
1406 	return KERN_SUCCESS;
1407 }
1408 #endif /* __ARM_PAN_AVAILABLE__ */
1409 
1410 
1411 kern_return_t
1412 arm64_lock_test()
1413 {
1414 	return lt_test_locks();
1415 }
1416 
1417 kern_return_t
1418 arm64_munger_test()
1419 {
1420 	mt_test_mungers();
1421 	return 0;
1422 }
1423 
1424 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
1425 SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
1426 uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
1427 volatile uint64_t ctrr_exception_esr;
1428 vm_offset_t ctrr_test_va;
1429 vm_offset_t ctrr_test_page;
1430 
1431 kern_return_t
1432 ctrr_test(void)
1433 {
1434 	processor_t p;
1435 	boolean_t ctrr_disable = FALSE;
1436 
1437 	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
1438 
1439 #if CONFIG_CSR_FROM_DT
1440 	if (csr_unsafe_kernel_text) {
1441 		ctrr_disable = TRUE;
1442 	}
1443 #endif /* CONFIG_CSR_FROM_DT */
1444 
1445 	if (ctrr_disable) {
1446 		T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
1447 		return KERN_SUCCESS;
1448 	}
1449 
1450 	T_LOG("Running CTRR test.");
1451 
1452 	for (p = processor_list; p != NULL; p = p->processor_list) {
1453 		thread_bind(p);
1454 		thread_block(THREAD_CONTINUE_NULL);
1455 		T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
1456 		ctrr_test_cpu();
1457 	}
1458 
1459 	/* unbind thread from specific cpu */
1460 	thread_bind(PROCESSOR_NULL);
1461 	thread_block(THREAD_CONTINUE_NULL);
1462 
1463 	return KERN_SUCCESS;
1464 }
1465 
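/*
 * Fault handlers for the CTRR tests: record the ESR delivered by the
 * expected data/instruction abort, then resume execution past the faulting
 * access.
 */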
1466 static bool
1467 ctrr_test_ro_fault_handler(arm_saved_state_t * state)
1468 {
1469 	bool retval                 = false;
1470 	uint64_t esr                = get_saved_state_esr(state);
1471 	esr_exception_class_t class = ESR_EC(esr);
1472 	fault_status_t fsc          = ISS_DA_FSC(ESR_ISS(esr));
1473 
1474 	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1475 		ctrr_exception_esr = esr;
1476 		add_saved_state_pc(state, 4);
1477 		retval = true;
1478 	}
1479 
1480 	return retval;
1481 }
1482 
1483 static bool
1484 ctrr_test_nx_fault_handler(arm_saved_state_t * state)
1485 {
1486 	bool retval                 = false;
1487 	uint64_t esr                = get_saved_state_esr(state);
1488 	esr_exception_class_t class = ESR_EC(esr);
1489 	fault_status_t fsc          = ISS_IA_FSC(ESR_ISS(esr));
1490 
1491 	if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
1492 		ctrr_exception_esr = esr;
1493 		/* return to the instruction immediately after the call to NX page */
1494 		set_saved_state_pc(state, get_saved_state_lr(state));
1495 #if BTI_ENFORCED
1496 		/* Clear BTYPE to prevent taking another exception on ERET */
1497 		uint32_t spsr = get_saved_state_cpsr(state);
1498 		spsr &= ~PSR_BTYPE_MASK;
1499 		set_saved_state_cpsr(state, spsr);
1500 #endif /* BTI_ENFORCED */
1501 		retval = true;
1502 	}
1503 
1504 	return retval;
1505 }
1506 
1507 // Disable KASAN checking for CTRR tests as the test VA doesn't have a shadow mapping
1508 
1509 /* test CTRR on a cpu, caller to bind thread to desired cpu */
1510 /* ctrr_test_page was reserved during bootstrap process */
1511 NOKASAN kern_return_t
1512 ctrr_test_cpu(void)
1513 {
1514 	ppnum_t ro_pn, nx_pn;
1515 	uint64_t *ctrr_ro_test_ptr;
1516 	void (*ctrr_nx_test_ptr)(void);
1517 	kern_return_t kr;
1518 	uint64_t prot = 0;
1519 	extern vm_offset_t virtual_space_start;
1520 
1521 	/* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */
1522 
1523 #if (KERNEL_CTRR_VERSION == 3)
1524 	const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_0_C11_C0_2");
1525 	const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_0_C11_C0_3");
1526 #else /* (KERNEL_CTRR_VERSION == 3) */
1527 	const uint64_t rorgn_lwr = __builtin_arm_rsr64("S3_4_C15_C2_3");
1528 	const uint64_t rorgn_upr = __builtin_arm_rsr64("S3_4_C15_C2_4");
1529 #endif /* (KERNEL_CTRR_VERSION == 3) */
1530 	vm_offset_t rorgn_begin_va = phystokv(rorgn_lwr);
1531 	vm_offset_t rorgn_end_va = phystokv(rorgn_upr) + 0x1000;
1532 	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
1533 	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
1534 
1535 	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
1536 	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");
1537 
1538 	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
1539 	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
1540 	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");
1541 
1542 	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
1543 	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
1544 
1545 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1546 	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");
1547 
1548 	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
1549 	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
1550 	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
1551 	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");
1552 
1553 	// assert entire mmu prot path (Hierarchical protection model) is NOT RO, so the write fault below can only come from CTRR
1554 	// fetch effective block level protections from table/block entries
1555 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1556 	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");
1557 
1558 	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
1559 	ctrr_ro_test_ptr = (void *)ctrr_test_va;
1560 
1561 	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);
1562 
1563 	// should cause data abort
1564 	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
1565 	*ctrr_ro_test_ptr = 1;
1566 	ml_expect_fault_end();
1567 
1568 	// ensure write permission fault at expected level
1569 	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault
1570 
1571 	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
1572 	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1573 	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");
1574 
1575 	ctrr_test_va = 0;
1576 	ctrr_exception_esr = 0;
1577 	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1578 
1579 	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);
1580 
1581 	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
1582 	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE, PMAP_MAPPING_TYPE_INFER);
1583 	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");
1584 
1585 	// assert entire mmu prot path (Hierarchical protection model) is NOT XN, so the instruction abort below can only come from CTRR
1586 	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1587 	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");
1588 
1589 	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
1590 #if __has_feature(ptrauth_calls)
1591 	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
1592 #else
1593 	ctrr_nx_test_ptr = (void *)ctrr_test_va;
1594 #endif
1595 
1596 	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);
1597 
1598 	// should cause prefetch abort
1599 	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
1600 	ctrr_nx_test_ptr();
1601 	ml_expect_fault_end();
1602 
1603 	// TODO: ensure execute permission fault at expected level
1604 	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
1605 	T_EXPECT(ISS_IA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1606 
1607 	ctrr_test_va = 0;
1608 	ctrr_exception_esr = 0;
1609 
1610 	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1611 
1612 	T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
1613 	for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
1614 		volatile uint64_t x = *(uint64_t *)addr;
1615 		(void) x; /* read for side effect only */
1616 	}
1617 
1618 	return KERN_SUCCESS;
1619 }
1620 #endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
1621 
1622 
1623 /**
1624  * Explicitly assert that xnu is still uniprocessor before running a POST test.
1625  *
1626  * In practice, tests in this module can safely manipulate CPU state without
1627  * fear of getting preempted.  There's no way for cpu_boot_thread() to bring up
1628  * the secondary CPUs until StartIOKitMatching() completes, and arm64 orders
1629  * kern_post_test() before StartIOKitMatching().
1630  *
1631  * But this is also an implementation detail.  Tests that rely on this ordering
1632  * should call assert_uniprocessor(), so that we can figure out a workaround
1633  * on the off-chance this ordering ever changes.
1634  */
1635 __unused static void
1636 assert_uniprocessor(void)
1637 {
1638 	extern unsigned int real_ncpus;
1639 	unsigned int ncpus = os_atomic_load(&real_ncpus, relaxed);
1640 	T_QUIET; T_ASSERT_EQ_UINT(1, ncpus, "arm64 kernel POST tests should run before any secondary CPUs are brought up");
1641 }
1642 
1643 
1644 #if CONFIG_SPTM
1645 volatile uint8_t xnu_post_panic_lockdown_did_fire = false;
1646 typedef uint64_t (panic_lockdown_helper_fcn_t)(uint64_t raw);
1647 typedef bool (panic_lockdown_recovery_fcn_t)(arm_saved_state_t *);
1648 
1649 /* SP0 vector tests */
1650 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_load;
1651 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_gdbtrap;
1652 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c470;
1653 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c471;
1654 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c472;
1655 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_pac_brk_c473;
1656 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_telemetry_brk_ff00;
1657 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_br_auth_fail;
1658 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_ldr_auth_fail;
1659 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_fpac;
1660 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_copyio;
1661 extern uint8_t arm64_panic_lockdown_test_copyio_fault_pc;
1662 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_bti_telemetry;
1663 
1664 extern int gARM_FEAT_FPACCOMBINE;
1665 
1666 /* SP1 vector tests */
1667 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_invalid_stack;
1668 extern bool arm64_panic_lockdown_test_sp1_invalid_stack_handler(arm_saved_state_t *);
1669 extern panic_lockdown_helper_fcn_t arm64_panic_lockdown_test_sp1_exception_in_vector;
1670 extern panic_lockdown_helper_fcn_t el1_sp1_synchronous_raise_exception_in_vector;
1671 extern bool arm64_panic_lockdown_test_sp1_exception_in_vector_handler(arm_saved_state_t *);
1672 
1673 #if DEVELOPMENT || DEBUG
1674 extern struct panic_lockdown_initiator_state debug_panic_lockdown_initiator_state;
1675 #endif /* DEVELOPMENT || DEBUG */
1676 
1677 typedef struct arm64_panic_lockdown_test_case {
1678 	const char *name;
1679 	panic_lockdown_helper_fcn_t *func;
1680 	uint64_t arg;
1681 	esr_exception_class_t expected_ec;
1682 	bool check_fs;
1683 	fault_status_t expected_fs;
1684 	bool expect_lockdown_exceptions_masked;
1685 	bool expect_lockdown_exceptions_unmasked;
1686 	bool override_expected_fault_pc_valid;
1687 	uint64_t override_expected_fault_pc;
1688 } arm64_panic_lockdown_test_case_s;
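
/*
 * Each test case is exercised twice by arm64_panic_lockdown_test(), e.g.:
 *
 *   panic_lockdown_expect_test("Exceptions unmasked", &tests[i],
 *       tests[i].expect_lockdown_exceptions_unmasked, false);
 *   panic_lockdown_expect_test("Exceptions masked", &tests[i],
 *       tests[i].expect_lockdown_exceptions_masked, true);
 *
 * since the lockdown policy for a given exception class can differ depending
 * on whether interrupts are masked when the exception is taken.
 */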
1689 
1690 static arm64_panic_lockdown_test_case_s *arm64_panic_lockdown_active_test;
1691 static volatile bool arm64_panic_lockdown_caught_exception;
1692 
1693 static bool
1694 arm64_panic_lockdown_test_exception_handler(arm_saved_state_t * state)
1695 {
1696 	uint64_t esr = get_saved_state_esr(state);
1697 	esr_exception_class_t class = ESR_EC(esr);
1698 	fault_status_t fs = ISS_DA_FSC(ESR_ISS(esr));
1699 
1700 	if (!arm64_panic_lockdown_active_test ||
1701 	    class != arm64_panic_lockdown_active_test->expected_ec ||
1702 	    (arm64_panic_lockdown_active_test->check_fs &&
1703 	    fs != arm64_panic_lockdown_active_test->expected_fs)) {
1704 		return false;
1705 	}
1706 
1707 
1708 #if BTI_ENFORCED
1709 	/* Clear BTYPE to prevent taking another exception on ERET */
1710 	uint32_t spsr = get_saved_state_cpsr(state);
1711 	spsr &= ~PSR_BTYPE_MASK;
1712 	set_saved_state_cpsr(state, spsr);
1713 #endif /* BTI_ENFORCED */
1714 
1715 	/* We got the expected exception, recover by forging an early return */
1716 	set_saved_state_pc(state, get_saved_state_lr(state));
1717 	arm64_panic_lockdown_caught_exception = true;
1718 
1719 	return true;
1720 }
1721 
1722 static void
1723 panic_lockdown_expect_test(const char *treatment,
1724     arm64_panic_lockdown_test_case_s *test,
1725     bool expect_lockdown,
1726     bool mask_interrupts)
1727 {
1728 	int ints = 0;
1729 
1730 	arm64_panic_lockdown_active_test = test;
1731 	xnu_post_panic_lockdown_did_fire = false;
1732 	arm64_panic_lockdown_caught_exception = false;
1733 
1734 	uintptr_t fault_pc;
1735 	if (test->override_expected_fault_pc_valid) {
1736 		fault_pc = (uintptr_t)test->override_expected_fault_pc;
1737 	} else {
1738 		fault_pc = (uintptr_t)test->func;
1739 #ifdef BTI_ENFORCED
1740 		/* When BTI is enabled, we expect the fault to occur after the landing pad */
1741 		fault_pc += 4;
1742 #endif /* BTI_ENFORCED */
1743 	}
1744 
1745 
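	/*
	 * ml_expect_fault_pc_begin() arms the recovery handler for a fault taken
	 * at this specific PC, which is why the BTI landing-pad offset is applied
	 * above and why some test cases override the expected fault PC entirely.
	 */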
1746 	ml_expect_fault_pc_begin(
1747 		arm64_panic_lockdown_test_exception_handler,
1748 		fault_pc);
1749 
1750 	if (mask_interrupts) {
1751 		ints = ml_set_interrupts_enabled(FALSE);
1752 	}
1753 
1754 	(void)test->func(test->arg);
1755 
1756 	if (mask_interrupts) {
1757 		(void)ml_set_interrupts_enabled(ints);
1758 	}
1759 
1760 	ml_expect_fault_end();
1761 
1762 	if (expect_lockdown == xnu_post_panic_lockdown_did_fire &&
1763 	    arm64_panic_lockdown_caught_exception) {
1764 		T_PASS("%s + %s OK\n", test->name, treatment);
1765 	} else {
1766 		T_FAIL(
1767 			"%s + %s FAIL (expected lockdown: %d, did lockdown: %d, caught exception: %d)\n",
1768 			test->name, treatment,
1769 			expect_lockdown, xnu_post_panic_lockdown_did_fire,
1770 			arm64_panic_lockdown_caught_exception);
1771 	}
1772 
1773 #if DEVELOPMENT || DEBUG
1774 	/* Check that the debug info is minimally functional */
1775 	if (expect_lockdown) {
1776 		T_EXPECT_NE_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
1777 		    0ULL, "Initiator PC set");
1778 	} else {
1779 		T_EXPECT_EQ_ULLONG(debug_panic_lockdown_initiator_state.initiator_pc,
1780 		    0ULL, "Initiator PC not set");
1781 	}
1782 
1783 	/* Reset the debug data so it can be filled later if needed */
1784 	debug_panic_lockdown_initiator_state.initiator_pc = 0;
1785 #endif /* DEVELOPMENT || DEBUG */
1786 }
1787 
1788 static void
1789 panic_lockdown_expect_fault_raw(const char *label,
1790     panic_lockdown_helper_fcn_t entrypoint,
1791     panic_lockdown_helper_fcn_t faulting_function,
1792     expected_fault_handler_t fault_handler)
1793 {
1794 	uint64_t test_success = 0;
1795 	xnu_post_panic_lockdown_did_fire = false;
1796 
1797 	uintptr_t fault_pc = (uintptr_t)faulting_function;
1798 #ifdef BTI_ENFORCED
1799 	/* When BTI is enabled, we expect the fault to occur after the landing pad */
1800 	fault_pc += 4;
1801 #endif /* BTI_ENFORCED */
1802 
1803 	ml_expect_fault_pc_begin(fault_handler, fault_pc);
1804 
1805 	test_success = entrypoint(0);
1806 
1807 	ml_expect_fault_end();
1808 
1809 	if (test_success && xnu_post_panic_lockdown_did_fire) {
1810 		T_PASS("%s OK\n", label);
1811 	} else {
1812 		T_FAIL("%s FAIL (test returned: %llu, did lockdown: %d)\n",
1813 		    label, test_success, xnu_post_panic_lockdown_did_fire);
1814 	}
1815 }
1816 
1817 /**
1818  * Returns a pointer which is guaranteed to be invalid under IA with the zero
1819  * discriminator.
1820  *
1821  * This is somewhat overcomplicated since it's exceedingly unlikely that
1822  * any given pointer will have a zero PAC (and thus break the test), but it's
1823  * easy enough to avoid the problem.
1824  */
1825 static uint64_t
1826 panic_lockdown_pacia_get_invalid_ptr()
1827 {
1828 	char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1829 	char *signed_ptr = NULL;
1830 	do {
1831 		unsigned_ptr += 4 /* avoid alignment exceptions */;
1832 		signed_ptr = ptrauth_sign_unauthenticated(
1833 			unsigned_ptr,
1834 			ptrauth_key_asia,
1835 			0);
1836 	} while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1837 
1838 	return (uint64_t)unsigned_ptr;
1839 }
1840 
1841 /**
1842  * Returns a pointer which is guaranteed to be invalid under DA with the zero
1843  * discriminator.
1844  */
1845 static uint64_t
1846 panic_lockdown_pacda_get_invalid_ptr(void)
1847 {
1848 	char *unsigned_ptr = (char *)0xFFFFFFFFAABBCC00;
1849 	char *signed_ptr = NULL;
1850 	do {
1851 		unsigned_ptr += 8 /* avoid alignment exceptions */;
1852 		signed_ptr = ptrauth_sign_unauthenticated(
1853 			unsigned_ptr,
1854 			ptrauth_key_asda,
1855 			0);
1856 	} while ((uint64_t)unsigned_ptr == (uint64_t)signed_ptr);
1857 
1858 	return (uint64_t)unsigned_ptr;
1859 }
1860 
1861 kern_return_t
1862 arm64_panic_lockdown_test(void)
1863 {
1864 #if __has_feature(ptrauth_calls)
1865 	uint64_t ia_invalid = panic_lockdown_pacia_get_invalid_ptr();
1866 #endif /* ptrauth_calls */
1867 
1868 	arm64_panic_lockdown_test_case_s tests[] = {
1869 		{
1870 			.name = "arm64_panic_lockdown_test_load",
1871 			.func = &arm64_panic_lockdown_test_load,
1872 			/* Trigger a null deref */
1873 			.arg = (uint64_t)NULL,
1874 			.expected_ec = ESR_EC_DABORT_EL1,
1875 			.expect_lockdown_exceptions_masked = true,
1876 			.expect_lockdown_exceptions_unmasked = false,
1877 		},
1878 		{
1879 			.name = "arm64_panic_lockdown_test_gdbtrap",
1880 			.func = &arm64_panic_lockdown_test_gdbtrap,
1881 			.arg = 0,
1882 			.expected_ec = ESR_EC_UNCATEGORIZED,
1883 			/* GDBTRAP instructions should be allowed everywhere */
1884 			.expect_lockdown_exceptions_masked = false,
1885 			.expect_lockdown_exceptions_unmasked = false,
1886 		},
1887 #if __has_feature(ptrauth_calls)
1888 		{
1889 			.name = "arm64_panic_lockdown_test_pac_brk_c470",
1890 			.func = &arm64_panic_lockdown_test_pac_brk_c470,
1891 			.arg = 0,
1892 			.expected_ec = ESR_EC_BRK_AARCH64,
1893 			.expect_lockdown_exceptions_masked = true,
1894 			.expect_lockdown_exceptions_unmasked = true,
1895 		},
1896 		{
1897 			.name = "arm64_panic_lockdown_test_pac_brk_c471",
1898 			.func = &arm64_panic_lockdown_test_pac_brk_c471,
1899 			.arg = 0,
1900 			.expected_ec = ESR_EC_BRK_AARCH64,
1901 			.expect_lockdown_exceptions_masked = true,
1902 			.expect_lockdown_exceptions_unmasked = true,
1903 		},
1904 		{
1905 			.name = "arm64_panic_lockdown_test_pac_brk_c472",
1906 			.func = &arm64_panic_lockdown_test_pac_brk_c472,
1907 			.arg = 0,
1908 			.expected_ec = ESR_EC_BRK_AARCH64,
1909 			.expect_lockdown_exceptions_masked = true,
1910 			.expect_lockdown_exceptions_unmasked = true,
1911 		},
1912 		{
1913 			.name = "arm64_panic_lockdown_test_pac_brk_c473",
1914 			.func = &arm64_panic_lockdown_test_pac_brk_c473,
1915 			.arg = 0,
1916 			.expected_ec = ESR_EC_BRK_AARCH64,
1917 			.expect_lockdown_exceptions_masked = true,
1918 			.expect_lockdown_exceptions_unmasked = true,
1919 		},
1920 		{
1921 			.name = "arm64_panic_lockdown_test_telemetry_brk_ff00",
1922 			.func = &arm64_panic_lockdown_test_telemetry_brk_ff00,
1923 			.arg = 0,
1924 			.expected_ec = ESR_EC_BRK_AARCH64,
1925 			/*
1926 			 * PAC breakpoints are not the only breakpoints, ensure that other
1927 			 * BRKs (like those used for telemetry) do not trigger lockdowns.
1928 			 * This is necessary to avoid conflicts with features like UBSan
1929 			 * telemetry (which could fire at any time in C code).
1930 			 */
1931 			.expect_lockdown_exceptions_masked = false,
1932 			.expect_lockdown_exceptions_unmasked = false,
1933 		},
1934 		{
1935 			.name = "arm64_panic_lockdown_test_br_auth_fail",
1936 			.func = &arm64_panic_lockdown_test_br_auth_fail,
1937 			.arg = ia_invalid,
1938 			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_IABORT_EL1,
1939 			.expect_lockdown_exceptions_masked = true,
1940 			.expect_lockdown_exceptions_unmasked = true,
1941 			/*
1942 			 * Pre-FEAT_FPACCOMBINE, BRAx branches to a poisoned PC so we
1943 			 * expect to fault on the branch target rather than the branch
1944 			 * itself. The exact ELR will likely be different from ia_invalid,
1945 			 * but since the expect logic in sleh only matches on low bits (i.e.
1946 			 * not bits which will be poisoned), this is fine.
1947 			 * On FEAT_FPACCOMBINE devices, we will fault on the branch itself.
1948 			 */
1949 			.override_expected_fault_pc_valid = !gARM_FEAT_FPACCOMBINE,
1950 			.override_expected_fault_pc = ia_invalid
1951 		},
1952 		{
1953 			.name = "arm64_panic_lockdown_test_ldr_auth_fail",
1954 			.func = &arm64_panic_lockdown_test_ldr_auth_fail,
1955 			.arg = panic_lockdown_pacda_get_invalid_ptr(),
1956 			.expected_ec = gARM_FEAT_FPACCOMBINE ? ESR_EC_PAC_FAIL : ESR_EC_DABORT_EL1,
1957 			.expect_lockdown_exceptions_masked = true,
1958 			.expect_lockdown_exceptions_unmasked = true,
1959 		},
1960 		{
1961 			.name = "arm64_panic_lockdown_test_copyio_poison",
1962 			.func = &arm64_panic_lockdown_test_copyio,
1963 			/* fake a poisoned kernel pointer by flipping the bottom PAC bit */
1964 			.arg = ((uint64_t)-1) ^ (1LLU << (64 - T1SZ_BOOT)),
1965 			.expected_ec = ESR_EC_DABORT_EL1,
1966 			.expect_lockdown_exceptions_masked = false,
1967 			.expect_lockdown_exceptions_unmasked = false,
1968 			.override_expected_fault_pc_valid = true,
1969 			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
1970 		},
1971 #if __ARM_ARCH_8_6__
1972 		{
1973 			.name = "arm64_panic_lockdown_test_fpac",
1974 			.func = &arm64_panic_lockdown_test_fpac,
1975 			.arg = ia_invalid,
1976 			.expected_ec = ESR_EC_PAC_FAIL,
1977 			.expect_lockdown_exceptions_masked = true,
1978 			.expect_lockdown_exceptions_unmasked = true,
1979 		},
1980 #endif /* __ARM_ARCH_8_6__ */
1981 #endif /* ptrauth_calls */
1982 		{
1983 			.name = "arm64_panic_lockdown_test_copyio",
1984 			.func = &arm64_panic_lockdown_test_copyio,
1985 			.arg = 0x0 /* load from NULL */,
1986 			.expected_ec = ESR_EC_DABORT_EL1,
1987 			.expect_lockdown_exceptions_masked = false,
1988 			.expect_lockdown_exceptions_unmasked = false,
1989 			.override_expected_fault_pc_valid = true,
1990 			.override_expected_fault_pc = (uint64_t)&arm64_panic_lockdown_test_copyio_fault_pc,
1991 		},
1992 	};
1993 
1994 	size_t test_count = sizeof(tests) / sizeof(*tests);
1995 	for (size_t i = 0; i < test_count; i++) {
1996 		panic_lockdown_expect_test(
1997 			"Exceptions unmasked",
1998 			&tests[i],
1999 			tests[i].expect_lockdown_exceptions_unmasked,
2000 			/* mask_interrupts */ false);
2001 
2002 		panic_lockdown_expect_test(
2003 			"Exceptions masked",
2004 			&tests[i],
2005 			tests[i].expect_lockdown_exceptions_masked,
2006 			/* mask_interrupts */ true);
2007 	}
2008 
2009 	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_invalid_stack",
2010 	    arm64_panic_lockdown_test_sp1_invalid_stack,
2011 	    arm64_panic_lockdown_test_pac_brk_c470,
2012 	    arm64_panic_lockdown_test_sp1_invalid_stack_handler);
2013 
2014 	panic_lockdown_expect_fault_raw("arm64_panic_lockdown_test_sp1_exception_in_vector",
2015 	    arm64_panic_lockdown_test_sp1_exception_in_vector,
2016 	    el1_sp1_synchronous_raise_exception_in_vector,
2017 	    arm64_panic_lockdown_test_sp1_exception_in_vector_handler);
2018 	return KERN_SUCCESS;
2019 }
2020 #endif /* CONFIG_SPTM */
2021 
2022 
2023 
2024 #if HAS_SPECRES
2025 
2026 /*** CPS RCTX ***/
2027 
2028 
2029 /*** SPECRES ***/
2030 
2031 #if HAS_SPECRES2
2032 /*
2033  * Execute a COSP RCTX instruction.
2034  */
2035 static void
2036 _cosprctx_exec(uint64_t raw)
2037 {
2038 	asm volatile ( "ISB SY");
2039 	__asm__ volatile ("COSP RCTX, %0" :: "r" (raw));
2040 	asm volatile ( "DSB SY");
2041 	asm volatile ( "ISB SY");
2042 }
2043 #endif
2044 
2045 /*
2046  * Execute a CFP RCTX instruction.
2047  */
2048 static void
2049 _cfprctx_exec(uint64_t raw)
2050 {
2051 	asm volatile ( "ISB SY");
2052 	__asm__ volatile ("CFP RCTX, %0" :: "r" (raw));
2053 	asm volatile ( "DSB SY");
2054 	asm volatile ( "ISB SY");
2055 }
2056 
2057 /*
2058  * Execute a CPP RCTX instruction.
2059  */
2060 static void
2061 _cpprctx_exec(uint64_t raw)
2062 {
2063 	asm volatile ( "ISB SY");
2064 	__asm__ volatile ("CPP RCTX, %0" :: "r" (raw));
2065 	asm volatile ( "DSB SY");
2066 	asm volatile ( "ISB SY");
2067 }
2068 
2069 /*
2070  * Execute a DVP RCTX instruction.
2071  */
2072 static void
2073 _dvprctx_exec(uint64_t raw)
2074 {
2075 	asm volatile ( "ISB SY");
2076 	__asm__ volatile ("DVP RCTX, %0" :: "r" (raw));
2077 	asm volatile ( "DSB SY");
2078 	asm volatile ( "ISB SY");
2079 }
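
/*
 * Each helper above brackets its RCTX instruction with ISB/DSB barriers so the
 * restriction-by-context operation is ordered against, and completes before,
 * the surrounding test code.
 */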
2080 
2081 static void
2082 _specres_do_test_std(void (*impl)(uint64_t raw))
2083 {
2084 	typedef struct {
2085 		union {
2086 			struct {
2087 				uint64_t ASID:16;
2088 				uint64_t GASID:1;
2089 				uint64_t :7;
2090 				uint64_t EL:2;
2091 				uint64_t NS:1;
2092 				uint64_t NSE:1;
2093 				uint64_t :4;
2094 				uint64_t VMID:16;
2095 				uint64_t GVMID:1;
2096 			};
2097 			uint64_t raw;
2098 		};
2099 	} specres_ctx;
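
	/*
	 * This layout is intended to mirror the Xt operand of the CFP/CPP/DVP/COSP
	 * RCTX instructions: ASID[15:0], GASID[16], EL[25:24], NS[26], NSE[27],
	 * VMID[47:32] and GVMID[48], with the remaining bits reserved.
	 */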
2100 
2101 	assert(sizeof(specres_ctx) == 8);
2102 
2103 	/*
2104 	 * Test various meaningful RCTX context IDs.
2105 	 */
2106 
2107 	/* el : EL0 / EL1 / EL2. */
2108 	for (uint8_t el = 0; el < 3; el++) {
2109 		/* Always non-secure. */
2110 		const uint8_t ns = 1;
2111 		const uint8_t nse = 0;
2112 
2113 		/* Iterate over some pairs of ASIDs / VMIDs. */
2114 		for (uint16_t xxid = 0; xxid < 256; xxid++) {
2115 			const uint16_t asid = (uint16_t) (xxid << 4);
2116 			const uint16_t vmid = (uint16_t) (256 - (xxid << 4));
2117 
2118 			/* Test 4 G[AS|VM]ID combinations. */
2119 			for (uint8_t bid = 0; bid < 4; bid++) {
2120 				const uint8_t gasid = bid & 1;
2121 				const uint8_t gvmid = bid & 2;
2122 
2123 				/* Generate the context descriptor. */
2124 				specres_ctx ctx = {0};
2125 				ctx.ASID = asid;
2126 				ctx.GASID = gasid;
2127 				ctx.EL = el;
2128 				ctx.NS = ns;
2129 				ctx.NSE = nse;
2130 				ctx.VMID = vmid;
2131 				ctx.GVMID = gvmid;
2132 
2133 				/* Execute the RCTX instruction under test. */
2134 				(*impl)(ctx.raw);
2135 
2136 				/* Insert some operation. */
2137 				volatile uint8_t sum = 0;
2138 				for (volatile uint8_t i = 0; i < 64; i++) {
2139 					sum += i * sum + 3;
2140 				}
2141 
2142 				/* If EL0 is not targeted, we only need to do this once. */
2143 				if (el != 0) {
2144 					goto not_el0_skip;
2145 				}
2146 			}
2147 		}
2148 
2149 		/* El0 skip. */
2150 not_el0_skip:   ;
2151 	}
2152 }
2153 
2154 /*** RCTX ***/
2155 
2156 static void
2157 _rctx_do_test(void)
2158 {
2159 	_specres_do_test_std(&_cfprctx_exec);
2160 	_specres_do_test_std(&_cpprctx_exec);
2161 	_specres_do_test_std(&_dvprctx_exec);
2162 #if HAS_SPECRES2
2163 	_specres_do_test_std(&_cosprctx_exec);
2164 #endif
2165 }
2166 
2167 kern_return_t
2168 specres_test(void)
2169 {
2170 	/* Basic instructions test. */
2171 	_cfprctx_exec(0);
2172 	_cpprctx_exec(0);
2173 	_dvprctx_exec(0);
2174 #if HAS_SPECRES2
2175 	_cosprctx_exec(0);
2176 #endif
2177 
2178 	/* More advanced instructions test. */
2179 	_rctx_do_test();
2180 
2181 	return KERN_SUCCESS;
2182 }
2183 
2184 #endif /* HAS_SPECRES */
2185 #if BTI_ENFORCED
2186 typedef uint64_t (bti_landing_pad_func_t)(void);
2187 typedef uint64_t (bti_shim_func_t)(bti_landing_pad_func_t *);
2188 
2189 extern bti_shim_func_t arm64_bti_test_jump_shim;
2190 extern bti_shim_func_t arm64_bti_test_call_shim;
2191 
2192 extern bti_landing_pad_func_t arm64_bti_test_func_with_no_landing_pad;
2193 extern bti_landing_pad_func_t arm64_bti_test_func_with_call_landing_pad;
2194 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_landing_pad;
2195 extern bti_landing_pad_func_t arm64_bti_test_func_with_jump_call_landing_pad;
2196 #if __has_feature(ptrauth_returns)
2197 extern bti_landing_pad_func_t arm64_bti_test_func_with_pac_landing_pad;
2198 #endif /* __has_feature(ptrauth_returns) */
2199 
2200 typedef struct arm64_bti_test_func_case {
2201 	const char *func_str;
2202 	bti_landing_pad_func_t *func;
2203 	uint64_t expect_return_value;
2204 	uint8_t  expect_call_ok;
2205 	uint8_t  expect_jump_ok;
2206 } arm64_bti_test_func_case_s;
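
/*
 * expect_call_ok / expect_jump_ok record whether entering the target through
 * the call shim (a BLR-type indirect branch) or the jump shim (a BR-type
 * indirect branch) is expected to complete without a BTI exception, given the
 * landing pad the target provides.
 */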
2207 
2208 static volatile uintptr_t bti_exception_handler_pc = 0;
2209 
2210 static bool
2211 arm64_bti_test_exception_handler(arm_saved_state_t * state)
2212 {
2213 	uint64_t esr = get_saved_state_esr(state);
2214 	esr_exception_class_t class = ESR_EC(esr);
2215 
2216 	if (class != ESR_EC_BTI_FAIL) {
2217 		return false;
2218 	}
2219 
2220 	/* Capture any desired exception metrics */
2221 	bti_exception_handler_pc = get_saved_state_pc(state);
2222 
2223 	/* "Cancel" the function call by forging an early return */
2224 	set_saved_state_pc(state, get_saved_state_lr(state));
2225 
2226 	/* Clear BTYPE to prevent taking another exception after ERET */
2227 	uint32_t spsr = get_saved_state_cpsr(state);
2228 	spsr &= ~PSR_BTYPE_MASK;
2229 	set_saved_state_cpsr(state, spsr);
2230 
2231 	return true;
2232 }
2233 
2234 static void
2235 arm64_bti_test_func_with_shim(
2236 	uint8_t expect_ok,
2237 	const char *shim_str,
2238 	bti_shim_func_t *shim,
2239 	arm64_bti_test_func_case_s *test_case)
2240 {
2241 	uint64_t result = -1;
2242 
2243 	/* Capture BTI exceptions triggered by our target function */
2244 	uintptr_t raw_func = (uintptr_t)ptrauth_strip(
2245 		(void *)test_case->func,
2246 		ptrauth_key_function_pointer);
2247 	ml_expect_fault_pc_begin(arm64_bti_test_exception_handler, raw_func);
2248 	bti_exception_handler_pc = 0;
2249 
2250 	/*
2251 	 * The assembly routines do not support C function type discriminators, so
2252 	 * strip and resign with zero if needed
2253 	 */
2254 	bti_landing_pad_func_t *resigned = ptrauth_auth_and_resign(
2255 		test_case->func,
2256 		ptrauth_key_function_pointer,
2257 		ptrauth_type_discriminator(bti_landing_pad_func_t),
2258 		ptrauth_key_function_pointer, 0);
2259 
2260 	result = shim(resigned);
2261 
2262 	ml_expect_fault_end();
2263 
2264 	if (!expect_ok && raw_func != bti_exception_handler_pc) {
2265 		T_FAIL("Expected BTI exception at 0x%llx but got one at 0x%llx instead\n",
2266 		    raw_func, bti_exception_handler_pc);
2267 	} else if (expect_ok && bti_exception_handler_pc) {
2268 		T_FAIL("Did not expect BTI exception but got one at 0x%llx\n",
2269 		    bti_exception_handler_pc);
2270 	} else if (!expect_ok && !bti_exception_handler_pc) {
2271 		T_FAIL("Failed to hit expected exception!\n");
2272 	} else if (expect_ok && result != test_case->expect_return_value) {
2273 		T_FAIL("Incorrect test function result (expected=%llu, result=%llu)\n",
2274 		    test_case->expect_return_value, result);
2275 	} else {
2276 		T_PASS("%s (shim=%s)\n", test_case->func_str, shim_str);
2277 	}
2278 }
2279 
2280 /**
2281  * This test ensures that BTI exceptions are raised where expected, and only
2282  * where expected, by exhaustively testing all indirect branch combinations
2283  * with all landing pad options.
2284  */
2285 kern_return_t
2286 arm64_bti_test(void)
2287 {
2288 	static arm64_bti_test_func_case_s tests[] = {
2289 		{
2290 			.func_str = "arm64_bti_test_func_with_no_landing_pad",
2291 			.func = &arm64_bti_test_func_with_no_landing_pad,
2292 			.expect_return_value     = 1,
2293 			.expect_call_ok          = 0,
2294 			.expect_jump_ok          = 0,
2295 		},
2296 		{
2297 			.func_str = "arm64_bti_test_func_with_call_landing_pad",
2298 			.func = &arm64_bti_test_func_with_call_landing_pad,
2299 			.expect_return_value     = 2,
2300 			.expect_call_ok          = 1,
2301 			.expect_jump_ok          = 0,
2302 		},
2303 		{
2304 			.func_str = "arm64_bti_test_func_with_jump_landing_pad",
2305 			.func = &arm64_bti_test_func_with_jump_landing_pad,
2306 			.expect_return_value     = 3,
2307 			.expect_call_ok          = 0,
2308 			.expect_jump_ok          = 1,
2309 		},
2310 		{
2311 			.func_str = "arm64_bti_test_func_with_jump_call_landing_pad",
2312 			.func = &arm64_bti_test_func_with_jump_call_landing_pad,
2313 			.expect_return_value     = 4,
2314 			.expect_call_ok          = 1,
2315 			.expect_jump_ok          = 1,
2316 		},
2317 #if __has_feature(ptrauth_returns)
2318 		{
2319 			.func_str = "arm64_bti_test_func_with_pac_landing_pad",
2320 			.func = &arm64_bti_test_func_with_pac_landing_pad,
2321 			.expect_return_value     = 5,
2322 			.expect_call_ok          = 1,
2323 			.expect_jump_ok          = 0,
2324 		},
2325 #endif /* __has_feature(ptrauth_returns) */
2326 	};
2327 
2328 	size_t test_count = sizeof(tests) / sizeof(*tests);
2329 	for (size_t i = 0; i < test_count; i++) {
2330 		arm64_bti_test_func_case_s *test_case = tests + i;
2331 
2332 		arm64_bti_test_func_with_shim(test_case->expect_call_ok,
2333 		    "arm64_bti_test_call_shim",
2334 		    arm64_bti_test_call_shim,
2335 		    test_case);
2336 
2337 
2338 		arm64_bti_test_func_with_shim(test_case->expect_jump_ok,
2339 		    "arm64_bti_test_jump_shim",
2340 		    arm64_bti_test_jump_shim,
2341 		    test_case);
2342 	}
2343 
2344 	return KERN_SUCCESS;
2345 }
2346 #endif /* BTI_ENFORCED */
2347 
2348 
2349 /**
2350  * Test the speculation guards
2351  * We can't easily ensure that the guards actually behave correctly under
2352  * speculation, but we can at least ensure that the guards are non-speculatively
2353  * correct.
2354  */
2355 kern_return_t
2356 arm64_speculation_guard_test(void)
2357 {
2358 	uint64_t cookie1_64 = 0x5350454354524521ULL; /* SPECTRE! */
2359 	uint64_t cookie2_64 = 0x5941592043505553ULL; /* YAY CPUS */
2360 	uint32_t cookie1_32 = (uint32_t)cookie1_64;
2361 	uint32_t cookie2_32 = (uint32_t)cookie2_64;
2362 	uint64_t result64 = 0;
2363 	uint32_t result32 = 0;
2364 	bool result_valid;
2365 
2366 	/*
2367 	 * Test the zeroing guard
2368 	 * Since failing the guard triggers a panic, we don't actually test that
2369 	 * part as part of the automated tests.
2370 	 */
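
	/*
	 * The macro suffixes appear to encode operand widths: the first letter is
	 * the width of the output and the next two are the widths of the comparison
	 * operands (X = 64-bit, W = 32-bit), matching the "64, 64", "64, 32", etc.
	 * labels below.
	 */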
2371 
2372 	result64 = 0;
2373 	SPECULATION_GUARD_ZEROING_XXX(
2374 		/* out */ result64, /* out_valid */ result_valid,
2375 		/* value */ cookie1_64,
2376 		/* cmp_1 */ 0ULL, /* cmp_2 */ 1ULL, /* cc */ "NE");
2377 	T_EXPECT(result_valid, "result valid");
2378 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 zeroing guard works");
2379 
2380 	result64 = 0;
2381 	SPECULATION_GUARD_ZEROING_XWW(
2382 		/* out */ result64, /* out_valid */ result_valid,
2383 		/* value */ cookie1_64,
2384 		/* cmp_1 */ 1U, /* cmp_2 */ 0U, /* cc */ "HI");
2385 	T_EXPECT(result_valid, "result valid");
2386 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 32 zeroing guard works");
2387 
2388 	result32 = 0;
2389 	SPECULATION_GUARD_ZEROING_WXX(
2390 		/* out */ result32, /* out_valid */ result_valid,
2391 		/* value */ cookie1_32,
2392 		/* cmp_1 */ -1LL, /* cmp_2 */ 4LL, /* cc */ "LT");
2393 	T_EXPECT(result_valid, "result valid");
2394 	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 64 zeroing guard works");
2395 
2396 	result32 = 0;
2397 	SPECULATION_GUARD_ZEROING_WWW(
2398 		/* out */ result32, /* out_valid */ result_valid,
2399 		/* value */ cookie1_32,
2400 		/* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "GT");
2401 	T_EXPECT(result_valid, "result valid");
2402 	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 32 zeroing guard works");
2403 
2404 	result32 = 0x41;
2405 	SPECULATION_GUARD_ZEROING_WWW(
2406 		/* out */ result32, /* out_valid */ result_valid,
2407 		/* value */ cookie1_32,
2408 		/* cmp_1 */ 1, /* cmp_2 */ -4, /* cc */ "LT");
2409 	T_EXPECT(!result_valid, "result invalid");
2410 	T_EXPECT_EQ_UINT(result32, 0, "zeroing guard works with failing condition");
2411 
2412 	/*
2413 	 * Test the selection guard
2414 	 */
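
	/*
	 * The selection guard compares cmp_1 against cmp_2 and yields sel_1 when
	 * cc holds and sel_2 when the negated condition n_cc holds, which is what
	 * the expectations below assume.
	 */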
2415 
2416 	result64 = 0;
2417 	SPECULATION_GUARD_SELECT_XXX(
2418 		/* out */ result64,
2419 		/* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
2420 		/* cc   */ "EQ", /* sel_1 */ cookie1_64,
2421 		/* n_cc */ "NE", /* sel_2 */ cookie2_64);
2422 	T_EXPECT_EQ_ULLONG(result64, cookie2_64, "64, 64 select guard works (1)");
2423 
2424 	result64 = 0;
2425 	SPECULATION_GUARD_SELECT_XXX(
2426 		/* out */ result64,
2427 		/* cmp_1 */ 32ULL, /* cmp_2 */ 32ULL,
2428 		/* cc   */ "EQ", /* sel_1 */ cookie1_64,
2429 		/* n_cc */ "NE", /* sel_2 */ cookie2_64);
2430 	T_EXPECT_EQ_ULLONG(result64, cookie1_64, "64, 64 select guard works (2)");
2431 
2432 
2433 	result32 = 0;
2434 	SPECULATION_GUARD_SELECT_WXX(
2435 		/* out */ result32,
2436 		/* cmp_1 */ 16ULL, /* cmp_2 */ 32ULL,
2437 		/* cc   */ "HI", /* sel_1 */ cookie1_64,
2438 		/* n_cc */ "LS", /* sel_2 */ cookie2_64);
2439 	T_EXPECT_EQ_UINT(result32, cookie2_32, "32, 64 select guard works (1)");
2440 
2441 	result32 = 0;
2442 	SPECULATION_GUARD_SELECT_WXX(
2443 		/* out */ result32,
2444 		/* cmp_1 */ 16ULL, /* cmp_2 */ 2ULL,
2445 		/* cc   */ "HI", /* sel_1 */ cookie1_64,
2446 		/* n_cc */ "LS", /* sel_2 */ cookie2_64);
2447 	T_EXPECT_EQ_UINT(result32, cookie1_32, "32, 64 select guard works (2)");
2448 
2449 	return KERN_SUCCESS;
2450 }
2451