/*
 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator  or  [email protected]
 * School of Computer Science Carnegie Mellon University Pittsburgh PA
 * 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>

#if     MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif                          /* MACH_KDB */

#include <san/kasan.h>
#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#include <arm64/amcc_rorgn.h>
#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
kern_return_t arm64_ropjop_test(void);
#endif
#if defined(KERNEL_INTEGRITY_CTRR)
kern_return_t ctrr_test(void);
kern_return_t ctrr_test_cpu(void);
#endif

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif

#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t   lt_hw_lock;
static lck_spin_t       lt_lck_spin_t;
static lck_mtx_t        lt_mtx;
static lck_rw_t         lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int     lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;

static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}
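
/*
 * The holder bookkeeping above drives the core invariant of these tests:
 * for a mutual-exclusion primitive, lt_max_holders must never exceed 1.
 * A minimal sketch of how the tests below use it (names from this file):
 *
 *	lt_reset();
 *	lt_target_done_threads = 3;
 *	// ... start 3 threads that grab/release the lock under test ...
 *	lt_wait_for_lock_test_threads();
 *	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
 */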

static void
lt_spin_a_little_bit()
{
	uint32_t i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}

static void
lt_sleep_a_little_bit()
{
	delay(100);
}

static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
	while (0 == lck_mtx_try_lock(&lt_mtx)) {
		;
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_rw_exclusive()
{
	lck_rw_lock_exclusive(&lt_rwlock);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
		lt_sleep_a_little_bit();
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 *  static void
 *  lt_grab_rw_shared()
 *  {
 *       lck_rw_lock_shared(&lt_rwlock);
 *       lt_counter++;
 *
 *       lt_note_another_blocking_lock_holder();
 *       lt_sleep_a_little_bit();
 *       lt_note_blocking_lock_release();
 *
 *       lck_rw_done(&lt_rwlock);
 *  }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 *  static void
 *  lt_grab_rw_shared_with_try()
 *  {
 *       while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *       lt_counter++;
 *
 *       lt_note_another_blocking_lock_holder();
 *       lt_sleep_a_little_bit();
 *       lt_note_blocking_lock_release();
 *
 *       lck_rw_done(&lt_rwlock);
 *  }
 */

static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}
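
/*
 * Note on the fallback path above: lck_rw_lock_shared_to_exclusive() is
 * expected to drop the shared hold entirely when the upgrade fails (for
 * instance, another thread is already waiting to upgrade), so the loser
 * must re-acquire the lock in exclusive mode from scratch rather than
 * release it again. Because only one thread can hold the exclusive side
 * at a time, the tests assert lt_max_upgrade_holders stays at exactly 1.
 */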

#if __AMP__
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

lck_ticket_t lt_ticket_lock;
lck_grp_t lt_ticket_grp;

static void
lt_stress_ticket_lock()
{
	int local_counter = 0;

	uint cpuid = cpu_number();

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
	lt_counter++;
	local_counter++;
	lck_ticket_unlock(&lt_ticket_lock);

	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		lck_ticket_unlock(&lt_ticket_lock);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
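
/*
 * Why ticket locks here: a ticket lock hands the lock to waiters in FIFO
 * order, so on an asymmetric (AMP) system no CPU should be starved even
 * when fast and slow cores contend. The per-CPU counters above make that
 * checkable: lt_counter is the global acquisition count, and each
 * lt_stress_local_counters[cpuid] records how many of those acquisitions
 * the given CPU won (the caller treats fewer than 10 as starvation).
 */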
#endif

static void
lt_grab_hw_lock()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
	while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static __abortlike hw_lock_timeout_status_t
lt_hw_lock_to_panic(void *lock, uint64_t timeout, uint64_t start, uint64_t now, uint64_t interrupt_time)
{
#pragma unused(timeout, start, now, interrupt_time)
	panic("%s> acquiring lock %p timed out", __func__, lock);
}
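
/*
 * hw_lock_to() invokes its handler when the spin timeout expires. A handler
 * can either panic (like lt_hw_lock_to_panic above, marked __abortlike since
 * it never returns) or return a hw_lock_timeout_status_t telling the caller
 * what to do next; lt_hw_lock_to_allow below returns HW_LOCK_TIMEOUT_RETURN
 * so hw_lock_to() gives up and reports failure, which is how the trylock
 * tests observe a timeout without panicking.
 */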

static void
lt_grab_hw_lock_with_to()
{
	(void)hw_lock_to(&lt_hw_lock, os_atomic_load(&LockTimeOut, relaxed),
	    lt_hw_lock_to_panic, LCK_GRP_NULL);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
	lck_spin_lock(&lt_lck_spin_t);
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;

	OSMemoryBarrier();
}

static hw_lock_timeout_status_t
lt_hw_lock_to_allow(void *lock, uint64_t timeout, uint64_t start, uint64_t now, uint64_t interrupt_time)
{
#pragma unused(lock, timeout, start, now, interrupt_time)
	return HW_LOCK_TIMEOUT_RETURN;
}

static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100,
	    lt_hw_lock_to_allow, LCK_GRP_NULL);
	OSMemoryBarrier();
	mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	OSMemoryBarrier();
}

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}
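
/*
 * The completion protocol is deliberately primitive: workers publish with
 * OSIncrementAtomic on lt_done_threads, and the main thread spin-polls
 * with delays and explicit OSMemoryBarrier() calls. Using a semaphore or
 * condition variable here would drag the locking primitives under test
 * into the test harness itself, so the harness leans only on atomics and
 * barriers (the "Spin to reduce dependencies" comment above).
 */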

static kern_return_t
lt_test_trylocks()
{
	boolean_t success;
	extern unsigned int real_ncpus;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, 100, lt_hw_lock_to_allow, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;
	uint32_t i;

	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
		func();
	}

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

#if __AMP__
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
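
/*
 * Binding pattern used above: thread_bind() only records the target
 * processor; it takes effect at the next context switch, hence the
 * thread_block(THREAD_CONTINUE_NULL) that immediately follows. Each bound
 * worker claims a distinct cpu_id via OSIncrementAtomic on lt_cpu_bind_id,
 * so starting one worker per processor covers every CPU exactly once.
 */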

static void
lt_e_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	thread_t thread = current_thread();

	thread_bind_cluster_type(thread, 'e', false);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_p_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	thread_t thread = current_thread();

	thread_bind_cluster_type(thread, 'p', false);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread_e(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_e_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_start_lock_thread_p(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_p_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_start_lock_thread_bound(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_bound_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
#endif

static kern_return_t
lt_test_locks()
{
	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks */
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running uncontended shared rwlock test.");
	 *  lt_reset();
	 *  lt_target_done_threads = 1;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running test with one shared and two exclusive rw lock threads.");
	 *  lt_reset();
	 *  lt_target_done_threads = 3;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running test with four shared holders.");
	 *  lt_reset();
	 *  lt_target_done_threads = 4;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	 */

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with single thread doing shared rwlock trylocks.");
	 *  lt_reset();
	 *  lt_target_done_threads = 1;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with threads doing shared rwlock trylocks.");
	 *  lt_reset();
	 *  lt_target_done_threads = 3;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	 */

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with threads doing mixed rwlock trylocks.");
	 *  lt_reset();
	 *  lt_target_done_threads = 4;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	 */

	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

#if __AMP__
	/* Ticket locks stress test */
	T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
	extern unsigned int real_ncpus;
	lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
	lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_ticket_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("Ticket locks stress test with lck_ticket_lock()");
	}

	/* AMP ticket locks stress test */
	T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		processor_set_t pset = processor->processor_set;
		if (pset->pset_cluster_type == PSET_AMP_P) {
			lt_start_lock_thread_p(lt_stress_ticket_lock);
		} else if (pset->pset_cluster_type == PSET_AMP_E) {
			lt_start_lock_thread_e(lt_stress_ticket_lock);
		} else {
			lt_start_lock_thread(lt_stress_ticket_lock);
		}
	}
	lt_wait_for_lock_test_threads();
#endif

	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	return KERN_SUCCESS;
}

#define MT_MAX_ARGS             8
#define MT_INITIAL_VALUE        0xfeedbeef
#define MT_W_VAL                (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL                (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL                (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
	const char      *mt_name;
	sy_munge_t      mt_func;
	uint32_t        mt_in_words;
	uint32_t        mt_nout;
	uint64_t        mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
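
/*
 * For reference: each munger rewrites, in place, the packed 32-bit argument
 * words saved from a 32-bit user thread into the 64-bit argument slots the
 * kernel expects ('w' = zero-extend a word, 's' = sign-extend a word,
 * 'l' = combine two words into one 64-bit value). The sketch below is only
 * an illustrative model of the munge_wl layout that the table above
 * verifies, not the shipped implementation (the real mungers are declared
 * in <sys/munge.h>); it assumes little-endian word order, and expansion
 * must run from the last argument to the first so no input word is
 * overwritten before it has been read.
 */
static void __unused
mt_example_munge_wl(void *args)
{
	uint32_t *in = (uint32_t *)args;
	uint64_t *out = (uint64_t *)args;

	/* 'l': input words 1 and 2 (low word first) become one 64-bit value */
	out[1] = ((uint64_t)in[2] << 32) | (uint64_t)in[1];
	/* 'w': input word 0 zero-extends to 64 bits */
	out[0] = (uint64_t)in[0];
}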

static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}

static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}

/* Exception Callback Test */
static ex_cb_action_t
excb_test_action(
	ex_cb_class_t           cb_class,
	void                            *refcon,
	const ex_cb_state_t     *state
	)
{
	ex_cb_state_t *context = (ex_cb_state_t *)refcon;

	if ((NULL == refcon) || (NULL == state)) {
		return EXCB_ACTION_TEST_FAIL;
	}

	context->far = state->far;

	switch (cb_class) {
	case EXCB_CLASS_TEST1:
		return EXCB_ACTION_RERUN;
	case EXCB_CLASS_TEST2:
		return EXCB_ACTION_NONE;
	default:
		return EXCB_ACTION_TEST_FAIL;
	}
}
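
/*
 * The callback contract exercised below: ex_cb_invoke() passes the faulting
 * address (far) to whichever handler was registered for the class, and the
 * handler's return value tells the exception path what to do next
 * (EXCB_ACTION_RERUN to retry the faulting instruction, EXCB_ACTION_NONE to
 * take no action). The refcon pointer supplied at registration time is how
 * this test smuggles a per-class context structure back out for checking.
 */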


kern_return_t
ex_cb_test()
{
	const vm_offset_t far1 = 0xdead0001;
	const vm_offset_t far2 = 0xdead0002;
	kern_return_t kr;
	ex_cb_state_t test_context_1 = {0xdeadbeef};
	ex_cb_state_t test_context_2 = {0xdeadbeef};
	ex_cb_action_t action;

	T_LOG("Testing Exception Callback.");

	T_LOG("Running registration test.");

	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

	T_LOG("Running invocation test.");

	action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
	T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
	T_ASSERT(far1 == test_context_1.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);
	T_ASSERT(far2 == test_context_2.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);

	return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)


kern_return_t
arm64_ropjop_test()
{
	T_LOG("Testing ROP/JOP");

	/* how is ROP/JOP configured */
	boolean_t config_rop_enabled = TRUE;
	boolean_t config_jop_enabled = TRUE;


	if (config_jop_enabled) {
		/* jop key */
		uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
		uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");

		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
	}

	if (config_rop_enabled) {
		/* rop key */
		uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
		uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");

		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

		/* sign a KVA (the address of a local variable) */
		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

		/* assert it was signed (changed) */
		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

		/* authenticate the newly signed KVA */
		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

		/* assert the authed KVA is the original KVA */
		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

		/* corrupt a signed ptr, auth it, ensure auth failed */
		uint64_t kva_corrupted = kva_signed ^ 1;

		/* authenticate the corrupted pointer */
		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

		/* when AuthIB fails, the error code 2'b10 is placed in bits 62:61 */
		uint64_t auth_fail_mask = 3ULL << 61;
		uint64_t authib_fail = 2ULL << 61;

		/* assert the failed authIB of corrupted pointer is tagged */
		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
	}

	return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */

#if __ARM_PAN_AVAILABLE__

struct pan_test_thread_args {
	volatile bool join;
};

static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	struct pan_test_thread_args *args = arg;

	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
		arm64_pan_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	while (!args->join) {
		;
	}

	thread_wakeup(args);
}
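
/*
 * PSTATE.PAN is per-CPU state, so the late test above deliberately walks the
 * processor list, binding to each CPU in turn and re-running arm64_pan_test()
 * there. The crude spin on args->join keeps the worker alive until the
 * joiner below has called assert_wait() and set the flag, so the final
 * thread_wakeup() cannot race ahead of the wait and be lost.
 */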

kern_return_t
arm64_late_pan_test()
{
	thread_t thread;
	kern_return_t kr;

	struct pan_test_thread_args args;
	args.join = false;

	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);

	assert_wait(&args, THREAD_UNINT);
	args.join = true;
	thread_block(THREAD_CONTINUE_NULL);
	return KERN_SUCCESS;
}

// Disable KASAN checking for PAN tests as the fixed commpage address doesn't have a shadow mapping

static NOKASAN bool
arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
{
	bool retval                 = false;
	uint32_t esr                = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc          = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr               = get_saved_state_cpsr(state);
	uint64_t far                = get_saved_state_far(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    (cpsr & PSR64_PAN) &&
	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
		++pan_exception_level;
		// read the user-accessible value to make sure
		// pan is enabled and produces a 2nd fault from
		// the exception handler
		if (pan_exception_level == 1) {
			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
			pan_fault_value = *(volatile char *)far;
			ml_expect_fault_end();
			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		}
		// this fault address is used for PAN test
		// disable PAN and rerun
		mask_saved_state_cpsr(state, 0, PSR64_PAN);

		retval = true;
	}

	return retval;
}
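
/*
 * The ml_expect_fault_begin()/ml_expect_fault_end() bracket used throughout
 * these tests arms a per-thread fault interceptor: if the marked region
 * faults at the expected address, the registered handler (such as the one
 * above) runs instead of the normal panic path. Note the recursion by
 * design: the level-1 handler re-reads the user address with PAN still
 * enabled, taking a second, nested fault to prove PAN was re-enabled for
 * the exception handler; that is why the tests assert
 * pan_exception_level == 2.
 */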

static NOKASAN bool
arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
{
	bool retval             = false;
	uint32_t esr            = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc      = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr           = get_saved_state_cpsr(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    !(cpsr & PSR64_PAN)) {
		++pan_exception_level;
		// On an exception taken from a PAN-disabled context, verify
		// that PAN is re-enabled for the exception handler and that
		// accessing the test address produces a PAN fault.
		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
		pan_fault_value = *(volatile char *)pan_test_addr;
		ml_expect_fault_end();
		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		add_saved_state_pc(state, 4);

		retval = true;
	}

	return retval;
}

NOKASAN kern_return_t
arm64_pan_test()
{
	bool values_match = false;
	vm_offset_t priv_addr = 0;

	T_LOG("Testing PAN.");


	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;

	// Create an empty pmap, so we can map a user-accessible page
	pmap_t pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
	T_ASSERT(pmap != NULL, NULL);

	// Get a physical page to back the mapping
	vm_page_t vm_page = vm_page_grab();
	T_ASSERT(vm_page != VM_PAGE_NULL, NULL);
	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(vm_page);
	pmap_paddr_t pa = ptoa(pn);

	// Write to the underlying physical page through the physical aperture
	// so we can test against a known value
	priv_addr = phystokv((pmap_paddr_t)pa);
	*(volatile char *)priv_addr = 0xAB;

	// Map the page into the user address space at some non-zero address
	pan_test_addr = PAGE_SIZE;
	pmap_enter(pmap, pan_test_addr, pn, VM_PROT_READ, VM_PROT_READ, 0, true);

	// Context-switch with PAN disabled is prohibited; prevent test logging from
	// triggering a voluntary context switch.
	mp_disable_preemption();

	// Insert the user's pmap root table pointer in TTBR0
	pmap_t old_pmap = vm_map_pmap(current_thread()->map);
	pmap_switch(pmap);

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
	ml_expect_fault_end();
	T_ASSERT(values_match, NULL);

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
	ml_expect_fault_end();

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pmap_switch(old_pmap);

	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);

	mp_enable_preemption();

	pmap_remove(pmap, pan_test_addr, pan_test_addr + PAGE_SIZE);
	pan_test_addr = 0;

	vm_page_lock_queues();
	vm_page_free(vm_page);
	vm_page_unlock_queues();
	pmap_destroy(pmap);

	return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */


kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return 0;
}

#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
volatile uint64_t ctrr_exception_esr;
vm_offset_t ctrr_test_va;
vm_offset_t ctrr_test_page;

kern_return_t
ctrr_test(void)
{
	processor_t p;
	boolean_t ctrr_disable = FALSE;

	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = TRUE;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (ctrr_disable) {
		T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
		return KERN_SUCCESS;
	}

	T_LOG("Running CTRR test.");

	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
		ctrr_test_cpu();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	return KERN_SUCCESS;
}

static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)
{
	bool retval                 = false;
	uint32_t esr                = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc          = ISS_DA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		add_saved_state_pc(state, 4);
		retval = true;
	}

	return retval;
}

static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)
{
	bool retval                 = false;
	uint32_t esr                = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc          = ISS_IA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		/* return to the instruction immediately after the call to NX page */
		set_saved_state_pc(state, get_saved_state_lr(state));
		retval = true;
	}

	return retval;
}

// Disable KASAN checking for CTRR tests as the test VA doesn't have a shadow mapping

/* test CTRR on a cpu, caller to bind thread to desired cpu */
/* ctrr_test_page was reserved during bootstrap process */
NOKASAN kern_return_t
ctrr_test_cpu(void)
{
	ppnum_t ro_pn, nx_pn;
	uint64_t *ctrr_ro_test_ptr;
	void (*ctrr_nx_test_ptr)(void);
	kern_return_t kr;
	uint64_t prot = 0;
	extern vm_offset_t virtual_space_start;

	/* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */

	vm_offset_t rorgn_begin_va = phystokv(ctrr_begin);
	vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1;
	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;

	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");

	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");

	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);

	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT RO
	// fetch effective block level protections from table/block entries
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");

	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
	ctrr_ro_test_ptr = (void *)ctrr_test_va;

	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

	// should cause data abort
	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
	*ctrr_ro_test_ptr = 1;
	ml_expect_fault_end();

	// ensure write permission fault at expected level
	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault

	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);

	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT XN
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");

	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
#if __has_feature(ptrauth_calls)
	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
	ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

	// should cause prefetch abort
	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
	ctrr_nx_test_ptr();
	ml_expect_fault_end();

	// TODO: ensure execute permission fault at expected level
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;

	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
	for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
		volatile uint64_t x = *(uint64_t *)addr;
		(void) x; /* read for side effect only */
	}

	return KERN_SUCCESS;
}
#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */