1 /*
2 * Copyright (c) 2007-2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <debug.h>
30
31 #include <types.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kpc.h>
45
46 #if MONOTONIC
47 #include <kern/monotonic.h>
48 #endif /* MONOTONIC */
49
50 #include <machine/atomic.h>
51 #include <arm64/proc_reg.h>
52 #include <arm64/machine_machdep.h>
53 #include <arm/cpu_data_internal.h>
54 #include <arm/machdep_call.h>
55 #include <arm/misc_protos.h>
56 #include <arm/cpuid.h>
57
58 #include <vm/vm_map.h>
59 #include <vm/vm_protos.h>
60
61 #include <sys/kdebug.h>
62
63
64 #include <san/kcov_stksz.h>
65
66 #include <IOKit/IOBSD.h>
67
68 #include <pexpert/pexpert.h>
69
70 extern int debug_task;
71
72 /* zone for debug_state area */
73 ZONE_DEFINE_TYPE(ads_zone, "arm debug state", arm_debug_state_t, ZC_NONE);
74 ZONE_DEFINE_TYPE(user_ss_zone, "user save state", arm_context_t, ZC_NONE);
75
76
77 /*
78 * Routine: consider_machine_collect
79 *
80 */
void
consider_machine_collect(void)
{
	/* Ask the physical map layer to garbage-collect unused resources. */
	pmap_gc();
}
86
87 /*
88 * Routine: consider_machine_adjust
89 *
90 */
void
consider_machine_adjust(void)
{
	/* No machine-specific memory adjustment is needed on this platform. */
}
95
96
97
98
99
100
101
/*
 * Transfer ownership of the per-CPU data pointers from the outgoing
 * thread to the incoming one during a context switch / stack handoff.
 */
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	/* Clear the outgoing thread's fields before publishing them on `new`. */
	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base = 0;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}
120
121 /**
122 * routine: machine_switch_pmap_and_extended_context
123 *
124 * Helper function used by machine_switch_context and machine_stack_handoff to switch the
125 * extended context and switch the pmap if necessary.
126 *
127 */
128
static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;

	/* Only switch address spaces when the incoming thread uses a different pmap. */
	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap) {
		pmap_switch(new_pmap);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB. ARM requires that the
		 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee. We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0. Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);
	}

	/* Hand off the per-CPU data ownership to the incoming thread. */
	machine_thread_switch_cpu_data(old, new);
}
163
164 /*
165 * Routine: machine_switch_context
166 *
167 */
thread_t
machine_switch_context(thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
	thread_t retval;

#if __ARM_PAN_AVAILABLE__
	/* PAN must stay enabled while in the kernel; catch violations here. */
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("context switch with PAN disabled");
	}
#endif

/* Debug trace hook; expands to nothing (the kprintf is commented out). */
#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	/* Switching a thread to itself violates a scheduler invariant. */
	if (old == new) {
		panic("machine_switch_context");
	}

	/* Stop sampling performance counters for the outgoing thread. */
	kpc_off_cpu(old);

	machine_switch_pmap_and_extended_context(old, new);

	machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);

	/* Low-level register/stack switch; returns the thread we came from. */
	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}
199
200 boolean_t
machine_thread_on_core(thread_t thread)201 machine_thread_on_core(thread_t thread)
202 {
203 return thread->machine.CpuDatap != NULL;
204 }
205
/*
 * Like machine_thread_on_core(), but tolerates a `thread` pointer that may
 * refer to a freed thread; returns false rather than faulting in that case.
 */
boolean_t
machine_thread_on_core_allow_invalid(thread_t thread)
{
	extern int _copyin_atomic64(const char *src, uint64_t *dst);
	uint64_t addr;

	/*
	 * Utilize that the thread zone is sequestered which means
	 * that this kernel-to-kernel copyin can't read data
	 * from anything but a thread, zeroed or freed memory.
	 */
	assert(get_preemption_level() > 0);
	thread = pgz_decode_allow_invalid(thread, ZONE_ID_THREAD);
	if (thread == THREAD_NULL) {
		return false;
	}
	thread_require(thread);
	/* Fault-tolerant read of CpuDatap; nonzero return means the read failed. */
	if (_copyin_atomic64((void *)&thread->machine.CpuDatap, &addr) == 0) {
		return addr != 0;
	}
	return false;
}
228
229
230 /*
231 * Routine: machine_thread_create
232 *
233 */
void
machine_thread_create(thread_t thread, task_t task, bool first_thread)
{
/* Debug trace hook; expands to nothing (the kprintf is commented out). */
#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (!first_thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause trying to use it to panic
		thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
	}
	/* Reset machine-dependent thread state to a clean baseline. */
	thread->machine.arm_machine_flags = 0;
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	/* Inherit the task's pointer-authentication keys/configuration. */
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	if (task->disable_user_jop) {
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_DISABLE_USER_JOP;
	}
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		arm_context_t *contextData = zalloc_flags(user_ss_zone,
		    Z_WAITOK | Z_NOFAIL);

#if __has_feature(ptrauth_calls)
		/*
		 * Validate the allocation and publish the (signed) pointers with
		 * interrupts masked so the window can't be interrupted.
		 */
		uint64_t intr = ml_pac_safe_interrupts_disable();
		zone_require(user_ss_zone, contextData);
#endif
		thread->machine.contextData = contextData;
		thread->machine.upcb = &contextData->ss;
		thread->machine.uNeon = &contextData->ns;
#if __has_feature(ptrauth_calls)
		ml_pac_safe_interrupts_restore(intr);
#endif

		/* Tag the saved-state headers with the task's data width. */
		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		/* Kernel threads carry no user save state. */
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	machine_thread_state_initialize(thread);
}
303
304 /*
305 * Routine: machine_thread_process_signature
306 *
307 * Called to allow code signature dependent adjustments to the thread
308 * state. Note that this is usually called twice for the main thread:
309 * Once at thread creation by thread_create, when the signature is
310 * potentially not attached yet (which is usually the case for the
311 * first/main thread of a task), and once after the task's signature
312 * has actually been attached.
313 *
314 */
kern_return_t
machine_thread_process_signature(thread_t __unused thread, task_t __unused task)
{
	kern_return_t result = KERN_SUCCESS;

	/*
	 * Reset to default state.
	 *
	 * In general, this function must not assume anything about the
	 * previous signature dependent thread state.
	 *
	 * At least at the time of writing this, threads don't transition
	 * to different code signatures, so each thread this function
	 * operates on is "fresh" in the sense that
	 * machine_thread_process_signature() has either not even been
	 * called on it yet, or only been called as part of thread
	 * creation when there was no signature yet.
	 *
	 * But for easier reasoning, and to prevent future bugs, this
	 * function should always recalculate all signature-dependent
	 * thread state, as if the signature could actually change from an
	 * actual signature to another.
	 */
#if !__ARM_KERNEL_PROTECT__
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_PRESERVE_X18);
#endif /* !__ARM_KERNEL_PROTECT__ */

	/*
	 * Set signature dependent state.
	 *
	 * NOTE(review): the `else` branch below is conditionally compiled;
	 * with __ARM_KERNEL_PROTECT__ the whole if-body (and else) compiles
	 * away to an empty statement.
	 */
	if (task != kernel_task && task_has_64Bit_data(task)) {
#if !__ARM_KERNEL_PROTECT__
#if CONFIG_ROSETTA
		if (task_is_translated(task)) {
			/* Note that for x86_64 translation specifically, the
			 * context switch path implicitly switches x18 regardless
			 * of this flag. */
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* CONFIG_ROSETTA */

		if (task->preserve_x18) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
	} else {
		/*
		 * For informational value only, context switch only trashes
		 * x18 for user threads. (Except for devices with
		 * __ARM_KERNEL_PROTECT__, which make real destructive use of
		 * x18.)
		 */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
#endif /* !__ARM_KERNEL_PROTECT__ */
	}

	return result;
}
373
374 /*
375 * Routine: machine_thread_destroy
376 *
377 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		/*
		 * If this CPU's debug registers still point at the thread's
		 * debug state, turn them off before releasing it.
		 */
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		/* Debug state is reference counted; free only on the last release. */
		if (os_ref_release(&thread->machine.DebugData->ref) == 0) {
			zfree(ads_zone, thread->machine.DebugData);
		}
	}
}
404
405
406
407
408 /*
409 * Routine: machine_thread_init
410 *
411 */
void
machine_thread_init(void)
{
	/* No machine-dependent thread-subsystem initialization required. */
}
417
418 /*
419 * Routine: machine_thread_template_init
420 *
421 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}
427
428 /*
429 * Routine: get_useraddr
430 *
431 */
432 user_addr_t
get_useraddr()433 get_useraddr()
434 {
435 return get_saved_state_pc(current_thread()->machine.upcb);
436 }
437
438 /*
439 * Routine: machine_stack_detach
440 *
441 */
/*
 * Detach and return the kernel stack of `thread`, leaving the thread
 * with no kernel stack (kernel_stack = 0, kstackptr = NULL).
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	/* Capture the stack before clearing the thread's reference to it. */
	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;
	thread->machine.kstackptr = NULL;

	return stack;
}
459
460
461 /*
462 * Routine: machine_stack_attach
463 *
464 */
/*
 * Attach `stack` as the kernel stack of `thread` and initialize the
 * kernel saved state at its top so the thread resumes in thread_continue.
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	uint32_t current_el;

/* Debug trace hook; expands to nothing (the kprintf is commented out). */
#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	/* thread_kernel_state lives at the top of the kernel stack. */
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	thread->machine.kstackptr = kstackptr;
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = (uint64_t)kstackptr;
	savestate->pc_was_in_userspace = false;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	uint64_t intr = ml_pac_safe_interrupts_disable();
	/*
	 * Compute &thread_continue (x17), then PACIA-sign it with the saved
	 * SP (x16) as the modifier and store it as the saved LR.
	 */
	asm volatile (
		"adrp x17, _thread_continue@page"               "\n"
		"add x17, x17, _thread_continue@pageoff"        "\n"
		"ldr x16, [%[ss], %[SS64_SP]]"                  "\n"
		"pacia1716"                                     "\n"
		"str x17, [%[ss], %[SS64_LR]]"                  "\n"
		:
		: [ss] "r"(&context->ss),
		[SS64_SP] "i"(offsetof(struct arm_kernel_saved_state, sp)),
		[SS64_LR] "i"(offsetof(struct arm_kernel_saved_state, lr))
		: "x16", "x17"
	);
	ml_pac_safe_interrupts_restore(intr);
#else
	savestate->lr = (uintptr_t)thread_continue;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
519
520
521 /*
522 * Routine: machine_stack_handoff
523 *
524 */
/*
 * Hand the current kernel stack from `old` directly to `new` without a
 * full register save/restore, then switch address space and CPU data.
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t stack;

#if __ARM_PAN_AVAILABLE__
	/* PAN must stay enabled while in the kernel; catch violations here. */
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

	/* Stop sampling performance counters for the outgoing thread. */
	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif
	new->kernel_stack = stack;
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	new->machine.kstackptr = kstackptr;
	/*
	 * If the handed-off stack is the old thread's reserved stack, swap
	 * reserved stacks so each thread still owns exactly one.
	 */
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
#if KASAN_TBI
		kasan_unpoison_stack(old->reserved_stack, kernel_stack_size);
#endif /* KASAN_TBI */
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}
560
561
562 /*
563 * Routine: call_continuation
564 *
565 */
/*
 * Thin wrapper that invokes the assembly Call_continuation trampoline to
 * run `continuation(parameter, wresult)` on a fresh kernel stack frame.
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
/* Debug trace hook; expands to nothing (the kprintf is commented out). */
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %lx\n",
	    current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
579
/*
 * Helpers to program the AArch64 self-hosted debug registers.  The MSR
 * instruction requires a literal register number, hence one macro
 * instantiation per index.  The *CRn (control) variants additionally OR
 * the written value into `accum` so callers can detect whether any
 * breakpoint/watchpoint control bits were enabled at all.
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
597
/*
 * Install (or clear, when debug_state == NULL) the 32-bit-flavor user
 * debug state into this CPU's hardware debug registers.  Runs with
 * interrupts disabled; takes a reference on the new state and drops the
 * reference on the state previously installed on this CPU.
 */
void
arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr;
	arm_debug_state_t off_state;
	arm_debug_state_t *cpu_debug;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		/* Use a zeroed local state to switch everything off. */
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	/*
	 * Program only the breakpoint pairs the hardware implements; the
	 * cases deliberately fall through from the highest implemented
	 * index down to 0.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Same fallthrough ladder for the implemented watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would enable privileged-mode debugging. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	/* Synchronize the debug register writes before re-enabling interrupts. */
	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}
799
/*
 * Install (or clear, when debug_state == NULL) the 64-bit-flavor user
 * debug state into this CPU's hardware debug registers.  Mirrors
 * arm_debug_set32() but reads the ds64 layout (64-bit bvr/wvr values).
 */
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr;
	arm_debug_state_t off_state;
	arm_debug_state_t *cpu_debug;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		/* Use a zeroed local state to switch everything off. */
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	/*
	 * Program only the breakpoint pairs the hardware implements; the
	 * cases deliberately fall through from the highest implemented
	 * index down to 0.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Same fallthrough ladder for the implemented watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would enable privileged-mode debugging. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	/* Synchronize the debug register writes before re-enabling interrupts. */
	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}
1002
1003 void
arm_debug_set(arm_debug_state_t * debug_state)1004 arm_debug_set(arm_debug_state_t *debug_state)
1005 {
1006 if (debug_state) {
1007 switch (debug_state->dsh.flavor) {
1008 case ARM_DEBUG_STATE32:
1009 arm_debug_set32(debug_state);
1010 break;
1011 case ARM_DEBUG_STATE64:
1012 arm_debug_set64(debug_state);
1013 break;
1014 default:
1015 panic("arm_debug_set");
1016 break;
1017 }
1018 } else {
1019 if (thread_is_64bit_data(current_thread())) {
1020 arm_debug_set64(debug_state);
1021 } else {
1022 arm_debug_set32(debug_state);
1023 }
1024 }
1025 }
1026
1027 #define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
1028 boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t * debug_state)1029 debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
1030 {
1031 arm_debug_info_t *debug_info = arm_debug_info();
1032 uint32_t i;
1033 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1034 if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
1035 return FALSE;
1036 }
1037 }
1038
1039 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1040 if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
1041 return FALSE;
1042 }
1043 }
1044 return TRUE;
1045 }
1046
1047 boolean_t
debug_state_is_valid32(arm_debug_state32_t * debug_state)1048 debug_state_is_valid32(arm_debug_state32_t *debug_state)
1049 {
1050 arm_debug_info_t *debug_info = arm_debug_info();
1051 uint32_t i;
1052 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1053 if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
1054 return FALSE;
1055 }
1056 }
1057
1058 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1059 if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
1060 return FALSE;
1061 }
1062 }
1063 return TRUE;
1064 }
1065
1066 boolean_t
debug_state_is_valid64(arm_debug_state64_t * debug_state)1067 debug_state_is_valid64(arm_debug_state64_t *debug_state)
1068 {
1069 arm_debug_info_t *debug_info = arm_debug_info();
1070 uint32_t i;
1071 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1072 if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
1073 return FALSE;
1074 }
1075 }
1076
1077 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1078 if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
1079 return FALSE;
1080 }
1081 }
1082 return TRUE;
1083 }
1084
1085 /*
1086 * Duplicate one arm_debug_state_t to another. "all" parameter
1087 * is ignored in the case of ARM -- Is this the right assumption?
1088 */
1089 void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,arm_legacy_debug_state_t * target,__unused boolean_t all)1090 copy_legacy_debug_state(arm_legacy_debug_state_t * src,
1091 arm_legacy_debug_state_t * target,
1092 __unused boolean_t all)
1093 {
1094 bcopy(src, target, sizeof(arm_legacy_debug_state_t));
1095 }
1096
1097 void
copy_debug_state32(arm_debug_state32_t * src,arm_debug_state32_t * target,__unused boolean_t all)1098 copy_debug_state32(arm_debug_state32_t * src,
1099 arm_debug_state32_t * target,
1100 __unused boolean_t all)
1101 {
1102 bcopy(src, target, sizeof(arm_debug_state32_t));
1103 }
1104
1105 void
copy_debug_state64(arm_debug_state64_t * src,arm_debug_state64_t * target,__unused boolean_t all)1106 copy_debug_state64(arm_debug_state64_t * src,
1107 arm_debug_state64_t * target,
1108 __unused boolean_t all)
1109 {
1110 bcopy(src, target, sizeof(arm_debug_state64_t));
1111 }
1112
1113 kern_return_t
machine_thread_set_tsd_base(thread_t thread,mach_vm_offset_t tsd_base)1114 machine_thread_set_tsd_base(thread_t thread,
1115 mach_vm_offset_t tsd_base)
1116 {
1117 if (get_threadtask(thread) == kernel_task) {
1118 return KERN_INVALID_ARGUMENT;
1119 }
1120
1121 if (thread_is_64bit_addr(thread)) {
1122 if (tsd_base > vm_map_max(thread->map)) {
1123 tsd_base = 0ULL;
1124 }
1125 } else {
1126 if (tsd_base > UINT32_MAX) {
1127 tsd_base = 0ULL;
1128 }
1129 }
1130
1131 thread->machine.cthread_self = tsd_base;
1132
1133 /* For current thread, make the TSD base active immediately */
1134 if (thread == current_thread()) {
1135 mp_disable_preemption();
1136 set_tpidrro(tsd_base);
1137 mp_enable_preemption();
1138 }
1139
1140 return KERN_SUCCESS;
1141 }
1142
/*
 * Thread execution context switch hook; intentionally a no-op on this
 * configuration (other platforms/configs may implement real work here).
 */
void
machine_tecs(__unused thread_t thr)
{
}
1147
/*
 * Query CPU-vulnerability mitigation status for the given vulnerability
 * enumerator. Always reports 0 (no mitigation applicable/active) on this
 * configuration.
 */
int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}
1153
1154 #if __ARM_ARCH_8_5__
1155 void
arm_context_switch_requires_sync()1156 arm_context_switch_requires_sync()
1157 {
1158 current_cpu_datap()->sync_on_cswitch = 1;
1159 }
1160 #endif
1161
1162 #if __has_feature(ptrauth_calls)
/*
 * Report whether user-mode JOP (pointer authentication) protection has
 * been disabled. Always FALSE on this configuration — user JOP remains
 * enabled. Only compiled when ptrauth_calls is available.
 */
boolean_t
arm_user_jop_disabled(void)
{
	return FALSE;
}
1168 #endif /* __has_feature(ptrauth_calls) */
1169