/*
 * Copyright (c) 2007-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <kern/monotonic.h>

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>
#include <arm/cpu_capabilities_public.h>

#include <vm/vm_map_xnu.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

#include <san/kcov_stksz.h>

#include <IOKit/IOBSD.h>

#include <pexpert/arm64/apple_arm64_cpu.h>
#include <pexpert/pexpert.h>

// fixme: rdar://114299113 tracks resolving the supportlib issue with hwtrace features

extern int debug_task;

/* zone for debug_state area */
ZONE_DEFINE_TYPE(ads_zone, "arm debug state", arm_debug_state_t, ZC_NONE);
ZONE_DEFINE_TYPE(user_ss_zone, "user save state", arm_context_t, ZC_NONE);

#if HAS_ARM_FEAT_SME
static SECURITY_READ_ONLY_LATE(uint16_t) sme_svl_b;
/* zone for arm_sme_saved_state_t allocations */
static SECURITY_READ_ONLY_LATE(zone_t) sme_ss_zone;
#endif

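/*
 * Capture whether the current CPU supports SME and whether its ZA storage
 * is currently enabled (SVCR.ZA); compiles to a no-op without SME.
 */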
void
arm_get_matrix_cpu_state(struct arm_matrix_cpu_state *cpu_state)
{
#if HAS_ARM_FEAT_SME
	cpu_state->have_sme = arm_sme_version() > 0;
	if (cpu_state->have_sme) {
		cpu_state->za_is_enabled = !!(__builtin_arm_rsr64("SVCR") & SVCR_ZA);
	} else {
		cpu_state->za_is_enabled = false;
	}
#endif /* HAS_ARM_FEAT_SME */

#if !HAS_ARM_FEAT_SME
#pragma unused(cpu_state)
#endif
}

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}

#if HAS_ARM_FEAT_SME
static inline bool
machine_thread_has_valid_za(const arm_sme_saved_state_t *_Nullable sme_ss)
{
	return sme_ss && (sme_ss->svcr & SVCR_ZA);
}

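/*
 * Return the thread's SME save area, or NULL if none has been allocated.
 * Any umatrix header present must carry the ARM_SME_SAVED_STATE flavor.
 */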
arm_sme_saved_state_t *
machine_thread_get_sme_state(thread_t thread)
{
	arm_state_hdr_t *hdr = thread->machine.umatrix_hdr;
	if (hdr) {
		assert(hdr->flavor == ARM_SME_SAVED_STATE);
		return thread->machine.usme;
	}

	return NULL;
}

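/*
 * Save the outgoing thread's SME context: TPIDR2_EL0 unconditionally, plus
 * the ZA/ZT0 storage only when ZA is live on this CPU.
 */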
static void
machine_save_sme_context(thread_t old, arm_sme_saved_state_t *old_sme_ss, const struct arm_matrix_cpu_state *cpu_state)
{
	/*
	 * Note: we're deliberately not saving old_sme_ss->svcr, since it
	 * already happened on kernel entry. Likewise we're not restoring the
	 * SM bit from new_sme_ss->svcr, since we don't want streaming SVE mode
	 * active while we're in kernel space; we'll put it back on kernel exit.
	 */

	old->machine.tpidr2_el0 = __builtin_arm_rsr64("TPIDR2_EL0");

	if (cpu_state->za_is_enabled) {
		arm_save_sme_za_zt0(&old_sme_ss->context, old_sme_ss->svl_b);
	}
}

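/*
 * Restore the incoming thread's TPIDR2_EL0, then its ZA/ZT0 contents if it
 * has valid ZA state, toggling the CPU's ZA storage to match; finally allow
 * EL0 SME access for threads that own SME state.
 */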
static void
machine_restore_sme_context(thread_t new, const arm_sme_saved_state_t *new_sme_ss, const struct arm_matrix_cpu_state *cpu_state)
{
	__builtin_arm_wsr64("TPIDR2_EL0", new->machine.tpidr2_el0);

	if (new_sme_ss) {
		if (machine_thread_has_valid_za(new_sme_ss)) {
			if (!cpu_state->za_is_enabled) {
				asm volatile ("smstart za");
			}
			arm_load_sme_za_zt0(&new_sme_ss->context, new_sme_ss->svl_b);
		} else if (cpu_state->za_is_enabled) {
			asm volatile ("smstop za");
		}

		arm_sme_trap_at_el0(false);
	}
}

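/*
 * The incoming thread has no SME state: turn off ZA storage if it was left
 * enabled and re-arm the EL0 SME trap.
 */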
static void
machine_disable_sme_context(const struct arm_matrix_cpu_state *cpu_state)
{
	if (cpu_state->za_is_enabled) {
		asm volatile ("smstop za");
	}

	arm_sme_trap_at_el0(true);
}
#endif /* HAS_ARM_FEAT_SME */

#if HAVE_MACHINE_THREAD_MATRIX_STATE
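/*
 * Switch the matrix (SME) portion of the extended context: save the
 * outgoing thread's state, disable matrix access if the incoming thread
 * has none, and restore the incoming thread's state otherwise.
 */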
static void
machine_switch_matrix_context(thread_t old, thread_t new)
{
	struct arm_matrix_cpu_state cpu_state;
	arm_get_matrix_cpu_state(&cpu_state);

#if HAS_ARM_FEAT_SME
	arm_sme_saved_state_t *old_sme_ss = machine_thread_get_sme_state(old);
	const arm_sme_saved_state_t *new_sme_ss = machine_thread_get_sme_state(new);

	if (cpu_state.have_sme) {
		machine_save_sme_context(old, old_sme_ss, &cpu_state);

		if (!new_sme_ss) {
			machine_disable_sme_context(&cpu_state);
		}

		machine_restore_sme_context(new, new_sme_ss, &cpu_state);
	}
#endif /* HAS_ARM_FEAT_SME */
}
#endif /* HAVE_MACHINE_THREAD_MATRIX_STATE */

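/*
 * Transfer the per-cpu data pointer and the packed pcpu-base/cpu-number
 * value from the outgoing thread to the incoming one.
 */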
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base = old->machine.pcpu_data_base_and_cpu_number;

	/* TODO: Should this be ordered? */

	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base_and_cpu_number = 0;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base_and_cpu_number = base;
}

/**
 * routine: machine_switch_pmap_and_extended_context
 *
 * Helper function used by machine_switch_context and machine_stack_handoff to
 * switch the extended context and switch the pmap if necessary.
 *
 */
static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;

#if HAVE_MACHINE_THREAD_MATRIX_STATE
	machine_switch_matrix_context(old, new);
#endif

	new_pmap = new->map->pmap;
	bool pmap_changed = old->map->pmap != new_pmap;
	bool sec_override_changed = false;

	if (pmap_changed || sec_override_changed) {
		pmap_switch(new_pmap, new);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB. ARM requires that the
		 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee. We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0. Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);
	}

	machine_thread_switch_cpu_data(old, new);
}

/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
	thread_t retval;

#if __ARM_PAN_AVAILABLE__
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("context switch with PAN disabled");
	}
#endif

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	if (old == new) {
		panic("machine_switch_context");
	}

#if CONFIG_CPU_COUNTERS
	kpc_off_cpu(old);
#endif /* CONFIG_CPU_COUNTERS */

	machine_switch_pmap_and_extended_context(old, new);

	machine_switch_context_kprintf("old = %p continuation = %p new = %p\n", old, continuation, new);

	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}

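/* A thread is on core iff it currently owns a CPU's per-cpu data pointer. */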
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.CpuDatap != NULL;
}

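/*
 * Variant of machine_thread_on_core() that tolerates a thread pointer which
 * may already have been freed: the read goes through a copyin-style accessor,
 * so a stale pointer yields "not on core" instead of a kernel fault.
 */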
boolean_t
machine_thread_on_core_allow_invalid(thread_t thread)
{
#define _copyin_fn _copyin_atomic64

	extern int _copyin_fn(const char *src, uint64_t *dst);
	uint64_t addr;

	/*
	 * Utilize that the thread zone is sequestered, which means
	 * that this kernel-to-kernel copyin can't read data
	 * from anything but a thread, or zeroed or freed memory.
	 */
	assert(get_preemption_level() > 0);
	thread = pgz_decode_allow_invalid(thread, ZONE_ID_THREAD);
	if (thread == THREAD_NULL) {
		return false;
	}
	thread_require(thread);
	if (_copyin_fn((void *)&thread->machine.CpuDatap, &addr) == 0) {
		return addr != 0;
	}
	return false;

#undef _copyin_fn
}

/*
 * Routine: machine_thread_create
 *
 */
void
machine_thread_create(thread_t thread, task_t task, bool first_thread)
{
#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %p\n", thread);

	if (!first_thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		/* Setting this offset will cause any attempt to use it to panic. */
		thread->machine.pcpu_data_base_and_cpu_number =
		    ml_make_pcpu_base_and_cpu_number(VM_MIN_KERNEL_ADDRESS, 0);
	}
	thread->machine.arm_machine_flags = 0;
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	if (task->disable_user_jop) {
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_DISABLE_USER_JOP;
	}
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		arm_context_t *contextData = zalloc_flags(user_ss_zone,
		    Z_WAITOK | Z_NOFAIL);

#if __has_feature(ptrauth_calls)
		uint64_t intr = ml_pac_safe_interrupts_disable();
		zone_require(user_ss_zone, contextData);
#endif
		thread->machine.contextData = contextData;
		thread->machine.upcb = &contextData->ss;
		thread->machine.uNeon = &contextData->ns;
#if __has_feature(ptrauth_calls)
		ml_pac_safe_interrupts_restore(intr);
#endif

		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

#if HAVE_MACHINE_THREAD_MATRIX_STATE
	thread->machine.umatrix_hdr = NULL;
#endif

#if HAS_ARM_FEAT_SME
	thread->machine.tpidr2_el0 = 0;
#endif

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	machine_thread_state_initialize(thread);
}

/*
 * Routine: machine_thread_process_signature
 *
 * Called to allow code-signature-dependent adjustments to the thread
 * state. Note that this is usually called twice for the main thread:
 * once at thread creation by thread_create, when the signature is
 * potentially not attached yet (which is usually the case for the
 * first/main thread of a task), and once after the task's signature
 * has actually been attached.
 *
 */
kern_return_t
machine_thread_process_signature(thread_t __unused thread, task_t __unused task)
{
	kern_return_t result = KERN_SUCCESS;

	/*
	 * Reset to default state.
	 *
	 * In general, this function must not assume anything about the
	 * previous signature-dependent thread state.
	 *
	 * At least at the time of writing this, threads don't transition
	 * to different code signatures, so each thread this function
	 * operates on is "fresh" in the sense that
	 * machine_thread_process_signature() has either not even been
	 * called on it yet, or only been called as part of thread
	 * creation when there was no signature yet.
	 *
	 * But for easier reasoning, and to prevent future bugs, this
	 * function should always recalculate all signature-dependent
	 * thread state, as if the signature could actually change from
	 * one signature to another.
	 */
#if !__ARM_KERNEL_PROTECT__
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_PRESERVE_X18);
#endif /* !__ARM_KERNEL_PROTECT__ */
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE);

	/*
	 * Set signature-dependent state.
	 */
	if (task != kernel_task && task_has_64Bit_data(task)) {
#if !__ARM_KERNEL_PROTECT__
#if CONFIG_ROSETTA
		if (task_is_translated(task)) {
			/* Note that for x86_64 translation specifically, the
			 * context switch path implicitly switches x18 regardless
			 * of this flag. */
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* CONFIG_ROSETTA */

		if (task->preserve_x18) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* !__ARM_KERNEL_PROTECT__ */

		if (task->uses_1ghz_timebase) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE;
		}
	} else {
#if !__ARM_KERNEL_PROTECT__
		/*
		 * For informational value only: context switch only trashes
		 * x18 for user threads. (Except for devices with
		 * __ARM_KERNEL_PROTECT__, which make real destructive use of
		 * x18.)
		 */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
#endif /* !__ARM_KERNEL_PROTECT__ */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE;
	}

	/**
	 * Make sure the machine flags are observed before the thread becomes available
	 * to run in user mode, especially in the posix_spawn() path.
	 */
	os_atomic_thread_fence(release);
	return result;
}

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

#if HAS_ARM_FEAT_SME
		machine_thread_sme_state_free(thread);
#endif

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		if (os_ref_release(&thread->machine.DebugData->ref) == 0) {
			zfree(ads_zone, thread->machine.DebugData);
		}
	}
}

#if HAS_ARM_FEAT_SME
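/*
 * Allocate a zeroed SME save area and initialize its header for this CPU's
 * streaming vector length.
 */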
static arm_sme_saved_state_t *
zalloc_sme_saved_state(void)
{
	arm_sme_saved_state_t *sme_ss = zalloc_flags(sme_ss_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	sme_ss->hdr.flavor = ARM_SME_SAVED_STATE;
	sme_ss->hdr.count = arm_sme_saved_state_count(sme_svl_b);
	sme_ss->svl_b = sme_svl_b;
	return sme_ss;
}

kern_return_t
machine_thread_sme_state_alloc(thread_t thread)
{
	assert(arm_sme_version());

	if (thread->machine.usme) {
		panic("thread %p already has SME saved state %p",
		    thread, thread->machine.usme);
	}

	arm_sme_saved_state_t *sme_ss = zalloc_sme_saved_state();
	disable_preemption();

	arm_sme_trap_at_el0(false);
	__builtin_arm_isb(ISB_SY);
	thread->machine.usme = sme_ss;

	enable_preemption();

	return KERN_SUCCESS;
}

void
machine_thread_sme_state_free(thread_t thread)
{
	arm_sme_saved_state_t *sme_ss = machine_thread_get_sme_state(thread);

	if (sme_ss) {
		thread->machine.usme = NULL;
		zfree(sme_ss_zone, sme_ss);
	}
}

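/*
 * Duplicate the source thread's SME saved state into a fresh allocation for
 * the target thread. Z and P were already saved on kernel entry; ZA and ZT0
 * are read from the live registers, so this is expected to run on the CPU
 * that holds the source thread's matrix state.
 */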
static void
machine_thread_sme_state_dup(const arm_sme_saved_state_t *src_sme_ss, thread_t target)
{
	arm_sme_saved_state_t *sme_ss = zalloc_sme_saved_state();
	assert(sme_ss->svl_b == src_sme_ss->svl_b);

	arm_sme_context_t *context = &sme_ss->context;
	uint16_t svl_b = sme_ss->svl_b;

	sme_ss->svcr = src_sme_ss->svcr;
	/* Z and P are saved on kernel entry. ZA and ZT0 may be stale. */
	if (sme_ss->svcr & SVCR_SM) {
		const arm_sme_context_t *src_context = &src_sme_ss->context;
		memcpy(arm_sme_z(context), const_arm_sme_z(src_context), arm_sme_z_size(svl_b));
		memcpy(arm_sme_p(context, svl_b), const_arm_sme_p(src_context, svl_b), arm_sme_p_size(svl_b));
	}
	if (sme_ss->svcr & SVCR_ZA) {
		arm_save_sme_za_zt0(context, svl_b);
	}

	target->machine.usme = sme_ss;
}
#endif /* HAS_ARM_FEAT_SME */

#if HAVE_MACHINE_THREAD_MATRIX_STATE
void
machine_thread_matrix_state_dup(thread_t target)
{
	assert(!target->machine.umatrix_hdr);
	thread_t thread = current_thread();

#if HAS_ARM_FEAT_SME
	const arm_sme_saved_state_t *sme_ss = machine_thread_get_sme_state(thread);
	if (sme_ss) {
		machine_thread_sme_state_dup(sme_ss, target);
		return;
	}
#endif
}
#endif /* HAVE_MACHINE_THREAD_MATRIX_STATE */

/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
#if HAS_ARM_FEAT_SME
	if (arm_sme_version()) {
		sme_svl_b = arm_sme_svl_b();
		vm_size_t size = arm_sme_saved_state_count(sme_svl_b) * sizeof(unsigned int);
		sme_ss_zone = zone_create_ext("SME saved state", size, ZC_NONE, ZONE_ID_ANY, NULL);
	}
#endif
}

/*
 * Routine: machine_thread_template_init
 *
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr()
{
	return get_saved_state_pc(current_thread()->machine.upcb);
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;
	thread->machine.kstackptr = NULL;

	return stack;
}

/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	thread->machine.kstackptr = kstackptr;
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = (uint64_t)kstackptr;
	savestate->pc_was_in_userspace = false;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state. */
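	/*
	 * pacia1716 computes PACIA(x17, x16): x17 holds the address of
	 * thread_continue and x16 holds the saved SP, so the LR stored below
	 * is signed against the stack pointer it will be restored with.
	 */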
	uint64_t intr = ml_pac_safe_interrupts_disable();
	asm volatile (
		"adrp	x17, _thread_continue@page"		"\n"
		"add	x17, x17, _thread_continue@pageoff"	"\n"
		"ldr	x16, [%[ss], %[SS64_SP]]"		"\n"
		"pacia1716"					"\n"
		"str	x17, [%[ss], %[SS64_LR]]"		"\n"
		:
		: [ss] "r"(&context->ss),
		[SS64_SP] "i"(offsetof(struct arm_kernel_saved_state, sp)),
		[SS64_LR] "i"(offsetof(struct arm_kernel_saved_state, lr))
		: "x16", "x17"
		);
	ml_pac_safe_interrupts_restore(intr);
#else
	savestate->lr = (uintptr_t)thread_continue;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}

/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t stack;

#if __ARM_PAN_AVAILABLE__
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

#if CONFIG_CPU_COUNTERS
	kpc_off_cpu(old);
#endif /* CONFIG_CPU_COUNTERS */

	stack = machine_stack_detach(old);
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif
	new->kernel_stack = stack;
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	new->machine.kstackptr = kstackptr;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
#if KASAN_TBI
		kasan_unpoison_stack(old->reserved_stack, kernel_stack_size);
#endif /* KASAN_TBI */
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}

/*
 * Routine: call_continuation
 *
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %lx\n",
	    current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

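/*
 * Write one hardware breakpoint/watchpoint value or control register. The
 * control variants also OR the value into an accumulator so the caller can
 * tell whether any control register was left enabled.
 */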
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))

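/*
 * Install a 32-bit user debug state (or tear it down when debug_state is
 * NULL) on the current CPU: swap the per-cpu reference-counted state, then
 * program every implemented breakpoint/watchpoint pair and the MDSCR_EL1
 * enable bits.
 */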
void
arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr;
	arm_debug_state_t off_state;
	arm_debug_state_t *cpu_debug;
	uint64_t all_ctrls = 0;

	// Non-developers should never need to have hardware break/watchpoints
	// set on their phones.
	extern bool developer_mode_state(void);
	if (!developer_mode_state()) {
		return;
	}

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0, 1); // MDSCR_EL1[SS]

		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}

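/*
 * 64-bit counterpart of arm_debug_set32(): identical flow, but programs the
 * full 64-bit breakpoint/watchpoint value registers.
 */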
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr;
	arm_debug_state_t off_state;
	arm_debug_state_t *cpu_debug;
	uint64_t all_ctrls = 0;

	// Non-developers should never need to have hardware break/watchpoints
	// set on their phones.
	extern bool developer_mode_state(void);
	if (!developer_mode_state()) {
		return;
	}

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0, 1); // MDSCR_EL1[SS]

		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}

void
arm_debug_set(arm_debug_state_t *debug_state)
{
	if (debug_state) {
		switch (debug_state->dsh.flavor) {
		case ARM_DEBUG_STATE32:
			arm_debug_set32(debug_state);
			break;
		case ARM_DEBUG_STATE64:
			arm_debug_set64(debug_state);
			break;
		default:
			panic("arm_debug_set");
			break;
		}
	} else {
		if (thread_is_64bit_data(current_thread())) {
			arm_debug_set64(debug_state);
		} else {
			arm_debug_set32(debug_state);
		}
	}
}

#define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
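/*
 * The debug-state validators below reject any enabled breakpoint or
 * watchpoint whose address lies outside the user address range for the
 * state's width.
 */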
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

/*
 * Duplicate one arm_debug_state_t to another. The "all" parameter
 * is ignored in the case of ARM -- is this the right assumption?
 */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
    arm_legacy_debug_state_t * target,
    __unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(arm_debug_state32_t * src,
    arm_debug_state32_t * target,
    __unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(arm_debug_state64_t * src,
    arm_debug_state64_t * target,
    __unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state64_t));
}

kern_return_t
machine_thread_set_tsd_base(thread_t thread,
    mach_vm_offset_t tsd_base)
{
	if (get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread_is_64bit_addr(thread)) {
		if (tsd_base > vm_map_max(thread->map)) {
			tsd_base = 0ULL;
		}
	} else {
		if (tsd_base > UINT32_MAX) {
			tsd_base = 0ULL;
		}
	}

	thread->machine.cthread_self = tsd_base;

	/* For the current thread, make the TSD base active immediately. */
	if (thread == current_thread()) {
		mp_disable_preemption();
		set_tpidrro(tsd_base);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}

void
machine_tecs(__unused thread_t thr)
{
}

int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}

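/*
 * Request that the next context switch on this CPU execute an ISB before
 * resuming the incoming thread.
 */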
void
arm_context_switch_requires_sync()
{
	current_cpu_datap()->sync_on_cswitch = 1;
}

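/*
 * Context-switch path hook: issue the requested ISB, if any, and clear the
 * per-cpu flag.
 */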
void
arm_context_switch_sync()
{
	if (__improbable(current_cpu_datap()->sync_on_cswitch != 0)) {
		__builtin_arm_isb(ISB_SY);
		current_cpu_datap()->sync_on_cswitch = 0;
	}
}

#if __has_feature(ptrauth_calls)
boolean_t
arm_user_jop_disabled(void)
{
	return FALSE;
}
#endif /* __has_feature(ptrauth_calls) */